//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the internal per-function state used for llvm translation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H

#include "CGBuilder.h"
#include "CGLoopInfo.h"
#include "CGValue.h"
#include "CodeGenModule.h"
#include "EHScopeStack.h"
#include "SanitizerHandler.h"
#include "VarBypassDetector.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CurrentSourceLocExprScope.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtSYCL.h"
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
#include <optional>

namespace llvm {
class BasicBlock;
class ConvergenceControlInst;
class LLVMContext;
class MDNode;
class SwitchInst;
class Twine;
class Value;
class CanonicalLoopInfo;
} // namespace llvm

namespace clang {
class ASTContext;
class CXXDestructorDecl;
class CXXForRangeStmt;
class CXXTryStmt;
class Decl;
class LabelDecl;
class FunctionDecl;
class FunctionProtoType;
class LabelStmt;
class ObjCContainerDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
class ObjCMethodDecl;
class ObjCImplementationDecl;
class ObjCPropertyImplDecl;
class TargetInfo;
class VarDecl;
class ObjCForCollectionStmt;
class ObjCAtTryStmt;
class ObjCAtThrowStmt;
class ObjCAtSynchronizedStmt;
class ObjCAutoreleasePoolStmt;
class OMPUseDevicePtrClause;
class OMPUseDeviceAddrClause;
class SVETypeFlags;
class OMPExecutableDirective;

namespace analyze_os_log {
class OSLogBufferLayout;
}

namespace CodeGen {
class CodeGenTypes;
class CodeGenPGO;
class CGCallee;
class CGFunctionInfo;
class CGBlockInfo;
class CGCXXABI;
class BlockByrefHelpers;
class BlockByrefInfo;
class BlockFieldFlags;
class RegionCodeGenTy;
class TargetCodeGenInfo;
struct OMPTaskDataTy;
struct CGCoroData;

// clang-format off
/// The kind of evaluation to perform on values of a particular
/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
/// CGExprAgg?
///
/// TODO: should vectors maybe be split out into their own thing?
enum TypeEvaluationKind {
  TEK_Scalar,
  TEK_Complex,
  TEK_Aggregate
};
// clang-format on

/// Helper class with most of the code for saving a value for a
/// conditional expression cleanup.
struct DominatingLLVMValue {
  typedef llvm::PointerIntPair<llvm::Value *, 1, bool> saved_type;

  /// Answer whether the given value needs extra work to be saved.
  static bool needsSaving(llvm::Value *value) {
    if (!value)
      return false;

    // If it's not an instruction, we don't need to save.
    if (!isa<llvm::Instruction>(value))
      return false;

    // If it's an instruction in the entry block, we don't need to save.
    llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
    return (block != &block->getParent()->getEntryBlock());
  }

  static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
};

/// A partial specialization of DominatingValue for llvm::Values that
/// might be llvm::Instructions.
template <class T> struct DominatingPointer<T, true> : DominatingLLVMValue {
  typedef T *type;
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return static_cast<T *>(DominatingLLVMValue::restore(CGF, value));
  }
};

/// A specialization of DominatingValue for Address.
template <> struct DominatingValue<Address> {
  typedef Address type;

  struct saved_type {
    DominatingLLVMValue::saved_type BasePtr;
    llvm::Type *ElementType;
    CharUnits Alignment;
    DominatingLLVMValue::saved_type Offset;
    llvm::PointerType *EffectiveType;
  };

  static bool needsSaving(type value) {
    if (DominatingLLVMValue::needsSaving(value.getBasePointer()) ||
        DominatingLLVMValue::needsSaving(value.getOffset()))
      return true;
    return false;
  }
  static saved_type save(CodeGenFunction &CGF, type value) {
    return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
            value.getElementType(), value.getAlignment(),
            DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
  }
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
                   value.ElementType, value.Alignment, CGPointerAuthInfo(),
                   DominatingLLVMValue::restore(CGF, value.Offset));
  }
};

/// A specialization of DominatingValue for RValue.
template <> struct DominatingValue<RValue> {
  typedef RValue type;
  class saved_type {
    enum Kind {
      ScalarLiteral,
      ScalarAddress,
      AggregateLiteral,
      AggregateAddress,
      ComplexAddress
    };
    union {
      struct {
        DominatingLLVMValue::saved_type first, second;
      } Vals;
      DominatingValue<Address>::saved_type AggregateAddr;
    };
    LLVM_PREFERRED_TYPE(Kind)
    unsigned K : 3;

    saved_type(DominatingLLVMValue::saved_type Val1, unsigned K)
        : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}

    saved_type(DominatingLLVMValue::saved_type Val1,
               DominatingLLVMValue::saved_type Val2)
        : Vals{Val1, Val2}, K(ComplexAddress) {}

    saved_type(DominatingValue<Address>::saved_type AggregateAddr, unsigned K)
        : AggregateAddr(AggregateAddr), K(K) {}

  public:
    static bool needsSaving(RValue value);
    static saved_type save(CodeGenFunction &CGF, RValue value);
    RValue restore(CodeGenFunction &CGF);

    // implementations in CGCleanup.cpp
  };

  static bool needsSaving(type value) { return saved_type::needsSaving(value); }
  static saved_type save(CodeGenFunction &CGF, type value) {
    return saved_type::save(CGF, value);
  }
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return value.restore(CGF);
  }
};

/// A scoped helper to set the current source atom group for
/// CGDebugInfo::addInstToCurrentSourceAtom. A source atom is a source construct
/// that is "interesting" for debug stepping purposes. We use an atom group
/// number to track the instruction(s) that implement the functionality for the
/// atom, plus backup instructions/source locations.
class ApplyAtomGroup {
  uint64_t OriginalAtom = 0;
  CGDebugInfo *DI = nullptr;

  ApplyAtomGroup(const ApplyAtomGroup &) = delete;
  void operator=(const ApplyAtomGroup &) = delete;

public:
  ApplyAtomGroup(CGDebugInfo *DI);
  ~ApplyAtomGroup();
};
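
// Example usage of ApplyAtomGroup (an illustrative sketch, not a verbatim
// in-tree caller):
//
//   void emitConstruct(CodeGenFunction &CGF, const Stmt *S) {
//     ApplyAtomGroup Grp(CGF.getDebugInfo()); // open a fresh atom group
//     CGF.EmitStmt(S); // instructions emitted for S key off this atom group
//   } // the destructor restores the previous atom group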

/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public CodeGenTypeCache {
  CodeGenFunction(const CodeGenFunction &) = delete;
  void operator=(const CodeGenFunction &) = delete;

  friend class CGCXXABI;

public:
  /// A jump destination is an abstract label, branching to which may
  /// require a jump out through normal cleanups.
  struct JumpDest {
    JumpDest() : Block(nullptr), Index(0) {}
    JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
             unsigned Index)
        : Block(Block), ScopeDepth(Depth), Index(Index) {}

    bool isValid() const { return Block != nullptr; }
    llvm::BasicBlock *getBlock() const { return Block; }
    EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
    unsigned getDestIndex() const { return Index; }

    // This should be used cautiously.
    void setScopeDepth(EHScopeStack::stable_iterator depth) {
      ScopeDepth = depth;
    }

  private:
    llvm::BasicBlock *Block;
    EHScopeStack::stable_iterator ScopeDepth;
    unsigned Index;
  };

  CodeGenModule &CGM; // Per-module state.
  const TargetInfo &Target;

  // For EH/SEH outlined funclets, this field points to the parent's CGF.
  CodeGenFunction *ParentCGF = nullptr;

  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
  LoopInfoStack LoopStack;
  CGBuilderTy Builder;

  // Stores variables for which we can't generate correct lifetime markers
  // because of jumps.
  VarBypassDetector Bypasses;

  /// List of recently emitted OMPCanonicalLoops.
  ///
  /// Since OMPCanonicalLoops are nested inside other statements (in particular
  /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
  /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
  /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
  /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
  /// this stack when done. Entering a new loop requires clearing this list; it
  /// either means we start parsing a new loop nest (in which case the previous
  /// loop nest goes out of scope), or a second loop at the same level, in
  /// which case it would be ambiguous into which of the two (or more) loops
  /// the loop nest would extend.
  SmallVector<llvm::CanonicalLoopInfo *, 4> OMPLoopNestStack;

  /// Stack to track the Logical Operator recursion nest for MC/DC.
  SmallVector<const BinaryOperator *, 16> MCDCLogOpStack;

  /// Stack to track the controlled convergence tokens.
  SmallVector<llvm::ConvergenceControlInst *, 4> ConvergenceTokenStack;

  /// Number of nested loops to be consumed by the last surrounding
  /// loop-associated directive.
  int ExpectedOMPLoopDepth = 0;

  // CodeGen lambda for loops and support for ordered clause
  typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
                                  JumpDest)>
      CodeGenLoopTy;
  typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
                                  const unsigned, const bool)>
      CodeGenOrderedTy;

  // Codegen lambda for loop bounds in worksharing loop constructs
  typedef llvm::function_ref<std::pair<LValue, LValue>(
      CodeGenFunction &, const OMPExecutableDirective &S)>
      CodeGenLoopBoundsTy;

  // Codegen lambda for loop bounds in dispatch-based loop implementation
  typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
      CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
      Address UB)>
      CodeGenDispatchBoundsTy;

  /// CGBuilder insert helper. This function is called after an
  /// instruction is created using Builder.
  void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
                    llvm::BasicBlock::iterator InsertPt) const;

  /// CurFuncDecl - Holds the Decl for the current outermost
  /// non-closure context.
  const Decl *CurFuncDecl = nullptr;
  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
  const Decl *CurCodeDecl = nullptr;
  const CGFunctionInfo *CurFnInfo = nullptr;
  QualType FnRetTy;
  llvm::Function *CurFn = nullptr;

  /// Save Parameter Decl for coroutine.
  llvm::SmallVector<const ParmVarDecl *, 4> FnArgs;

  // Holds coroutine data if the current function is a coroutine. We use a
  // wrapper to manage its lifetime, so that we don't have to define CGCoroData
  // in this header.
  struct CGCoroInfo {
    std::unique_ptr<CGCoroData> Data;
    bool InSuspendBlock = false;
    CGCoroInfo();
    ~CGCoroInfo();
  };
  CGCoroInfo CurCoro;

  bool isCoroutine() const { return CurCoro.Data != nullptr; }

  bool inSuspendBlock() const {
    return isCoroutine() && CurCoro.InSuspendBlock;
  }

  // Holds the FramePtr for await_suspend wrapper generation,
  // so that the __builtin_coro_frame call can be lowered
  // directly to the value of its second argument.
  struct AwaitSuspendWrapperInfo {
    llvm::Value *FramePtr = nullptr;
  };
  AwaitSuspendWrapperInfo CurAwaitSuspendWrapper;

  // Generates a wrapper function for the `llvm.coro.await.suspend.*`
  // intrinsics. It encapsulates the SuspendExpr in a function, to separate its
  // body from the main coroutine and avoid miscompilations. The intrinsic is
  // lowered to a call to this function in the CoroSplit pass.
  // The function signature is:
  //   <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
  // where <type> is one of (void, i1, ptr).
  llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
                                              Twine const &SuspendPointName,
                                              CoroutineSuspendExpr const &S);

  /// CurGD - The GlobalDecl for the current function being compiled.
  GlobalDecl CurGD;

  /// PrologueCleanupDepth - The cleanup depth enclosing all the
  /// cleanups associated with the parameters.
  EHScopeStack::stable_iterator PrologueCleanupDepth;

  /// ReturnBlock - Unified return block.
  JumpDest ReturnBlock;

  /// ReturnValue - The temporary alloca to hold the return
  /// value. This is invalid iff the function has no return value.
  Address ReturnValue = Address::invalid();

  /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
  /// This is invalid if sret is not in use.
  Address ReturnValuePointer = Address::invalid();

  /// If a return statement is being visited, this holds the return statement's
  /// result expression.
  const Expr *RetExpr = nullptr;

  /// Return true if a label was seen in the current scope.
  bool hasLabelBeenSeenInCurrentScope() const {
    if (CurLexicalScope)
      return CurLexicalScope->hasLabels();
    return !LabelMap.empty();
  }

  /// AllocaInsertPoint - This is an instruction in the entry block before which
  /// we prefer to insert allocas.
  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;

private:
  /// PostAllocaInsertPt - This is a place in the prologue where code can be
  /// inserted that will be dominated by all the static allocas. This helps
  /// achieve two things:
  /// 1. Contiguity of all static allocas (within the prologue) is maintained.
  /// 2. All other prologue code (which is dominated by the static allocas)
  ///    appears in source order immediately after all static allocas.
  ///
  /// PostAllocaInsertPt will be lazily created when it is *really* required.
  llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;

public:
  /// Return PostAllocaInsertPt. If it is not yet created, then insert it
  /// immediately after AllocaInsertPt.
  llvm::Instruction *getPostAllocaInsertPoint() {
    if (!PostAllocaInsertPt) {
      assert(AllocaInsertPt &&
             "Expected static alloca insertion point at function prologue");
      assert(AllocaInsertPt->getParent()->isEntryBlock() &&
             "EBB should be entry block of the current code gen function");
      PostAllocaInsertPt = AllocaInsertPt->clone();
      PostAllocaInsertPt->setName("postallocapt");
      PostAllocaInsertPt->insertAfter(AllocaInsertPt->getIterator());
    }

    return PostAllocaInsertPt;
  }

  /// API for captured statement code generation.
  class CGCapturedStmtInfo {
  public:
    explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
        : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
    explicit CGCapturedStmtInfo(const CapturedStmt &S,
                                CapturedRegionKind K = CR_Default)
        : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {

      RecordDecl::field_iterator Field =
          S.getCapturedRecordDecl()->field_begin();
      for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
                                                E = S.capture_end();
           I != E; ++I, ++Field) {
        if (I->capturesThis())
          CXXThisFieldDecl = *Field;
        else if (I->capturesVariable())
          CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
        else if (I->capturesVariableByCopy())
          CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
      }
    }

    virtual ~CGCapturedStmtInfo();

    CapturedRegionKind getKind() const { return Kind; }

    virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
    // Retrieve the value of the context parameter.
    virtual llvm::Value *getContextValue() const { return ThisValue; }

    /// Lookup the captured field decl for a variable.
    virtual const FieldDecl *lookup(const VarDecl *VD) const {
      return CaptureFields.lookup(VD->getCanonicalDecl());
    }

    bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
    virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }

    static bool classof(const CGCapturedStmtInfo *) { return true; }

    /// Emit the captured statement body.
    virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
      CGF.incrementProfileCounter(S);
      CGF.EmitStmt(S);
    }

    /// Get the name of the capture helper.
    virtual StringRef getHelperName() const { return "__captured_stmt"; }

    /// Get the CaptureFields
    llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
      return CaptureFields;
    }

  private:
    /// The kind of captured statement being generated.
    CapturedRegionKind Kind;

    /// Keep the map between VarDecl and FieldDecl.
    llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;

    /// The base address of the captured record, passed in as the first
    /// argument of the parallel region function.
    llvm::Value *ThisValue;

    /// Captured 'this' type.
    FieldDecl *CXXThisFieldDecl;
  };
  CGCapturedStmtInfo *CapturedStmtInfo = nullptr;

  /// RAII for correct setting/restoring of CapturedStmtInfo.
  class CGCapturedStmtRAII {
  private:
    CodeGenFunction &CGF;
    CGCapturedStmtInfo *PrevCapturedStmtInfo;

  public:
    CGCapturedStmtRAII(CodeGenFunction &CGF,
                       CGCapturedStmtInfo *NewCapturedStmtInfo)
        : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
      CGF.CapturedStmtInfo = NewCapturedStmtInfo;
    }
    ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
  };
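
  // A minimal usage sketch (illustrative; the surrounding emission logic is
  // assumed, not shown):
  //
  //   CGCapturedStmtInfo Info(*CS, CR_Default);
  //   CGCapturedStmtRAII Guard(CGF, &Info); // install for the nested emit
  //   CGF.EmitStmt(CS->getCapturedStmt());  // lookups go through the info
  //   // previous CapturedStmtInfo is restored when Guard goes out of scope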

  /// An abstract representation of regular/ObjC call/message targets.
  class AbstractCallee {
    /// The function declaration of the callee.
    const Decl *CalleeDecl;

  public:
    AbstractCallee() : CalleeDecl(nullptr) {}
    AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
    AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
    bool hasFunctionDecl() const {
      return isa_and_nonnull<FunctionDecl>(CalleeDecl);
    }
    const Decl *getDecl() const { return CalleeDecl; }
    unsigned getNumParams() const {
      if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
        return FD->getNumParams();
      return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
    }
    const ParmVarDecl *getParamDecl(unsigned I) const {
      if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
        return FD->getParamDecl(I);
      return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
    }
  };

  /// Sanitizers enabled for this function.
  SanitizerSet SanOpts;

  /// True if CodeGen currently emits code implementing sanitizer checks.
  bool IsSanitizerScope = false;

  /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
  class SanitizerScope {
    CodeGenFunction *CGF;

  public:
    SanitizerScope(CodeGenFunction *CGF);
    ~SanitizerScope();
  };
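
  // Typical usage (illustrative sketch): wrap the emission of the
  // instructions that implement a sanitizer check so they are attributed to
  // instrumentation:
  //
  //   {
  //     SanitizerScope SanScope(this); // sets IsSanitizerScope
  //     // ... emit the check's compare/branch/handler-call IR ...
  //   } // cleared again on scope exit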

  /// In C++, whether we are code generating a thunk. This controls whether we
  /// should emit cleanups.
  bool CurFuncIsThunk = false;

  /// In ARC, whether we should autorelease the return value.
  bool AutoreleaseResult = false;

  /// Whether we processed a Microsoft-style asm block during CodeGen. These can
  /// potentially set the return value.
  bool SawAsmBlock = false;

  GlobalDecl CurSEHParent;

  /// True if the current function is an outlined SEH helper. This can be a
  /// finally block or filter expression.
  bool IsOutlinedSEHHelper = false;

  /// True if CodeGen currently emits code inside a preserved access index
  /// region.
  bool IsInPreservedAIRegion = false;

  /// True if the current statement has nomerge attribute.
  bool InNoMergeAttributedStmt = false;

  /// True if the current statement has noinline attribute.
  bool InNoInlineAttributedStmt = false;

  /// True if the current statement has always_inline attribute.
  bool InAlwaysInlineAttributedStmt = false;

  /// True if the current statement has noconvergent attribute.
  bool InNoConvergentAttributedStmt = false;

  /// HLSL Branch attribute.
  HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr =
      HLSLControlFlowHintAttr::SpellingNotCalculated;

  // The CallExpr within the current statement that the musttail attribute
  // applies to. nullptr if there is no 'musttail' on the current statement.
  const CallExpr *MustTailCall = nullptr;

  /// Returns true if a function must make progress, which means the
  /// mustprogress attribute can be added.
  bool checkIfFunctionMustProgress() {
    if (CGM.getCodeGenOpts().getFiniteLoops() ==
        CodeGenOptions::FiniteLoopsKind::Never)
      return false;

    // C++11 and later guarantees that a thread eventually will do one of the
    // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
    // - terminate,
    // - make a call to a library I/O function,
    // - perform an access through a volatile glvalue, or
    // - perform a synchronization operation or an atomic operation.
    //
    // Hence each function is 'mustprogress' in C++11 or later.
    return getLangOpts().CPlusPlus11;
  }

  /// Returns true if a loop must make progress, which means the mustprogress
  /// attribute can be added. \p HasConstantCond indicates whether the branch
  /// condition is a known constant.
  bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody);

  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
  llvm::Value *BlockPointer = nullptr;

  llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
  FieldDecl *LambdaThisCaptureField = nullptr;

  /// A mapping from NRVO variables to the flags used to indicate
  /// when the NRVO has been applied to this variable.
  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;

  EHScopeStack EHStack;
  llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;

  // A stack of cleanups which were added to EHStack but have to be deactivated
  // later before being popped or emitted. These are usually deactivated on
  // exiting a `CleanupDeactivationScope` scope. For instance, after a
  // full-expr.
  //
  // These are especially useful for correctly emitting cleanups while
  // encountering branches out of an expression (through stmt-exprs or
  // coroutine suspensions).
  struct DeferredDeactivateCleanup {
    EHScopeStack::stable_iterator Cleanup;
    llvm::Instruction *DominatingIP;
  };
  llvm::SmallVector<DeferredDeactivateCleanup> DeferredDeactivationCleanupStack;

  // Enters a new scope for capturing cleanups which are deferred to be
  // deactivated, all of which will be deactivated once the scope is exited.
  struct CleanupDeactivationScope {
    CodeGenFunction &CGF;
    size_t OldDeactivateCleanupStackSize;
    bool Deactivated;
    CleanupDeactivationScope(CodeGenFunction &CGF)
        : CGF(CGF), OldDeactivateCleanupStackSize(
                        CGF.DeferredDeactivationCleanupStack.size()),
          Deactivated(false) {}

    void ForceDeactivate() {
      assert(!Deactivated && "Deactivating already deactivated scope");
      auto &Stack = CGF.DeferredDeactivationCleanupStack;
      for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
        CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
                                   Stack[I - 1].DominatingIP);
        Stack[I - 1].DominatingIP->eraseFromParent();
      }
      Stack.resize(OldDeactivateCleanupStackSize);
      Deactivated = true;
    }

    ~CleanupDeactivationScope() {
      if (Deactivated)
        return;
      ForceDeactivate();
    }
  };

  llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;

  llvm::Instruction *CurrentFuncletPad = nullptr;

  class CallLifetimeEnd final : public EHScopeStack::Cleanup {
    bool isRedundantBeforeReturn() override { return true; }

    llvm::Value *Addr;
    llvm::Value *Size;

  public:
    CallLifetimeEnd(RawAddress addr, llvm::Value *size)
        : Addr(addr.getPointer()), Size(size) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitLifetimeEnd(Size, Addr);
    }
  };

  // We are using objects of this 'cleanup' class to emit fake.use calls
  // for -fextend-variable-liveness. They are placed at the end of a variable's
  // scope analogous to lifetime markers.
  class FakeUse final : public EHScopeStack::Cleanup {
    Address Addr;

  public:
    FakeUse(Address addr) : Addr(addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitFakeUse(Addr);
    }
  };

  /// Header for data within LifetimeExtendedCleanupStack.
  struct LifetimeExtendedCleanupHeader {
    /// The size of the following cleanup object.
    unsigned Size;
    /// The kind of cleanup to push.
    LLVM_PREFERRED_TYPE(CleanupKind)
    unsigned Kind : 31;
    /// Whether this is a conditional cleanup.
    LLVM_PREFERRED_TYPE(bool)
    unsigned IsConditional : 1;

    size_t getSize() const { return Size; }
    CleanupKind getKind() const { return (CleanupKind)Kind; }
    bool isConditional() const { return IsConditional; }
  };

  /// i32s containing the indexes of the cleanup destinations.
  RawAddress NormalCleanupDest = RawAddress::invalid();

  unsigned NextCleanupDestIndex = 1;

  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
  llvm::BasicBlock *EHResumeBlock = nullptr;

  /// The exception slot. All landing pads write the current exception pointer
  /// into this alloca.
  llvm::Value *ExceptionSlot = nullptr;

  /// The selector slot. Under the MandatoryCleanup model, all landing pads
  /// write the current selector value into this alloca.
  llvm::AllocaInst *EHSelectorSlot = nullptr;

  /// A stack of exception code slots. Entering an __except block pushes a slot
  /// on the stack and leaving pops one. The __exception_code() intrinsic loads
  /// a value from the top of the stack.
  SmallVector<Address, 1> SEHCodeSlotStack;

  /// Value returned by __exception_info intrinsic.
  llvm::Value *SEHInfo = nullptr;

  /// Emits a landing pad for the current EH stack.
  llvm::BasicBlock *EmitLandingPad();

  llvm::BasicBlock *getInvokeDestImpl();

  /// Parent loop-based directive for scan directive.
  const OMPExecutableDirective *OMPParentLoopDirectiveForScan = nullptr;
  llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
  llvm::BasicBlock *OMPAfterScanBlock = nullptr;
  llvm::BasicBlock *OMPScanExitBlock = nullptr;
  llvm::BasicBlock *OMPScanDispatch = nullptr;
  bool OMPFirstScanLoop = false;

  /// Manages parent directive for scan directives.
  class ParentLoopDirectiveForScanRegion {
    CodeGenFunction &CGF;
    const OMPExecutableDirective *ParentLoopDirectiveForScan;

  public:
    ParentLoopDirectiveForScanRegion(
        CodeGenFunction &CGF,
        const OMPExecutableDirective &ParentLoopDirectiveForScan)
        : CGF(CGF),
          ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
      CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
    }
    ~ParentLoopDirectiveForScanRegion() {
      CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
    }
  };

  template <class T>
  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
    return DominatingValue<T>::save(*this, value);
  }
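
  // For example (an illustrative sketch): an argument to a cleanup pushed
  // inside a conditionally evaluated expression is saved up front and
  // rematerialized where the cleanup is finally emitted:
  //
  //   auto Saved = CGF.saveValueInCond(Ptr); // Ptr: llvm::Value *
  //   // ... later, at the cleanup's emission point:
  //   llvm::Value *V = DominatingValue<llvm::Value *>::restore(CGF, Saved);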

  class CGFPOptionsRAII {
  public:
    CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
    CGFPOptionsRAII(CodeGenFunction &CGF, const Expr *E);
    ~CGFPOptionsRAII();

  private:
    void ConstructorHelper(FPOptions FPFeatures);
    CodeGenFunction &CGF;
    FPOptions OldFPFeatures;
    llvm::fp::ExceptionBehavior OldExcept;
    llvm::RoundingMode OldRounding;
    std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
  };
  FPOptions CurFPFeatures;

  class CGAtomicOptionsRAII {
  public:
    CGAtomicOptionsRAII(CodeGenModule &CGM_, AtomicOptions AO)
        : CGM(CGM_), SavedAtomicOpts(CGM.getAtomicOpts()) {
      CGM.setAtomicOpts(AO);
    }
    CGAtomicOptionsRAII(CodeGenModule &CGM_, const AtomicAttr *AA)
        : CGM(CGM_), SavedAtomicOpts(CGM.getAtomicOpts()) {
      if (!AA)
        return;
      AtomicOptions AO = SavedAtomicOpts;
      for (auto Option : AA->atomicOptions()) {
        switch (Option) {
        case AtomicAttr::remote_memory:
          AO.remote_memory = true;
          break;
        case AtomicAttr::no_remote_memory:
          AO.remote_memory = false;
          break;
        case AtomicAttr::fine_grained_memory:
          AO.fine_grained_memory = true;
          break;
        case AtomicAttr::no_fine_grained_memory:
          AO.fine_grained_memory = false;
          break;
        case AtomicAttr::ignore_denormal_mode:
          AO.ignore_denormal_mode = true;
          break;
        case AtomicAttr::no_ignore_denormal_mode:
          AO.ignore_denormal_mode = false;
          break;
        }
      }
      CGM.setAtomicOpts(AO);
    }

    CGAtomicOptionsRAII(const CGAtomicOptionsRAII &) = delete;
    CGAtomicOptionsRAII &operator=(const CGAtomicOptionsRAII &) = delete;
    ~CGAtomicOptionsRAII() { CGM.setAtomicOpts(SavedAtomicOpts); }

  private:
    CodeGenModule &CGM;
    AtomicOptions SavedAtomicOpts;
  };

public:
  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
  /// rethrows.
  SmallVector<llvm::Value *, 8> ObjCEHValueStack;

  /// A class controlling the emission of a finally block.
  class FinallyInfo {
    /// Where the catchall's edge through the cleanup should go.
    JumpDest RethrowDest;

    /// A function to call to enter the catch.
    llvm::FunctionCallee BeginCatchFn;

    /// An i1 variable indicating whether or not the @finally is
    /// running for an exception.
    llvm::AllocaInst *ForEHVar = nullptr;

    /// An i8* variable into which the exception pointer to rethrow
    /// has been saved.
    llvm::AllocaInst *SavedExnVar = nullptr;

  public:
    void enter(CodeGenFunction &CGF, const Stmt *Finally,
               llvm::FunctionCallee beginCatchFn,
               llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
    void exit(CodeGenFunction &CGF);
  };

  /// Returns true inside SEH __try blocks.
  bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }

  /// Returns true while emitting a cleanuppad.
  bool isCleanupPadScope() const {
    return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
  }

  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
  /// current full-expression. Safe against the possibility that
  /// we're currently inside a conditionally-evaluated expression.
  template <class T, class... As>
  void pushFullExprCleanup(CleanupKind kind, As... A) {
    // If we're not in a conditional branch, or if none of the
    // arguments requires saving, then use the unconditional cleanup.
    if (!isInConditionalBranch())
      return EHStack.pushCleanup<T>(kind, A...);

    // Stash values in a tuple so we can guarantee the order of saves.
    typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
    SavedTuple Saved{saveValueInCond(A)...};

    typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
    EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
    initFullExprCleanup();
  }
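
  // For example (sketch; MyCleanup stands in for any EHScopeStack::Cleanup
  // subclass whose constructor takes the given arguments):
  //
  //   CGF.pushFullExprCleanup<MyCleanup>(NormalAndEHCleanup, Addr);
  //
  // Outside a conditional branch this pushes the cleanup directly; inside
  // one, Addr is saved first and the cleanup is pushed as a
  // ConditionalCleanup guarded by an active flag.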

  /// Queue a cleanup to be pushed after finishing the current full-expression,
  /// potentially with an active flag.
  template <class T, class... As>
  void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
    if (!isInConditionalBranch())
      return pushCleanupAfterFullExprWithActiveFlag<T>(
          Kind, RawAddress::invalid(), A...);

    RawAddress ActiveFlag = createCleanupActiveFlag();
    assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
           "cleanup active flag should never need saving");

    typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
    SavedTuple Saved{saveValueInCond(A)...};

    typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
    pushCleanupAfterFullExprWithActiveFlag<CleanupType>(Kind, ActiveFlag,
                                                        Saved);
  }

  template <class T, class... As>
  void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind,
                                              RawAddress ActiveFlag, As... A) {
    LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
                                            ActiveFlag.isValid()};

    size_t OldSize = LifetimeExtendedCleanupStack.size();
    LifetimeExtendedCleanupStack.resize(
        LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
        (Header.IsConditional ? sizeof(ActiveFlag) : 0));

    static_assert(sizeof(Header) % alignof(T) == 0,
                  "Cleanup will be allocated on misaligned address");
    char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
    new (Buffer) LifetimeExtendedCleanupHeader(Header);
    new (Buffer + sizeof(Header)) T(A...);
    if (Header.IsConditional)
      new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
  }

  // Push a cleanup onto EHStack and deactivate it later. It is usually
  // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
  // full expression).
  template <class T, class... As>
  void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A) {
    // Placeholder dominating IP for this cleanup.
    llvm::Instruction *DominatingIP =
        Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
    EHStack.pushCleanup<T>(Kind, A...);
    DeferredDeactivationCleanupStack.push_back(
        {EHStack.stable_begin(), DominatingIP});
  }

  /// Set up the last cleanup that was pushed as a conditional
  /// full-expression cleanup.
  void initFullExprCleanup() {
    initFullExprCleanupWithFlag(createCleanupActiveFlag());
  }

  void initFullExprCleanupWithFlag(RawAddress ActiveFlag);
  RawAddress createCleanupActiveFlag();

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object destructor of an object of the given type at the
  /// given address. Does nothing if T is not a C++ class type with a
  /// non-trivial destructor.
  void PushDestructorCleanup(QualType T, Address Addr);

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object variant of the given destructor on the object at
  /// the given address.
  void PushDestructorCleanup(const CXXDestructorDecl *Dtor, QualType T,
                             Address Addr);

  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
  /// process all branch fixups.
  void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
                       bool ForDeactivation = false);

  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
  /// The block cannot be reactivated. Pops it if it's the top of the
  /// stack.
  ///
  /// \param DominatingIP - An instruction which is known to
  /// dominate the current IP (if set) and which lies along
  /// all paths of execution between the current IP and the
  /// point at which the cleanup comes into scope.
  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
                              llvm::Instruction *DominatingIP);

  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
  /// Cannot be used to resurrect a deactivated cleanup.
  ///
  /// \param DominatingIP - An instruction which is known to
  /// dominate the current IP (if set) and which lies along
  /// all paths of execution between the current IP and the
  /// point at which the cleanup comes into scope.
  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
                            llvm::Instruction *DominatingIP);

  /// Enters a new scope for capturing cleanups, all of which
  /// will be executed once the scope is exited.
  class RunCleanupsScope {
    EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
    size_t LifetimeExtendedCleanupStackSize;
    CleanupDeactivationScope DeactivateCleanups;
    bool OldDidCallStackSave;

  protected:
    bool PerformCleanup;

  private:
    RunCleanupsScope(const RunCleanupsScope &) = delete;
    void operator=(const RunCleanupsScope &) = delete;

  protected:
    CodeGenFunction &CGF;

  public:
    /// Enter a new cleanup scope.
    explicit RunCleanupsScope(CodeGenFunction &CGF)
        : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
      CleanupStackDepth = CGF.EHStack.stable_begin();
      LifetimeExtendedCleanupStackSize =
          CGF.LifetimeExtendedCleanupStack.size();
      OldDidCallStackSave = CGF.DidCallStackSave;
      CGF.DidCallStackSave = false;
      OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
      CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
    }

    /// Exit this cleanup scope, emitting any accumulated cleanups.
    ~RunCleanupsScope() {
      if (PerformCleanup)
        ForceCleanup();
    }

    /// Determine whether this scope requires any cleanups.
    bool requiresCleanups() const {
      return CGF.EHStack.stable_begin() != CleanupStackDepth;
    }

    /// Force the emission of cleanups now, instead of waiting
    /// until this object is destroyed.
    /// \param ValuesToReload - A list of values that need to be available at
    /// the insertion point after cleanup emission. If cleanup emission created
    /// a shared cleanup block, these value pointers will be rewritten.
    /// Otherwise, they will not be modified.
    void
    ForceCleanup(std::initializer_list<llvm::Value **> ValuesToReload = {}) {
      assert(PerformCleanup && "Already forced cleanup");
      CGF.DidCallStackSave = OldDidCallStackSave;
      DeactivateCleanups.ForceDeactivate();
      CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
                           ValuesToReload);
      PerformCleanup = false;
      CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
    }
  };
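
  // Typical usage (illustrative sketch):
  //
  //   {
  //     RunCleanupsScope Scope(*this);
  //     // ... emit code that may push cleanups onto EHStack ...
  //   } // the destructor emits every cleanup pushed since entry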

  // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
  EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
      EHScopeStack::stable_end();

  class LexicalScope : public RunCleanupsScope {
    SourceRange Range;
    SmallVector<const LabelDecl *, 4> Labels;
    LexicalScope *ParentScope;

    LexicalScope(const LexicalScope &) = delete;
    void operator=(const LexicalScope &) = delete;

  public:
    /// Enter a new cleanup scope.
    explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range);

    void addLabel(const LabelDecl *label) {
      assert(PerformCleanup && "adding label to dead scope?");
      Labels.push_back(label);
    }

    /// Exit this cleanup scope, emitting any accumulated
    /// cleanups.
    ~LexicalScope();

    /// Force the emission of cleanups now, instead of waiting
    /// until this object is destroyed.
    void ForceCleanup() {
      CGF.CurLexicalScope = ParentScope;
      RunCleanupsScope::ForceCleanup();

      if (!Labels.empty())
        rescopeLabels();
    }

    bool hasLabels() const { return !Labels.empty(); }

    void rescopeLabels();
  };

  typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;

  /// A helper class used to temporarily assign new addresses to some
  /// variables.
  class OMPMapVars {
    DeclMapTy SavedLocals;
    DeclMapTy SavedTempAddresses;
    OMPMapVars(const OMPMapVars &) = delete;
    void operator=(const OMPMapVars &) = delete;

  public:
    explicit OMPMapVars() = default;
    ~OMPMapVars() {
      assert(SavedLocals.empty() && "Did not restore original addresses.");
    }

    /// Sets the address of the variable \p LocalVD to be \p TempAddr in
    /// function \p CGF.
    /// \return true if at least one variable was set already, false otherwise.
    bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
                    Address TempAddr) {
      LocalVD = LocalVD->getCanonicalDecl();
      // Only save it once.
      if (SavedLocals.count(LocalVD))
        return false;

      // Copy the existing local entry to SavedLocals.
      auto it = CGF.LocalDeclMap.find(LocalVD);
      if (it != CGF.LocalDeclMap.end())
        SavedLocals.try_emplace(LocalVD, it->second);
      else
        SavedLocals.try_emplace(LocalVD, Address::invalid());

      // Generate the private entry.
      QualType VarTy = LocalVD->getType();
      if (VarTy->isReferenceType()) {
        Address Temp = CGF.CreateMemTemp(VarTy);
        CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
        TempAddr = Temp;
      }
      SavedTempAddresses.try_emplace(LocalVD, TempAddr);

      return true;
    }

    /// Applies new addresses to the list of the variables.
    /// \return true if at least one variable is using a new address, false
    /// otherwise.
    bool apply(CodeGenFunction &CGF) {
      copyInto(SavedTempAddresses, CGF.LocalDeclMap);
      SavedTempAddresses.clear();
      return !SavedLocals.empty();
    }

    /// Restores original addresses of the variables.
    void restore(CodeGenFunction &CGF) {
      if (!SavedLocals.empty()) {
        copyInto(SavedLocals, CGF.LocalDeclMap);
        SavedLocals.clear();
      }
    }

  private:
    /// Copy all the entries in the source map over the corresponding
    /// entries in the destination, which must exist.
    static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
      for (auto &[Decl, Addr] : Src) {
        if (!Addr.isValid())
          Dest.erase(Decl);
        else
          Dest.insert_or_assign(Decl, Addr);
      }
    }
  };
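
  // Sketch of the intended protocol (illustrative):
  //
  //   OMPMapVars Vars;
  //   Vars.setVarAddr(CGF, VD, PrivateAddr); // record the remapping
  //   Vars.apply(CGF);   // publish it into CGF.LocalDeclMap
  //   // ... emit code that should see the remapped addresses ...
  //   Vars.restore(CGF); // put the original addresses back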

  /// The scope used to remap some variables as private in the OpenMP loop body
  /// (or other captured region emitted without outlining), and to restore old
  /// vars back on exit.
  class OMPPrivateScope : public RunCleanupsScope {
    OMPMapVars MappedVars;
    OMPPrivateScope(const OMPPrivateScope &) = delete;
    void operator=(const OMPPrivateScope &) = delete;

  public:
    /// Enter a new OpenMP private scope.
    explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}

    /// Registers the variable \p LocalVD as private, with \p Addr as the
    /// address of the corresponding private copy.
    /// \return true if the variable is registered as private, false if it has
    /// been privatized already.
    bool addPrivate(const VarDecl *LocalVD, Address Addr) {
      assert(PerformCleanup && "adding private to dead scope");
      return MappedVars.setVarAddr(CGF, LocalVD, Addr);
    }

    /// Privatizes local variables previously registered as private.
    /// Registration is separate from the actual privatization so that
    /// initializers can use the values of the original variables, not the
    /// private copies. This is important, for example, if the private variable
    /// is a class variable initialized by a constructor that references other
    /// private variables. At initialization, the original variables must be
    /// used, not the private copies.
    /// \return true if at least one variable was privatized, false otherwise.
    bool Privatize() { return MappedVars.apply(CGF); }

    void ForceCleanup() {
      RunCleanupsScope::ForceCleanup();
      restoreMap();
    }

    /// Exit scope - all the mapped variables are restored.
    ~OMPPrivateScope() {
      if (PerformCleanup)
        ForceCleanup();
    }

    /// Checks if the global variable is captured in current function.
    bool isGlobalVarCaptured(const VarDecl *VD) const {
      VD = VD->getCanonicalDecl();
      return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
    }

    /// Restore all mapped variables without cleanup. This is useful when we
    /// want to reference the original variables but don't want the cleanup,
    /// because it could emit lifetime.end markers too early, causing backend
    /// issue #56913.
    void restoreMap() { MappedVars.restore(CGF); }
  };
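
  // Typical usage (illustrative sketch):
  //
  //   OMPPrivateScope PrivScope(CGF);
  //   PrivScope.addPrivate(VD, PrivateAddr); // register; originals visible
  //   (void)PrivScope.Privatize();           // switch to the private copies
  //   // ... emit the region body against the private copies ...
  //   // cleanups run and original addresses are restored on scope exit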

  /// Save/restore the original map of previously emitted local vars for cases
  /// when we need to duplicate emission of the same code several times in the
  /// same function for OpenMP code.
  class OMPLocalDeclMapRAII {
    CodeGenFunction &CGF;
    DeclMapTy SavedMap;

  public:
    OMPLocalDeclMapRAII(CodeGenFunction &CGF)
        : CGF(CGF), SavedMap(CGF.LocalDeclMap) {}
    ~OMPLocalDeclMapRAII() { SavedMap.swap(CGF.LocalDeclMap); }
  };

  /// Takes the old cleanup stack size and emits the cleanup blocks
  /// that have been added.
  void
  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
                   std::initializer_list<llvm::Value **> ValuesToReload = {});

  /// Takes the old cleanup stack size and emits the cleanup blocks
  /// that have been added, then adds all lifetime-extended cleanups from
  /// the given position to the stack.
  void
  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
                   size_t OldLifetimeExtendedStackSize,
                   std::initializer_list<llvm::Value **> ValuesToReload = {});

  void ResolveBranchFixups(llvm::BasicBlock *Target);

  /// The given basic block lies in the current EH scope, but may be a
  /// target of a potentially scope-crossing jump; get a stable handle
  /// to which we can perform this jump later.
  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
    return JumpDest(Target, EHStack.getInnermostNormalCleanup(),
                    NextCleanupDestIndex++);
  }

  /// The given basic block lies in the current EH scope, but may be a
  /// target of a potentially scope-crossing jump; get a stable handle
  /// to which we can perform this jump later.
  JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
    return getJumpDestInCurrentScope(createBasicBlock(Name));
  }
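
  // For example (sketch): a loop creates stable destinations before emitting
  // its body so that 'break' and 'continue' can branch through cleanups:
  //
  //   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
  //   JumpDest Continue = getJumpDestInCurrentScope("for.inc");
  //   // ... a 'break' in the body lowers to
  //   //     EmitBranchThroughCleanup(LoopExit);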

  /// EmitBranchThroughCleanup - Emit a branch from the current insert
  /// block through the normal cleanup handling code (if any) and then
  /// on to \arg Dest.
  void EmitBranchThroughCleanup(JumpDest Dest);

  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
  /// specified destination obviously has no cleanups to run. 'false' is always
  /// a conservatively correct answer for this method.
  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;

  /// popCatchScope - Pops the catch scope at the top of the EHScope
  /// stack, emitting any required code (other than the catch handlers
  /// themselves).
  void popCatchScope();

  llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
  llvm::BasicBlock *
  getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);

  /// An object to manage conditionally-evaluated expressions.
  class ConditionalEvaluation {
    llvm::BasicBlock *StartBB;

  public:
    ConditionalEvaluation(CodeGenFunction &CGF)
        : StartBB(CGF.Builder.GetInsertBlock()) {}

    void begin(CodeGenFunction &CGF) {
      assert(CGF.OutermostConditional != this);
      if (!CGF.OutermostConditional)
        CGF.OutermostConditional = this;
    }

    void end(CodeGenFunction &CGF) {
      assert(CGF.OutermostConditional != nullptr);
      if (CGF.OutermostConditional == this)
        CGF.OutermostConditional = nullptr;
    }

    /// Returns a block which will be executed prior to each
    /// evaluation of the conditional code.
    llvm::BasicBlock *getStartingBlock() const { return StartBB; }
  };
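
  // Typical usage (illustrative sketch, as when emitting one arm of a
  // conditional operator):
  //
  //   ConditionalEvaluation Cond(*this);
  //   Cond.begin(*this);
  //   // ... emit the conditionally executed arm; cleanups pushed here are
  //   //     treated as conditional ...
  //   Cond.end(*this);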

  /// isInConditionalBranch - Return true if we're currently emitting
  /// one branch or the other of a conditional expression.
  bool isInConditionalBranch() const { return OutermostConditional != nullptr; }

  void setBeforeOutermostConditional(llvm::Value *value, Address addr,
                                     CodeGenFunction &CGF) {
    assert(isInConditionalBranch());
    llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
    auto store = new llvm::StoreInst(value, addr.emitRawPointer(CGF),
                                     block->back().getIterator());
    store->setAlignment(addr.getAlignment().getAsAlign());
  }

  /// An RAII object to record that we're evaluating a statement
  /// expression.
  class StmtExprEvaluation {
    CodeGenFunction &CGF;

    /// We have to save the outermost conditional: cleanups in a
    /// statement expression aren't conditional just because the
    /// StmtExpr is.
    ConditionalEvaluation *SavedOutermostConditional;

  public:
    StmtExprEvaluation(CodeGenFunction &CGF)
        : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
      CGF.OutermostConditional = nullptr;
    }

    ~StmtExprEvaluation() {
      CGF.OutermostConditional = SavedOutermostConditional;
      CGF.EnsureInsertPoint();
    }
  };

  /// An object which temporarily prevents a value from being
  /// destroyed by aggressive peephole optimizations that assume that
  /// all uses of a value have been realized in the IR.
  class PeepholeProtection {
    llvm::Instruction *Inst = nullptr;
    friend class CodeGenFunction;

  public:
    PeepholeProtection() = default;
  };

  /// A non-RAII class containing all the information about a bound
  /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
  /// this which makes individual mappings very simple; using this
  /// class directly is useful when you have a variable number of
  /// opaque values or don't want the RAII functionality for some
  /// reason.
  class OpaqueValueMappingData {
    const OpaqueValueExpr *OpaqueValue;
    bool BoundLValue;
    CodeGenFunction::PeepholeProtection Protection;

    OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
        : OpaqueValue(ov), BoundLValue(boundLValue) {}

  public:
    OpaqueValueMappingData() : OpaqueValue(nullptr) {}

    static bool shouldBindAsLValue(const Expr *expr) {
      // gl-values should be bound as l-values for obvious reasons.
      // Records should be bound as l-values because IR generation
      // always keeps them in memory. Expressions of function type
      // act exactly like l-values but are formally required to be
      // r-values in C.
      return expr->isGLValue() || expr->getType()->isFunctionType() ||
             hasAggregateEvaluationKind(expr->getType());
    }

    static OpaqueValueMappingData
    bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e) {
      if (shouldBindAsLValue(ov))
        return bind(CGF, ov, CGF.EmitLValue(e));
      return bind(CGF, ov, CGF.EmitAnyExpr(e));
    }

    static OpaqueValueMappingData
    bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv) {
      assert(shouldBindAsLValue(ov));
      CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
      return OpaqueValueMappingData(ov, true);
    }

    static OpaqueValueMappingData
    bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv) {
      assert(!shouldBindAsLValue(ov));
      CGF.OpaqueRValues.insert(std::make_pair(ov, rv));

      OpaqueValueMappingData data(ov, false);

      // Work around an extremely aggressive peephole optimization in
      // EmitScalarConversion which assumes that all other uses of a
      // value are extant.
      data.Protection = CGF.protectFromPeepholes(rv);

      return data;
    }

    bool isValid() const { return OpaqueValue != nullptr; }
    void clear() { OpaqueValue = nullptr; }

    void unbind(CodeGenFunction &CGF) {
      assert(OpaqueValue && "no data to unbind!");

      if (BoundLValue) {
        CGF.OpaqueLValues.erase(OpaqueValue);
      } else {
        CGF.OpaqueRValues.erase(OpaqueValue);
        CGF.unprotectFromPeepholes(Protection);
      }
    }
  };

  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
  class OpaqueValueMapping {
    CodeGenFunction &CGF;
    OpaqueValueMappingData Data;

  public:
    static bool shouldBindAsLValue(const Expr *expr) {
      return OpaqueValueMappingData::shouldBindAsLValue(expr);
    }

    /// Build the opaque value mapping for the given conditional
    /// operator if it's the GNU ?: extension. This is a common
    /// enough pattern that the convenience operator is really
    /// helpful.
    ///
    OpaqueValueMapping(CodeGenFunction &CGF,
                       const AbstractConditionalOperator *op)
        : CGF(CGF) {
      if (isa<ConditionalOperator>(op))
        // Leave Data empty.
        return;

      const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
      Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
                                          e->getCommon());
    }

    /// Build the opaque value mapping for an OpaqueValueExpr whose source
    /// expression is set to the expression the OVE represents.
    OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
        : CGF(CGF) {
      if (OV) {
        assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
                                      "for OVE with no source expression");
        Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr());
      }
    }

    OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue,
                       LValue lvalue)
        : CGF(CGF),
          Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {}

    OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue,
                       RValue rvalue)
        : CGF(CGF),
          Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {}

    void pop() {
      Data.unbind(CGF);
      Data.clear();
    }

    ~OpaqueValueMapping() {
      if (Data.isValid())
        Data.unbind(CGF);
    }
  };
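
  // Typical usage (illustrative sketch): bind the opaque value before
  // emitting an expression that refers to it:
  //
  //   OpaqueValueMapping Binding(CGF, OVE); // binds OVE's source expression
  //   LValue LV = CGF.EmitLValue(E);        // E may reference OVE
  //   // the mapping is unbound when Binding is destroyed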
1514
1515private:
1516 CGDebugInfo *DebugInfo;
1517 /// Used to create unique names for artificial VLA size debug info variables.
1518 unsigned VLAExprCounter = 0;
1519 bool DisableDebugInfo = false;
1520
1521 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1522 /// calling llvm.stacksave for multiple VLAs in the same scope.
1523 bool DidCallStackSave = false;
1524
1525 /// IndirectBranch - The first time an indirect goto is seen we create a block
1526 /// with an indirect branch. Every time we see the address of a label taken,
1527 /// we add the label to the indirect goto. Every subsequent indirect goto is
1528 /// codegen'd as a jump to the IndirectBranch's basic block.
  llvm::IndirectBrInst *IndirectBranch = nullptr;

  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
  /// decls.
  DeclMapTy LocalDeclMap;

  // Keep track of the cleanups for callee-destructed parameters pushed to the
  // cleanup stack so that they can be deactivated later.
  llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
      CalleeDestructedParamCleanups;

  /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
  /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
  /// parameter.
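  ///
  /// For example, given:
  /// \code
  ///   void fill(char *buf __attribute__((pass_object_size(0))), char c);
  /// \endcode
  /// the implicit parameter carries __builtin_object_size(buf, 0) as
  /// evaluated at each call site.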
  llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
      SizeArguments;

  /// Track escaped local variables with auto storage. Used during SEH
  /// outlining to produce a call to llvm.localescape.
  llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;

  /// LabelMap - This keeps track of the LLVM basic block for each C label.
  llvm::DenseMap<const LabelDecl *, JumpDest> LabelMap;

  // BreakContinueStack - This keeps track of where break and continue
  // statements should jump to.
  struct BreakContinue {
    BreakContinue(JumpDest Break, JumpDest Continue)
        : BreakBlock(Break), ContinueBlock(Continue) {}

    JumpDest BreakBlock;
    JumpDest ContinueBlock;
  };
  SmallVector<BreakContinue, 8> BreakContinueStack;

  /// Handles cancellation exit points in OpenMP-related constructs.
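  ///
  /// A sketch of the typical usage during directive codegen (the actual call
  /// sites live in the OpenMP statement emission code):
  /// \code
  ///   CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
  ///   // ... emit the region body; 'cancel' branches to getExitBlock() ...
  ///   CGF.OMPCancelStack.exit(CGF); // joins cancel and normal exits
  /// \endcode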
  class OpenMPCancelExitStack {
    /// Tracks cancellation exit point and join point for cancel-related exit
    /// and normal exit.
    struct CancelExit {
      CancelExit() = default;
      CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
                 JumpDest ContBlock)
          : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
      OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
      /// true if the exit block has been emitted already by the special
      /// emitExit() call, false if the default codegen is used.
      bool HasBeenEmitted = false;
      JumpDest ExitBlock;
      JumpDest ContBlock;
    };

    SmallVector<CancelExit, 8> Stack;

  public:
    OpenMPCancelExitStack() : Stack(1) {}
    ~OpenMPCancelExitStack() = default;
    /// Fetches the exit block for the current OpenMP construct.
    JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
    /// Emits the exit block with the codegen procedure specific to the related
    /// OpenMP construct, plus the code for the normal construct cleanup.
    void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
                  const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
      if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
        assert(CGF.getOMPCancelDestination(Kind).isValid());
        assert(CGF.HaveInsertPoint());
        assert(!Stack.back().HasBeenEmitted);
        auto IP = CGF.Builder.saveAndClearIP();
        CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
        CodeGen(CGF);
        CGF.EmitBranch(Stack.back().ContBlock.getBlock());
        CGF.Builder.restoreIP(IP);
        Stack.back().HasBeenEmitted = true;
      }
      CodeGen(CGF);
    }
    /// Enter the cancel-supporting \a Kind construct.
    /// \param Kind OpenMP directive that supports cancel constructs.
    /// \param HasCancel true if the construct has an inner cancel directive,
    /// false otherwise.
    void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
      Stack.push_back({Kind,
                       HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
                                 : JumpDest(),
                       HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
                                 : JumpDest()});
    }
    /// Emits the default exit point for the cancel construct (if the special
    /// one has not been used) plus the join point for cancel/normal exits.
    void exit(CodeGenFunction &CGF) {
      if (getExitBlock().isValid()) {
        assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
        bool HaveIP = CGF.HaveInsertPoint();
        if (!Stack.back().HasBeenEmitted) {
          if (HaveIP)
            CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
          CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
          CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
        }
        CGF.EmitBlock(Stack.back().ContBlock.getBlock());
        if (!HaveIP) {
          CGF.Builder.CreateUnreachable();
          CGF.Builder.ClearInsertionPoint();
        }
      }
      Stack.pop_back();
    }
  };
  OpenMPCancelExitStack OMPCancelStack;

  /// Lower the Likelihood knowledge about \p Cond via the llvm.expect
  /// intrinsic.
  llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
                                                    Stmt::Likelihood LH);

  std::unique_ptr<CodeGenPGO> PGO;

  /// Bitmap used by MC/DC to track condition outcomes of a boolean expression.
  Address MCDCCondBitmapAddr = Address::invalid();

  /// Calculate branch weights appropriate for PGO data.
  llvm::MDNode *createProfileWeights(uint64_t TrueCount,
                                     uint64_t FalseCount) const;
  llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights) const;
  llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
                                            uint64_t LoopCount) const;

public:
  std::pair<bool, bool> getIsCounterPair(const Stmt *S) const;
  void markStmtAsUsed(bool Skipped, const Stmt *S);
  void markStmtMaybeUsed(const Stmt *S);

  /// Increment the profiler's counter for the given statement by \p StepV.
  /// If \p StepV is null, the default increment is 1.
  void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr);

  bool isMCDCCoverageEnabled() const {
    return (CGM.getCodeGenOpts().hasProfileClangInstr() &&
            CGM.getCodeGenOpts().MCDCCoverage &&
            !CurFn->hasFnAttribute(llvm::Attribute::NoProfile));
  }

  /// Allocate a temp value on the stack that MCDC can use to track condition
  /// results.
  void maybeCreateMCDCCondBitmap();

  bool isBinaryLogicalOp(const Expr *E) const {
    const BinaryOperator *BOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
    return (BOp && BOp->isLogicalOp());
  }

  /// Zero-init the MCDC temp value.
  void maybeResetMCDCCondBitmap(const Expr *E);

  /// Update the MCDC test vector bitmap from the recorded condition outcomes
  /// of the boolean expression \p E.
  void maybeUpdateMCDCTestVectorBitmap(const Expr *E);

  /// Update the MCDC temp value with the condition's evaluated result.
  void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val);

  /// Get the profiler's count for the given statement.
  uint64_t getProfileCount(const Stmt *S);

  /// Set the profiler's current count.
  void setCurrentProfileCount(uint64_t Count);

  /// Get the profiler's current count. This is generally the count for the
  /// most recently incremented counter.
  uint64_t getCurrentProfileCount();

  /// See CGDebugInfo::addInstToCurrentSourceAtom.
  void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction,
                                  llvm::Value *Backup);

  /// See CGDebugInfo::addInstToSpecificSourceAtom.
  void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction,
                                   llvm::Value *Backup, uint64_t Atom);

  /// Add \p KeyInstruction and an optional \p Backup instruction to a new atom
  /// group (See ApplyAtomGroup for more info).
  void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction,
                              llvm::Value *Backup);

private:
  /// SwitchInsn - This is the nearest enclosing switch instruction. It is null
  /// if the current context is not in a switch.
  llvm::SwitchInst *SwitchInsn = nullptr;
  /// The branch weights of SwitchInsn when doing instrumentation based PGO.
  SmallVector<uint64_t, 16> *SwitchWeights = nullptr;

  /// The likelihood attributes of the SwitchCase.
  SmallVector<Stmt::Likelihood, 16> *SwitchLikelihood = nullptr;

  /// CaseRangeBlock - This block holds the condition check for the last case
  /// statement range in the current switch instruction.
  llvm::BasicBlock *CaseRangeBlock = nullptr;

  /// OpaqueLValues - Keeps track of the current set of opaque value
  /// expressions.
  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;

  // VLASizeMap - This keeps track of the associated size for each VLA type.
  // We track this by the size expression rather than the type itself because
  // in certain situations, like a const qualifier applied to a VLA typedef,
  // multiple VLA types can share the same size expression.
  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
  // enter/leave scopes.
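  //
  // For example:
  //   void f(int n) {
  //     typedef int VLA[n]; // one size expression, 'n'
  //     VLA a;              // 'int[n]'
  //     const VLA b;        // 'const int[n]': a distinct type, same size expr
  //   }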
  llvm::DenseMap<const Expr *, llvm::Value *> VLASizeMap;

  /// A block containing a single 'unreachable' instruction. Created
  /// lazily by getUnreachableBlock().
  llvm::BasicBlock *UnreachableBlock = nullptr;

  /// Count of the number of return expressions in the function.
  unsigned NumReturnExprs = 0;

  /// Count of the number of simple (constant) return expressions in the
  /// function.
  unsigned NumSimpleReturnExprs = 0;

  /// The last regular (non-return) debug location (breakpoint) in the
  /// function.
  SourceLocation LastStopPoint;

public:
  /// Source location information about the default argument or member
  /// initializer expression we're evaluating, if any.
  CurrentSourceLocExprScope CurSourceLocExprScope;
  using SourceLocExprScopeGuard =
      CurrentSourceLocExprScope::SourceLocExprScopeGuard;

  /// A scope within which we are constructing the fields of an object which
  /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
  /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
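  ///
  /// For example:
  /// \code
  ///   struct A { int *p = &n; int n; };
  /// \endcode
  /// While emitting the implicit A::A(), '&n' is evaluated via a
  /// CXXDefaultInitExpr, and 'this' must refer to the A object under
  /// construction.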
  class FieldConstructionScope {
  public:
    FieldConstructionScope(CodeGenFunction &CGF, Address This)
        : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
      CGF.CXXDefaultInitExprThis = This;
    }
    ~FieldConstructionScope() {
      CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
    }

  private:
    CodeGenFunction &CGF;
    Address OldCXXDefaultInitExprThis;
  };

  /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
  /// is overridden to be the object under construction.
  class CXXDefaultInitExprScope {
  public:
    CXXDefaultInitExprScope(CodeGenFunction &CGF, const CXXDefaultInitExpr *E)
        : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
          OldCXXThisAlignment(CGF.CXXThisAlignment),
          SourceLocScope(E, CGF.CurSourceLocExprScope) {
      CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
      CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
    }
    ~CXXDefaultInitExprScope() {
      CGF.CXXThisValue = OldCXXThisValue;
      CGF.CXXThisAlignment = OldCXXThisAlignment;
    }

  public:
    CodeGenFunction &CGF;
    llvm::Value *OldCXXThisValue;
    CharUnits OldCXXThisAlignment;
    SourceLocExprScopeGuard SourceLocScope;
  };

  struct CXXDefaultArgExprScope : SourceLocExprScopeGuard {
    CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E)
        : SourceLocExprScopeGuard(E, CGF.CurSourceLocExprScope) {}
  };

  /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
  /// current loop index is overridden.
  class ArrayInitLoopExprScope {
  public:
    ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
        : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
      CGF.ArrayInitIndex = Index;
    }
    ~ArrayInitLoopExprScope() { CGF.ArrayInitIndex = OldArrayInitIndex; }

  private:
    CodeGenFunction &CGF;
    llvm::Value *OldArrayInitIndex;
  };

  class InlinedInheritingConstructorScope {
  public:
    InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
        : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
          OldCurCodeDecl(CGF.CurCodeDecl),
          OldCXXABIThisDecl(CGF.CXXABIThisDecl),
          OldCXXABIThisValue(CGF.CXXABIThisValue),
          OldCXXThisValue(CGF.CXXThisValue),
          OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
          OldCXXThisAlignment(CGF.CXXThisAlignment),
          OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
          OldCXXInheritedCtorInitExprArgs(
              std::move(CGF.CXXInheritedCtorInitExprArgs)) {
      CGF.CurGD = GD;
      CGF.CurFuncDecl = CGF.CurCodeDecl =
          cast<CXXConstructorDecl>(GD.getDecl());
      CGF.CXXABIThisDecl = nullptr;
      CGF.CXXABIThisValue = nullptr;
      CGF.CXXThisValue = nullptr;
      CGF.CXXABIThisAlignment = CharUnits();
      CGF.CXXThisAlignment = CharUnits();
      CGF.ReturnValue = Address::invalid();
      CGF.FnRetTy = QualType();
      CGF.CXXInheritedCtorInitExprArgs.clear();
    }
    ~InlinedInheritingConstructorScope() {
      CGF.CurGD = OldCurGD;
      CGF.CurFuncDecl = OldCurFuncDecl;
      CGF.CurCodeDecl = OldCurCodeDecl;
      CGF.CXXABIThisDecl = OldCXXABIThisDecl;
      CGF.CXXABIThisValue = OldCXXABIThisValue;
      CGF.CXXThisValue = OldCXXThisValue;
      CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
      CGF.CXXThisAlignment = OldCXXThisAlignment;
      CGF.ReturnValue = OldReturnValue;
      CGF.FnRetTy = OldFnRetTy;
      CGF.CXXInheritedCtorInitExprArgs =
          std::move(OldCXXInheritedCtorInitExprArgs);
    }

  private:
    CodeGenFunction &CGF;
    GlobalDecl OldCurGD;
    const Decl *OldCurFuncDecl;
    const Decl *OldCurCodeDecl;
    ImplicitParamDecl *OldCXXABIThisDecl;
    llvm::Value *OldCXXABIThisValue;
    llvm::Value *OldCXXThisValue;
    CharUnits OldCXXABIThisAlignment;
    CharUnits OldCXXThisAlignment;
    Address OldReturnValue;
    QualType OldFnRetTy;
    CallArgList OldCXXInheritedCtorInitExprArgs;
  };

  // Helper class for the OpenMP IR Builder. Allows reusability of code used
  // for region body and finalization codegen callbacks. This class will also
  // contain privatization functions used by the privatization callbacks.
  //
  // TODO: this is a temporary class for things that are being moved out of
  // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
  // utility functions for use with the OMPBuilder. Once the move to use the
  // OMPBuilder is done, everything here will either become part of
  // CodeGenFunction directly, or a new helper class that will contain
  // functions used by both this and the OMPBuilder.

  struct OMPBuilderCBHelpers {

    OMPBuilderCBHelpers() = delete;
    OMPBuilderCBHelpers(const OMPBuilderCBHelpers &) = delete;
    OMPBuilderCBHelpers &operator=(const OMPBuilderCBHelpers &) = delete;

    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    /// Cleanup action for allocate support.
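    ///
    /// The runtime "free" call is created up front and detached from the
    /// function body; it is re-inserted only at the point where the cleanup
    /// is actually emitted.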
    class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {

    private:
      llvm::CallInst *RTLFnCI;

    public:
      OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
        RLFnCI->removeFromParent();
      }

      void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
        if (!CGF.HaveInsertPoint())
          return;
        CGF.Builder.Insert(RTLFnCI);
      }
    };

    /// Returns the address of the threadprivate variable for the current
    /// thread. This also creates any necessary OMP runtime calls.
    ///
    /// \param VD VarDecl for the threadprivate variable.
    /// \param VDAddr Address of the VarDecl.
    /// \param Loc The location where the directive was encountered.
    static Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
                                          const VarDecl *VD, Address VDAddr,
                                          SourceLocation Loc);

    /// Gets the OpenMP-specific address of the local variable \p VD.
    static Address getAddressOfLocalVariable(CodeGenFunction &CGF,
                                             const VarDecl *VD);
    /// Concatenate the given name \p Parts using the given separators.
    /// \param Parts different parts of the final name that need separation
    /// \param FirstSeparator separator used between the initial two
    /// parts of the name.
    /// \param Separator separator used between all of the remaining
    /// consecutive parts of the name
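    ///
    /// A sketch of the expected behavior:
    /// \code
    ///   getNameWithSeparators({"omp", "par", "region"}, ".", ".");
    ///   // yields "omp.par.region"
    /// \endcode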
    static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
                                             StringRef FirstSeparator = ".",
                                             StringRef Separator = ".");
    /// Emit the finalization for an OMP region.
    /// \param CGF The Codegen function this belongs to
    /// \param IP Insertion point for generating the finalization code.
    static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP) {
      CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
      assert(IP.getBlock()->end() != IP.getPoint() &&
             "OpenMP IR Builder should cause terminated block!");

      llvm::BasicBlock *IPBB = IP.getBlock();
      llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
      assert(DestBB && "Finalization block should have one successor!");

      // Erase the terminator and replace it with a cleanup branch.
      IPBB->getTerminator()->eraseFromParent();
      CGF.Builder.SetInsertPoint(IPBB);
      CodeGenFunction::JumpDest Dest = CGF.getJumpDestInCurrentScope(DestBB);
      CGF.EmitBranchThroughCleanup(Dest);
    }

    /// Emit the body of an OMP region
    /// \param CGF The Codegen function this belongs to
    /// \param RegionBodyStmt The body statement for the OpenMP region being
    /// generated
    /// \param AllocaIP Where to insert alloca instructions
    /// \param CodeGenIP Where to insert the region code
    /// \param RegionName Name to be used for new blocks
    static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF,
                                         const Stmt *RegionBodyStmt,
                                         InsertPointTy AllocaIP,
                                         InsertPointTy CodeGenIP,
                                         Twine RegionName);

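    /// Replace any terminator at \p CodeGenIP with a call to the outlined
    /// function \p Fn (a nounwind call if \p Fn cannot throw), followed,
    /// when an insertion point remains, by a branch to \p FiniBB.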
    static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
                                llvm::BasicBlock &FiniBB, llvm::Function *Fn,
                                ArrayRef<llvm::Value *> Args) {
      llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
      if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
        CodeGenIPBBTI->eraseFromParent();

      CGF.Builder.SetInsertPoint(CodeGenIPBB);

      if (Fn->doesNotThrow())
        CGF.EmitNounwindRuntimeCall(Fn, Args);
      else
        CGF.EmitRuntimeCall(Fn, Args);

      if (CGF.Builder.saveIP().isSet())
        CGF.Builder.CreateBr(&FiniBB);
    }

    /// Emit the body of an OMP region that will be outlined in
    /// OpenMPIRBuilder::finalize().
    /// \param CGF The Codegen function this belongs to
    /// \param RegionBodyStmt The body statement for the OpenMP region being
    /// generated
    /// \param AllocaIP Where to insert alloca instructions
    /// \param CodeGenIP Where to insert the region code
    /// \param RegionName Name to be used for new blocks
    static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF,
                                          const Stmt *RegionBodyStmt,
                                          InsertPointTy AllocaIP,
                                          InsertPointTy CodeGenIP,
                                          Twine RegionName);

    /// RAII for preserving necessary info during outlined region body codegen.
    class OutlinedRegionBodyRAII {

      llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
      CodeGenFunction::JumpDest OldReturnBlock;
      CodeGenFunction &CGF;

    public:
      OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
                             llvm::BasicBlock &RetBB)
          : CGF(cgf) {
        assert(AllocaIP.isSet() &&
               "Must specify Insertion point for allocas of outlined function");
        OldAllocaIP = CGF.AllocaInsertPt;
        CGF.AllocaInsertPt = &*AllocaIP.getPoint();

        OldReturnBlock = CGF.ReturnBlock;
        CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
      }

      ~OutlinedRegionBodyRAII() {
        CGF.AllocaInsertPt = OldAllocaIP;
        CGF.ReturnBlock = OldReturnBlock;
      }
    };

    /// RAII for preserving necessary info during inlined region body codegen.
    class InlinedRegionBodyRAII {

      llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
      CodeGenFunction &CGF;

    public:
      InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
                            llvm::BasicBlock &FiniBB)
          : CGF(cgf) {
        // The alloca insertion block should be in the entry block of the
        // containing function, so this expects either an unset AllocaIP (in
        // which case the old alloca insertion point is reused) or a new
        // AllocaIP in the same block as the old one.
        assert((!AllocaIP.isSet() ||
                CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
               "Insertion point should be in the entry block of containing "
               "function!");
        OldAllocaIP = CGF.AllocaInsertPt;
        if (AllocaIP.isSet())
          CGF.AllocaInsertPt = &*AllocaIP.getPoint();

        // TODO: Remove the call, after making sure the counter is not used by
        // the EHStack.
        // Since this is an inlined region, it should not modify the
        // ReturnBlock and should reuse the one for the enclosing outlined
        // region. So, the JumpDest returned by the function is discarded.
        (void)CGF.getJumpDestInCurrentScope(&FiniBB);
      }

      ~InlinedRegionBodyRAII() { CGF.AllocaInsertPt = OldAllocaIP; }
    };
  };

private:
  /// CXXABIThisDecl - When generating code for a C++ member function,
  /// this will hold the implicit 'this' declaration.
  ImplicitParamDecl *CXXABIThisDecl = nullptr;
  llvm::Value *CXXABIThisValue = nullptr;
  llvm::Value *CXXThisValue = nullptr;
  CharUnits CXXABIThisAlignment;
  CharUnits CXXThisAlignment;

  /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
  /// this expression.
  Address CXXDefaultInitExprThis = Address::invalid();

  /// The current array initialization index when evaluating an
  /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
  llvm::Value *ArrayInitIndex = nullptr;

  /// The values of function arguments to use when evaluating
  /// CXXInheritedCtorInitExprs within this context.
  CallArgList CXXInheritedCtorInitExprArgs;

  /// CXXStructorImplicitParamDecl - When generating code for a constructor or
  /// destructor, this will hold the implicit argument (e.g. VTT).
  ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
  llvm::Value *CXXStructorImplicitParamValue = nullptr;

  /// OutermostConditional - Points to the outermost active
  /// conditional control. This is used so that we know if a
  /// temporary should be destroyed conditionally.
  ConditionalEvaluation *OutermostConditional = nullptr;

  /// The current lexical scope.
  LexicalScope *CurLexicalScope = nullptr;

  /// The current source location that should be used for exception
  /// handling code.
  SourceLocation CurEHLocation;

  /// BlockByrefInfos - For each __block variable, contains
  /// information about the layout of the variable.
  llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;

  /// Used by -fsanitize=nullability-return to determine whether the return
  /// value can be checked.
  llvm::Value *RetValNullabilityPrecondition = nullptr;

  /// Check if -fsanitize=nullability-return instrumentation is required for
  /// this function.
  bool requiresReturnValueNullabilityCheck() const {
    return RetValNullabilityPrecondition;
  }

  /// Used to store precise source locations for return statements by the
  /// runtime return value checks.
  Address ReturnLocation = Address::invalid();

  /// Check if the return value of this function requires sanitization.
  bool requiresReturnValueCheck() const;

  bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
  bool hasInAllocaArg(const CXXMethodDecl *MD);

  llvm::BasicBlock *TerminateLandingPad = nullptr;
  llvm::BasicBlock *TerminateHandler = nullptr;
  llvm::SmallVector<llvm::BasicBlock *, 2> TrapBBs;

  /// Terminate funclets keyed by parent funclet pad.
  llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;

  /// Largest vector width used in this function. Will be used to create a
  /// function attribute.
  unsigned LargestVectorWidth = 0;

  /// True if we need to emit lifetime markers. This is initially set in
  /// the constructor, but can be overridden to true if this is a coroutine.
  bool ShouldEmitLifetimeMarkers;

  /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
  /// the function metadata.
  void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);

public:
  CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext = false);
  ~CodeGenFunction();

  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
  ASTContext &getContext() const { return CGM.getContext(); }
  CGDebugInfo *getDebugInfo() {
    if (DisableDebugInfo)
      return nullptr;
    return DebugInfo;
  }
  void disableDebugInfo() { DisableDebugInfo = true; }
  void enableDebugInfo() { DisableDebugInfo = false; }

  bool shouldUseFusedARCCalls() {
    return CGM.getCodeGenOpts().OptimizationLevel == 0;
  }

  const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }

  /// Returns a pointer to the function's exception object and selector slot,
  /// which is assigned in every landing pad.
  Address getExceptionSlot();
  Address getEHSelectorSlot();

  /// Returns the contents of the function's exception object and selector
  /// slots.
  llvm::Value *getExceptionFromSlot();
  llvm::Value *getSelectorFromSlot();

  RawAddress getNormalCleanupDestSlot();

  llvm::BasicBlock *getUnreachableBlock() {
    if (!UnreachableBlock) {
      UnreachableBlock = createBasicBlock("unreachable");
      new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
    }
    return UnreachableBlock;
  }

  llvm::BasicBlock *getInvokeDest() {
    if (!EHStack.requiresLandingPad())
      return nullptr;
    return getInvokeDestImpl();
  }

  bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }

  const TargetInfo &getTarget() const { return Target; }
  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
  const TargetCodeGenInfo &getTargetHooks() const {
    return CGM.getTargetCodeGenInfo();
  }

  //===--------------------------------------------------------------------===//
  //                                  Cleanups
  //===--------------------------------------------------------------------===//

  typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);

  void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                        Address arrayEndPointer,
                                        QualType elementType,
                                        CharUnits elementAlignment,
                                        Destroyer *destroyer);
  void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                      llvm::Value *arrayEnd,
                                      QualType elementType,
                                      CharUnits elementAlignment,
                                      Destroyer *destroyer);

  void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
                   QualType type);
  void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr,
                     QualType type);
  void pushDestroy(CleanupKind kind, Address addr, QualType type,
                   Destroyer *destroyer, bool useEHCleanupForArray);
  void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind,
                                       Address addr, QualType type);
  void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr,
                                       QualType type, Destroyer *destroyer,
                                       bool useEHCleanupForArray);
  void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
                                   QualType type, Destroyer *destroyer,
                                   bool useEHCleanupForArray);
  void pushLifetimeExtendedDestroy(QualType::DestructionKind dtorKind,
                                   Address addr, QualType type);
  void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                   llvm::Value *CompletePtr,
                                   QualType ElementType);
  void pushStackRestore(CleanupKind kind, Address SPMem);
  void pushKmpcAllocFree(CleanupKind Kind,
                         std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
  void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
                   bool useEHCleanupForArray);
  llvm::Function *generateDestroyHelper(Address addr, QualType type,
                                        Destroyer *destroyer,
                                        bool useEHCleanupForArray,
                                        const VarDecl *VD);
  void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
                        QualType elementType, CharUnits elementAlign,
                        Destroyer *destroyer, bool checkZeroLength,
                        bool useEHCleanup);

  Destroyer *getDestroyer(QualType::DestructionKind destructionKind);

  /// Determines whether an EH cleanup is required to destroy a type
  /// with the given destruction kind.
  bool needsEHCleanup(QualType::DestructionKind kind) {
    switch (kind) {
    case QualType::DK_none:
      return false;
    case QualType::DK_cxx_destructor:
    case QualType::DK_objc_weak_lifetime:
    case QualType::DK_nontrivial_c_struct:
      return getLangOpts().Exceptions;
    case QualType::DK_objc_strong_lifetime:
      return getLangOpts().Exceptions &&
             CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
    }
    llvm_unreachable("bad destruction kind");
  }

  CleanupKind getCleanupKind(QualType::DestructionKind kind) {
    return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
  }

  //===--------------------------------------------------------------------===//
  //                                 Objective-C
  //===--------------------------------------------------------------------===//

  void GenerateObjCMethod(const ObjCMethodDecl *OMD);

  void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);

  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
                          const ObjCPropertyImplDecl *PID);
  void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                              const ObjCPropertyImplDecl *propImpl,
                              const ObjCMethodDecl *GetterMethodDecl,
                              llvm::Constant *AtomicHelperFn);

  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
                                  ObjCMethodDecl *MD, bool ctor);

  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
  /// for the given property.
  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
                          const ObjCPropertyImplDecl *PID);
  void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
                              const ObjCPropertyImplDecl *propImpl,
                              llvm::Constant *AtomicHelperFn);

  //===--------------------------------------------------------------------===//
  //                                 Block Bits
  //===--------------------------------------------------------------------===//

  /// Emit a block literal.
  /// \return an LLVM value which is a pointer to a struct which contains
  /// information about the block, including the block invoke function, the
  /// captured variables, etc.
  llvm::Value *EmitBlockLiteral(const BlockExpr *);

  llvm::Function *GenerateBlockFunction(GlobalDecl GD, const CGBlockInfo &Info,
                                        const DeclMapTy &ldm,
                                        bool IsLambdaConversionToBlock,
                                        bool BuildGlobalBlock);

  /// Check if \p T is a C++ class that has a destructor that can throw.
  static bool cxxDestructorCanThrow(QualType T);

  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
  llvm::Constant *
  GenerateObjCAtomicSetterCopyHelperFunction(const ObjCPropertyImplDecl *PID);
  llvm::Constant *
  GenerateObjCAtomicGetterCopyHelperFunction(const ObjCPropertyImplDecl *PID);
  llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);

  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
                         bool CanThrow);

  class AutoVarEmission;

  void emitByrefStructureInit(const AutoVarEmission &emission);

  /// Enter a cleanup to destroy a __block variable. Note that this
  /// cleanup should be a no-op if the variable hasn't left the stack
  /// yet; if a cleanup is required for the variable itself, that needs
  /// to be done externally.
  ///
  /// \param Kind Cleanup kind.
  ///
  /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
  /// structure that will be passed to _Block_object_dispose. When
  /// \p LoadBlockVarAddr is true, the address of the field of the block
  /// structure that holds the address of the __block structure.
  ///
  /// \param Flags The flag that will be passed to _Block_object_dispose.
  ///
  /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
  /// \p Addr to get the address of the __block structure.
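  ///
  /// For example, for source like
  /// \code
  ///   __block int x = 0;
  ///   ^{ ++x; }();
  /// \endcode
  /// a cleanup is entered so that x's byref structure is released via
  /// _Block_object_dispose when x goes out of scope.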
  void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
                         bool LoadBlockVarAddr, bool CanThrow);

  void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
                                llvm::Value *ptr);

  Address LoadBlockStruct();
  Address GetAddrOfBlockDecl(const VarDecl *var);

  /// emitBlockByrefAddress - Computes the location of the
  /// data in a variable which is declared as __block.
  Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
                                bool followForward = true);
  Address emitBlockByrefAddress(Address baseAddr, const BlockByrefInfo &info,
                                bool followForward, const llvm::Twine &name);

  const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);

  QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args);

  void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                    const CGFunctionInfo &FnInfo);

  /// Annotate the function with an attribute that disables TSan checking at
  /// runtime.
  void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);

  /// Emit code for the start of a function.
  /// \param Loc The location to be associated with the function.
  /// \param StartLoc The location of the function body.
  void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn,
                     const CGFunctionInfo &FnInfo, const FunctionArgList &Args,
                     SourceLocation Loc = SourceLocation(),
                     SourceLocation StartLoc = SourceLocation());

  static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor);

  void EmitConstructorBody(FunctionArgList &Args);
  void EmitDestructorBody(FunctionArgList &Args);
  void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
  void EmitFunctionBody(const Stmt *Body);
  void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);

  void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
                                  CallArgList &CallArgs,
                                  const CGFunctionInfo *CallOpFnInfo = nullptr,
                                  llvm::Constant *CallOpFn = nullptr);
  void EmitLambdaBlockInvokeBody();
  void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
  void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD,
                                      CallArgList &CallArgs);
  void EmitLambdaInAllocaImplFn(const CXXMethodDecl *CallOp,
                                const CGFunctionInfo **ImplFnInfo,
                                llvm::Function **ImplFn);
  void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD);
  void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV) {
    EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
  }
  void EmitAsanPrologueOrEpilogue(bool Prologue);

  /// Emit the unified return block, trying to avoid its emission when
  /// possible.
  /// \return The debug location of the user written return statement if the
  /// return block is avoided.
  llvm::DebugLoc EmitReturnBlock();

  /// FinishFunction - Complete IR generation of the current function. It is
  /// legal to call this function even if there is no current insertion point.
  void FinishFunction(SourceLocation EndLoc = SourceLocation());

  void StartThunk(llvm::Function *Fn, GlobalDecl GD,
                  const CGFunctionInfo &FnInfo, bool IsUnprototyped);

  void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
                                 const ThunkInfo *Thunk, bool IsUnprototyped);

  void FinishThunk();

  /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
  void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
                         llvm::FunctionCallee Callee);

  /// Generate a thunk for the given method.
  void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
                     GlobalDecl GD, const ThunkInfo &Thunk,
                     bool IsUnprototyped);

  llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
                                       const CGFunctionInfo &FnInfo,
                                       GlobalDecl GD, const ThunkInfo &Thunk);

  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
                        FunctionArgList &Args);

  void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);

  /// Struct with all information about dynamic [sub]class needed to set vptr.
  struct VPtr {
    BaseSubobject Base;
    const CXXRecordDecl *NearestVBase;
    CharUnits OffsetFromNearestVBase;
    const CXXRecordDecl *VTableClass;
  };

  /// Initialize the vtable pointer of the given subobject.
  void InitializeVTablePointer(const VPtr &vptr);

  typedef llvm::SmallVector<VPtr, 4> VPtrsVector;

  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
  VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);

  void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
                         CharUnits OffsetFromNearestVBase,
                         bool BaseIsNonVirtualPrimaryBase,
                         const CXXRecordDecl *VTableClass,
                         VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);

  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);

  // VTableAuthMode - whether loading the vtable is guaranteed to trap on
  // authentication failure, even if the resulting vtable pointer is unused.
  enum class VTableAuthMode {
    Authenticate,
    MustTrap,
    UnsafeUbsanStrip // Should only be used for Vptr UBSan check
  };
  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
  /// to by This.
  llvm::Value *
  GetVTablePtr(Address This, llvm::Type *VTableTy,
               const CXXRecordDecl *VTableClass,
               VTableAuthMode AuthMode = VTableAuthMode::Authenticate);

  enum CFITypeCheckKind {
    CFITCK_VCall,
    CFITCK_NVCall,
    CFITCK_DerivedCast,
    CFITCK_UnrelatedCast,
    CFITCK_ICall,
    CFITCK_NVMFCall,
    CFITCK_VMFCall,
  };

  /// Derived is the presumed address of an object of type T after a
  /// cast. If T is a polymorphic class type, emit a check that the virtual
  /// table for Derived belongs to a class derived from T.
  void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
                                 CFITypeCheckKind TCK, SourceLocation Loc);

  /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
  /// If vptr CFI is enabled, emit a check that VTable is valid.
  void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
                                 CFITypeCheckKind TCK, SourceLocation Loc);

  /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
  /// RD using llvm.type.test.
  void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
                          CFITypeCheckKind TCK, SourceLocation Loc);

  /// If whole-program virtual table optimization is enabled, emit an assumption
  /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
  /// enabled, emit a check that VTable is a member of RD's type identifier.
  void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
                                    llvm::Value *VTable, SourceLocation Loc);

  /// Returns whether we should perform a type checked load when loading a
  /// virtual function for virtual calls to members of RD. This is generally
  /// true when both vcall CFI and whole-program-vtables are enabled.
  bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);

  /// Emit a type checked load from the given vtable.
  llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD,
                                         llvm::Value *VTable,
                                         llvm::Type *VTableTy,
                                         uint64_t VTableByteOffset);

  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
  /// given phase of destruction for a destructor. The end result
  /// should call destructors on members and base classes in reverse
  /// order of their construction.
  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);

  /// ShouldInstrumentFunction - Return true if the current function should be
  /// instrumented with __cyg_profile_func_* calls.
  bool ShouldInstrumentFunction();

  /// ShouldSkipSanitizerInstrumentation - Return true if the current function
  /// should not be instrumented with sanitizers.
  bool ShouldSkipSanitizerInstrumentation();

  /// ShouldXRayInstrumentFunction - Return true if the current function should
  /// be instrumented with XRay nop sleds.
  bool ShouldXRayInstrumentFunction() const;

  /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
  /// XRay custom event handling calls.
  bool AlwaysEmitXRayCustomEvents() const;

  /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
  /// XRay typed event handling calls.
  bool AlwaysEmitXRayTypedEvents() const;

  /// Return a type hash constant for a function instrumented by
  /// -fsanitize=function.
  llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;

  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
  /// arguments for the given function. This is also responsible for naming the
  /// LLVM function arguments.
  void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn,
                          const FunctionArgList &Args);

  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
  /// given temporary. Specify the source location atom group (Key Instructions
  /// debug info feature) for the `ret` using \p RetKeyInstructionsSourceAtom.
  /// If it's 0, the `ret` will get added to a new source atom group.
  void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
                          SourceLocation EndLoc,
                          uint64_t RetKeyInstructionsSourceAtom);

  /// Emit a test that checks if the return value \p RV is nonnull.
  void EmitReturnValueCheck(llvm::Value *RV);

  /// EmitStartEHSpec - Emit the start of the exception spec.
  void EmitStartEHSpec(const Decl *D);

  /// EmitEndEHSpec - Emit the end of the exception spec.
  void EmitEndEHSpec(const Decl *D);

  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
  llvm::BasicBlock *getTerminateLandingPad();

  /// getTerminateFunclet - Return a cleanup funclet that just calls
  /// terminate.
  llvm::BasicBlock *getTerminateFunclet();

  /// getTerminateHandler - Return a handler (not a landing pad, just
  /// a catch handler) that just calls terminate. This is used when
  /// a terminate scope encloses a try.
  llvm::BasicBlock *getTerminateHandler();

  llvm::Type *ConvertTypeForMem(QualType T);
  llvm::Type *ConvertType(QualType T);
  llvm::Type *convertTypeForLoadStore(QualType ASTTy,
                                      llvm::Type *LLVMTy = nullptr);
  llvm::Type *ConvertType(const TypeDecl *T) {
    return ConvertType(getContext().getTypeDeclType(T));
  }

  /// LoadObjCSelf - Load the value of self. This function is only valid while
  /// generating code for an Objective-C method.
  llvm::Value *LoadObjCSelf();

  /// TypeOfSelfObject - Return the type of the object that this self
  /// represents.
  QualType TypeOfSelfObject();

  /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
  static TypeEvaluationKind getEvaluationKind(QualType T);

  static bool hasScalarEvaluationKind(QualType T) {
    return getEvaluationKind(T) == TEK_Scalar;
  }

  static bool hasAggregateEvaluationKind(QualType T) {
    return getEvaluationKind(T) == TEK_Aggregate;
  }

  /// createBasicBlock - Create an LLVM basic block.
  llvm::BasicBlock *createBasicBlock(const Twine &name = "",
                                     llvm::Function *parent = nullptr,
                                     llvm::BasicBlock *before = nullptr) {
    return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
  }

  /// getJumpDestForLabel - Return the JumpDest for the LLVM basic block that
  /// the specified label maps to.
  JumpDest getJumpDestForLabel(const LabelDecl *S);

  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
  /// another basic block, simplify it. This assumes that no other code could
  /// potentially reference the basic block.
  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);

  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
  /// adding a fall-through branch from the current insert block if
  /// necessary. It is legal to call this function even if there is no current
  /// insertion point.
  ///
  /// IsFinished - If true, indicates that the caller has finished emitting
  /// branches to the given block and does not expect to emit code into it. This
  /// means the block can be ignored if it is unreachable.
  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished = false);

  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
  /// near its uses, and leave the insertion point in it.
  void EmitBlockAfterUses(llvm::BasicBlock *BB);

  /// EmitBranch - Emit a branch to the specified basic block from the current
  /// insert block, taking care to avoid creation of branches from dummy
  /// blocks. It is legal to call this function even if there is no current
  /// insertion point.
  ///
  /// This function clears the current insertion point. The caller should follow
  /// calls to this function with calls to Emit*Block prior to generating new
  /// code.
  void EmitBranch(llvm::BasicBlock *Block);

  /// HaveInsertPoint - True if an insertion point is defined. If not, this
  /// indicates that the current code being emitted is unreachable.
  bool HaveInsertPoint() const { return Builder.GetInsertBlock() != nullptr; }

  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
  /// emitted IR has a place to go. Note that by definition, if this function
  /// creates a block then that block is unreachable; callers may do better to
  /// detect when no insertion point is defined and simply skip IR generation.
  void EnsureInsertPoint() {
    if (!HaveInsertPoint())
      EmitBlock(createBasicBlock());
  }

  /// ErrorUnsupported - Print out an error that codegen doesn't support the
  /// specified stmt yet.
  void ErrorUnsupported(const Stmt *S, const char *Type);

  //===--------------------------------------------------------------------===//
  //                                  Helpers
  //===--------------------------------------------------------------------===//

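  /// Merge the addresses \p LHS (incoming from \p LHSBlock) and \p RHS
  /// (incoming from \p RHSBlock) of the two arms of a conditional expression
  /// by building a PHI in \p MergeBlock, conservatively taking the smaller of
  /// the two alignments.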
  Address mergeAddressesInConditionalExpr(Address LHS, Address RHS,
                                          llvm::BasicBlock *LHSBlock,
                                          llvm::BasicBlock *RHSBlock,
                                          llvm::BasicBlock *MergeBlock,
                                          QualType MergedType) {
    Builder.SetInsertPoint(MergeBlock);
    llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
    PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
    PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
    LHS.replaceBasePointer(PtrPhi);
    LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
    return LHS;
  }

  /// Construct an address with the natural alignment of T. If a pointer to T
  /// is expected to be signed, the pointer passed to this function must have
  /// been signed, and the returned Address will have the pointer authentication
  /// information needed to authenticate the signed pointer.
  Address makeNaturalAddressForPointer(
      llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
      bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
      TBAAAccessInfo *TBAAInfo = nullptr,
      KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
    if (Alignment.isZero())
      Alignment =
          CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
    return Address(Ptr, ConvertTypeForMem(T), Alignment,
                   CGM.getPointerAuthInfoForPointeeType(T), /*Offset=*/nullptr,
                   IsKnownNonNull);
  }

  LValue MakeAddrLValue(Address Addr, QualType T,
                        AlignmentSource Source = AlignmentSource::Type) {
    return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
                          CGM.getTBAAAccessInfo(T));
  }

  LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo,
                        TBAAAccessInfo TBAAInfo) {
    return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
  }

  LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
                        AlignmentSource Source = AlignmentSource::Type) {
    return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
                          LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
  }

  /// Same as MakeAddrLValue above except that the pointer is known to be
  /// unsigned.
  LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
                           AlignmentSource Source = AlignmentSource::Type) {
    Address Addr(V, ConvertTypeForMem(T), Alignment);
    return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
                            CGM.getTBAAAccessInfo(T));
  }

  LValue
  MakeAddrLValueWithoutTBAA(Address Addr, QualType T,
                            AlignmentSource Source = AlignmentSource::Type) {
    return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
                            TBAAAccessInfo());
  }

  /// Given a value of type T* that may not be to a complete object, construct
  /// an l-value with the natural pointee alignment of T.
  LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);

  LValue
  MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
                             KnownNonNull_t IsKnownNonNull = NotKnownNonNull);

  /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
  /// to be unsigned.
  LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T);

  LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T);

  Address EmitLoadOfReference(LValue RefLVal,
                              LValueBaseInfo *PointeeBaseInfo = nullptr,
                              TBAAAccessInfo *PointeeTBAAInfo = nullptr);
  LValue EmitLoadOfReferenceLValue(LValue RefLVal);
  LValue
  EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy,
                            AlignmentSource Source = AlignmentSource::Type) {
    LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
                                    CGM.getTBAAAccessInfo(RefTy));
    return EmitLoadOfReferenceLValue(RefLVal);
  }

  /// Load a pointer with type \p PtrTy stored at address \p Ptr.
  /// Note that \p PtrTy is the type of the loaded pointer, not the address
  /// it is loaded from.
  Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
                            LValueBaseInfo *BaseInfo = nullptr,
                            TBAAAccessInfo *TBAAInfo = nullptr);
  LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);

private:
  struct AllocaTracker {
    void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
    llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }

  private:
    llvm::SmallVector<llvm::AllocaInst *> Allocas;
  };
  AllocaTracker *Allocas = nullptr;

  /// CGDecl helper.
  void emitStoresForConstant(const VarDecl &D, Address Loc, bool isVolatile,
                             llvm::Constant *constant, bool IsAutoInit);
  /// CGDecl helper.
  void emitStoresForZeroInit(const VarDecl &D, Address Loc, bool isVolatile);
  /// CGDecl helper.
  void emitStoresForPatternInit(const VarDecl &D, Address Loc, bool isVolatile);
  /// CGDecl helper.
  void emitStoresForInitAfterBZero(llvm::Constant *Init, Address Loc,
                                   bool isVolatile, bool IsAutoInit);

public:
  // Captures all the allocas created during the scope of its RAII object.
  struct AllocaTrackerRAII {
    AllocaTrackerRAII(CodeGenFunction &CGF)
        : CGF(CGF), OldTracker(CGF.Allocas) {
      CGF.Allocas = &Tracker;
    }
    ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }

    llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }

  private:
    CodeGenFunction &CGF;
    AllocaTracker *OldTracker;
    AllocaTracker Tracker;
  };

  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
  /// block if \p ArraySize is nullptr, otherwise inserts it at the current
  /// insertion point of the builder. The caller is responsible for setting an
  /// appropriate alignment on the alloca.
  ///
  /// \p ArraySize is the number of array elements to be allocated if it
  /// is not nullptr.
  ///
  /// LangAS::Default is the address space of pointers to local variables and
  /// temporaries, as exposed in the source language. In certain
  /// configurations, this is not the same as the alloca address space, and a
  /// cast is needed to lift the pointer from the alloca AS into
  /// LangAS::Default. This can happen when the target uses a restricted
  /// address space for the stack but the source language requires
  /// LangAS::Default to be a generic address space. The latter condition is
  /// common for most programming languages; OpenCL is an exception in that
  /// LangAS::Default is the private address space, which naturally maps
  /// to the stack.
  ///
  /// Because the address of a temporary is often exposed to the program in
  /// various ways, this function will perform the cast. The original alloca
  /// instruction is returned through \p Alloca if it is not nullptr.
  ///
  /// The cast is not performed in CreateTempAllocaWithoutCast. This is
  /// more efficient if the caller knows that the address will not be exposed.
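  ///
  /// A minimal usage sketch (the caller must set the alignment itself):
  /// \code
  ///   llvm::AllocaInst *A = CreateTempAlloca(Int8Ty, "tmp.raw");
  ///   A->setAlignment(llvm::Align(1));
  /// \endcode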
2834 llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2835 llvm::Value *ArraySize = nullptr);
2836
2837 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
2838 /// block. The alloca is casted to the address space of \p UseAddrSpace if
2839 /// necessary.
2840 RawAddress CreateTempAlloca(llvm::Type *Ty, LangAS UseAddrSpace,
2841 CharUnits align, const Twine &Name = "tmp",
2842 llvm::Value *ArraySize = nullptr,
2843 RawAddress *Alloca = nullptr);
2844
2845 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
2846 /// block. The alloca is casted to default address space if necessary.
2847 ///
2848 /// FIXME: This version should be removed, and context should provide the
2849 /// context use address space used instead of default.
2850 RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align,
2851 const Twine &Name = "tmp",
2852 llvm::Value *ArraySize = nullptr,
2853 RawAddress *Alloca = nullptr) {
2854 return CreateTempAlloca(Ty, UseAddrSpace: LangAS::Default, align, Name, ArraySize,
2855 Alloca);
2856 }
2857
2858 RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
2859 const Twine &Name = "tmp",
2860 llvm::Value *ArraySize = nullptr);
2861
2862 /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2863 /// default ABI alignment of the given LLVM type.
2864 ///
2865 /// IMPORTANT NOTE: This is *not* generally the right alignment for
2866 /// any given AST type that happens to have been lowered to the
2867 /// given IR type. This should only ever be used for function-local,
2868 /// IR-driven manipulations like saving and restoring a value. Do
2869 /// not hand this address off to arbitrary IRGen routines, and especially
2870 /// do not pass it as an argument to a function that might expect a
2871 /// properly ABI-aligned value.
2872 RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty,
2873 const Twine &Name = "tmp");
2874
2875 /// CreateIRTemp - Create a temporary IR object of the given type, with
  /// appropriate alignment. This routine should only be used when a temporary
2877 /// value needs to be stored into an alloca (for example, to avoid explicit
2878 /// PHI construction), but the type is the IR type, not the type appropriate
2879 /// for storing in memory.
2880 ///
2881 /// That is, this is exactly equivalent to CreateMemTemp, but calling
2882 /// ConvertType instead of ConvertTypeForMem.
2883 RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");
2884
  /// CreateMemTemp - Create a temporary memory object of the given type, with
  /// appropriate alignment, and cast it to the default address space. Returns
  /// the original alloca instruction through \p Alloca if it is not nullptr.
2888 RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
2889 RawAddress *Alloca = nullptr);
2890 RawAddress CreateMemTemp(QualType T, CharUnits Align,
2891 const Twine &Name = "tmp",
2892 RawAddress *Alloca = nullptr);
2893
  /// CreateMemTemp - Create a temporary memory object of the given type, with
  /// appropriate alignment, without casting it to the default address space.
2896 RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2897 RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align,
2898 const Twine &Name = "tmp");
2899
2900 /// CreateAggTemp - Create a temporary memory object for the given
2901 /// aggregate type.
  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
                             RawAddress *Alloca = nullptr) {
    return AggValueSlot::forAddr(
        CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
        AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers,
        AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap);
  }
2909
2910 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2911 /// expression and compare the result against zero, returning an Int1Ty value.
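  ///
  /// For example (a sketch, assuming a condition expression \c Cond is in
  /// scope), emitting the guard of an 'if' statement:
  /// \code
  ///   llvm::Value *CondV = EvaluateExprAsBool(Cond); // yields an i1 value
  /// \endcode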
2912 llvm::Value *EvaluateExprAsBool(const Expr *E);
2913
  /// Retrieve the implicit cast expression of the RHS in a binary operator
  /// expression by passing pointers to Value and QualType.
  /// This is used for implicit bitfield conversion checks, which
  /// must compare with the value before potential truncation.
2918 llvm::Value *EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E,
2919 llvm::Value **Previous,
2920 QualType *SrcType);
2921
  /// Emit a check for an [implicit] conversion of a bitfield. Such a
  /// conversion is not UB, so we use the value after the conversion.
2924 void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType,
2925 llvm::Value *Dst, QualType DstType,
2926 const CGBitFieldInfo &Info,
2927 SourceLocation Loc);
2928
2929 /// EmitIgnoredExpr - Emit an expression in a context which ignores the
2930 /// result.
2931 void EmitIgnoredExpr(const Expr *E);
2932
2933 /// EmitAnyExpr - Emit code to compute the specified expression which can have
2934 /// any type. The result is returned as an RValue struct. If this is an
2935 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2936 /// the result should be returned.
2937 ///
2938 /// \param ignoreResult True if the resulting value isn't used.
2939 RValue EmitAnyExpr(const Expr *E,
2940 AggValueSlot aggSlot = AggValueSlot::ignored(),
2941 bool ignoreResult = false);
2942
  /// EmitVAListRef - Emit a "reference" to a va_list; this is either the
  /// address or the value of the expression, depending on how va_list is
  /// defined.
2945 Address EmitVAListRef(const Expr *E);
2946
2947 /// Emit a "reference" to a __builtin_ms_va_list; this is
2948 /// always the value of the expression, because a __builtin_ms_va_list is a
2949 /// pointer to a char.
2950 Address EmitMSVAListRef(const Expr *E);
2951
  /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
  /// always be accessible even if no aggregate location is provided.
2954 RValue EmitAnyExprToTemp(const Expr *E);
2955
2956 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2957 /// arbitrary expression into the given memory location.
2958 void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals,
2959 bool IsInitializer);
2960
2961 void EmitAnyExprToExn(const Expr *E, Address Addr);
2962
2963 /// EmitInitializationToLValue - Emit an initializer to an LValue.
2964 void EmitInitializationToLValue(
2965 const Expr *E, LValue LV,
2966 AggValueSlot::IsZeroed_t IsZeroed = AggValueSlot::IsNotZeroed);
2967
2968 /// EmitExprAsInit - Emits the code necessary to initialize a
2969 /// location in memory with the given initializer.
2970 void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2971 bool capturedByInit);
2972
2973 /// hasVolatileMember - returns true if aggregate type has a volatile
2974 /// member.
  bool hasVolatileMember(QualType T) {
    if (const RecordType *RT = T->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      return RD->hasVolatileMember();
    }
    return false;
  }
2982
2983 /// Determine whether a return value slot may overlap some other object.
2984 AggValueSlot::Overlap_t getOverlapForReturnValue() {
2985 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2986 // class subobjects. These cases may need to be revisited depending on the
2987 // resolution of the relevant core issue.
2988 return AggValueSlot::DoesNotOverlap;
2989 }
2990
2991 /// Determine whether a field initialization may overlap some other object.
2992 AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD);
2993
2994 /// Determine whether a base class initialization may overlap some other
2995 /// object.
2996 AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD,
2997 const CXXRecordDecl *BaseRD,
2998 bool IsVirtual);
2999
3000 /// Emit an aggregate assignment.
  void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
    ApplyAtomGroup Grp(getDebugInfo());
    bool IsVolatile = hasVolatileMember(EltTy);
    EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
  }

  void EmitAggregateCopyCtor(LValue Dest, LValue Src,
                             AggValueSlot::Overlap_t MayOverlap) {
    EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
  }
3011
3012 /// EmitAggregateCopy - Emit an aggregate copy.
3013 ///
3014 /// \param isVolatile \c true iff either the source or the destination is
3015 /// volatile.
3016 /// \param MayOverlap Whether the tail padding of the destination might be
3017 /// occupied by some other object. More efficient code can often be
3018 /// generated if not.
3019 void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
3020 AggValueSlot::Overlap_t MayOverlap,
3021 bool isVolatile = false);
3022
3023 /// GetAddrOfLocalVar - Return the address of a local variable.
  Address GetAddrOfLocalVar(const VarDecl *VD) {
    auto it = LocalDeclMap.find(VD);
    assert(it != LocalDeclMap.end() &&
           "Invalid argument to GetAddrOfLocalVar(), no decl!");
    return it->second;
  }
3030
3031 /// Given an opaque value expression, return its LValue mapping if it exists,
3032 /// otherwise create one.
3033 LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
3034
3035 /// Given an opaque value expression, return its RValue mapping if it exists,
3036 /// otherwise create one.
3037 RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
3038
3039 /// isOpaqueValueEmitted - Return true if the opaque value expression has
3040 /// already been emitted.
3041 bool isOpaqueValueEmitted(const OpaqueValueExpr *E);
3042
3043 /// Get the index of the current ArrayInitLoopExpr, if any.
3044 llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
3045
3046 /// getAccessedFieldNo - Given an encoded value and a result number, return
3047 /// the input field number being accessed.
3048 static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
3049
3050 llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
3051 llvm::BasicBlock *GetIndirectGotoBlock();
3052
3053 /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
3054 static bool IsWrappedCXXThis(const Expr *E);
3055
  /// EmitNullInitialization - Generate code to set a value of the given type
  /// to null. If the type contains data member pointers, they will be
  /// initialized to -1 in accordance with the Itanium C++ ABI.
3059 void EmitNullInitialization(Address DestPtr, QualType Ty);
3060
3061 /// Emits a call to an LLVM variable-argument intrinsic, either
3062 /// \c llvm.va_start or \c llvm.va_end.
3063 /// \param ArgValue A reference to the \c va_list as emitted by either
3064 /// \c EmitVAListRef or \c EmitMSVAListRef.
3065 /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
3066 /// calls \c llvm.va_end.
3067 llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
3068
3069 /// Generate code to get an argument from the passed in pointer
3070 /// and update it accordingly.
3071 /// \param VE The \c VAArgExpr for which to generate code.
3072 /// \param VAListAddr Receives a reference to the \c va_list as emitted by
3073 /// either \c EmitVAListRef or \c EmitMSVAListRef.
3074 /// \returns A pointer to the argument.
3075 // FIXME: We should be able to get rid of this method and use the va_arg
3076 // instruction in LLVM instead once it works well enough.
3077 RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr,
3078 AggValueSlot Slot = AggValueSlot::ignored());
3079
3080 /// emitArrayLength - Compute the length of an array, even if it's a
3081 /// VLA, and drill down to the base element type.
3082 llvm::Value *emitArrayLength(const ArrayType *arrayType, QualType &baseType,
3083 Address &addr);
3084
  /// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions
  /// in the given variably-modified type and store them in the VLASizeMap.
3087 ///
3088 /// This function can be called with a null (unreachable) insert point.
3089 void EmitVariablyModifiedType(QualType Ty);
3090
3091 struct VlaSizePair {
3092 llvm::Value *NumElts;
3093 QualType Type;
3094
3095 VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
3096 };
3097
3098 /// Return the number of elements for a single dimension
3099 /// for the given array type.
3100 VlaSizePair getVLAElements1D(const VariableArrayType *vla);
3101 VlaSizePair getVLAElements1D(QualType vla);
3102
  /// Returns an LLVM value that corresponds to the size,
  /// in non-variably-sized elements, of a variable length array type,
  /// plus the largest non-variably-sized element type. Assumes that
  /// the type has already been emitted with EmitVariablyModifiedType.
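  ///
  /// A usage sketch (assuming \c VlaTy names a variable array type whose
  /// sizes were captured earlier by EmitVariablyModifiedType):
  /// \code
  ///   VlaSizePair Sizes = getVLASize(VlaTy);
  ///   llvm::Value *NumElts = Sizes.NumElts; // runtime element count
  ///   QualType EltTy = Sizes.Type;          // largest fixed-size element type
  /// \endcode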
3107 VlaSizePair getVLASize(const VariableArrayType *vla);
3108 VlaSizePair getVLASize(QualType vla);
3109
  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
  /// generating code for a C++ member function.
3112 llvm::Value *LoadCXXThis() {
3113 assert(CXXThisValue && "no 'this' value for this function");
3114 return CXXThisValue;
3115 }
3116 Address LoadCXXThisAddress();
3117
  /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors that
  /// have virtual bases.
3120 // FIXME: Every place that calls LoadCXXVTT is something
3121 // that needs to be abstracted properly.
3122 llvm::Value *LoadCXXVTT() {
3123 assert(CXXStructorImplicitParamValue && "no VTT value for this function");
3124 return CXXStructorImplicitParamValue;
3125 }
3126
  /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
  /// complete class to the given direct base.
3129 Address GetAddressOfDirectBaseInCompleteClass(Address Value,
3130 const CXXRecordDecl *Derived,
3131 const CXXRecordDecl *Base,
3132 bool BaseIsVirtual);
3133
3134 static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
3135
  /// GetAddressOfBaseClass - This function will add the necessary delta to the
  /// load of 'this' and return the address of the base class.
3138 Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived,
3139 CastExpr::path_const_iterator PathBegin,
3140 CastExpr::path_const_iterator PathEnd,
3141 bool NullCheckValue, SourceLocation Loc);
3142
3143 Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived,
3144 CastExpr::path_const_iterator PathBegin,
3145 CastExpr::path_const_iterator PathEnd,
3146 bool NullCheckValue);
3147
3148 /// GetVTTParameter - Return the VTT parameter that should be passed to a
3149 /// base constructor/destructor with virtual bases.
3150 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
3151 /// to ItaniumCXXABI.cpp together with all the references to VTT.
3152 llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
3153 bool Delegating);
3154
3155 void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
3156 CXXCtorType CtorType,
3157 const FunctionArgList &Args,
3158 SourceLocation Loc);
  // It's important not to confuse this and the previous function. Delegating
  // constructors are the C++11 feature. The constructor delegate optimization
  // is used to reduce duplication in the base and complete constructors where
  // they are substantially the same.
3163 void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
3164 const FunctionArgList &Args);
3165
3166 /// Emit a call to an inheriting constructor (that is, one that invokes a
3167 /// constructor inherited from a base class) by inlining its definition. This
3168 /// is necessary if the ABI does not support forwarding the arguments to the
3169 /// base class constructor (because they're variadic or similar).
3170 void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor,
3171 CXXCtorType CtorType,
3172 bool ForVirtualBase,
3173 bool Delegating,
3174 CallArgList &Args);
3175
3176 /// Emit a call to a constructor inherited from a base class, passing the
3177 /// current constructor's arguments along unmodified (without even making
3178 /// a copy).
3179 void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D,
3180 bool ForVirtualBase, Address This,
3181 bool InheritedFromVBase,
3182 const CXXInheritedCtorInitExpr *E);
3183
3184 void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
3185 bool ForVirtualBase, bool Delegating,
3186 AggValueSlot ThisAVS, const CXXConstructExpr *E);
3187
3188 void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
3189 bool ForVirtualBase, bool Delegating,
3190 Address This, CallArgList &Args,
3191 AggValueSlot::Overlap_t Overlap,
3192 SourceLocation Loc, bool NewPointerIsChecked,
3193 llvm::CallBase **CallOrInvoke = nullptr);
3194
  /// Emit assumption load for all bases. Must be called only on the
  /// most-derived class, and not while the object is under construction.
3197 void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
3198
3199 /// Emit assumption that vptr load == global vtable.
3200 void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
3201
3202 void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, Address This,
3203 Address Src, const CXXConstructExpr *E);
3204
3205 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
3206 const ArrayType *ArrayTy, Address ArrayPtr,
3207 const CXXConstructExpr *E,
3208 bool NewPointerIsChecked,
3209 bool ZeroInitialization = false);
3210
3211 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
3212 llvm::Value *NumElements, Address ArrayPtr,
3213 const CXXConstructExpr *E,
3214 bool NewPointerIsChecked,
3215 bool ZeroInitialization = false);
3216
3217 static Destroyer destroyCXXObject;
3218
3219 void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
3220 bool ForVirtualBase, bool Delegating, Address This,
3221 QualType ThisTy);
3222
3223 void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
3224 llvm::Type *ElementTy, Address NewPtr,
3225 llvm::Value *NumElements,
3226 llvm::Value *AllocSizeWithoutCookie);
3227
3228 void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
3229 Address Ptr);
3230
3231 void EmitSehCppScopeBegin();
3232 void EmitSehCppScopeEnd();
3233 void EmitSehTryScopeBegin();
3234 void EmitSehTryScopeEnd();
3235
3236 llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
3237 void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
3238
3239 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
3240 void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
3241
3242 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
3243 QualType DeleteTy, llvm::Value *NumElements = nullptr,
3244 CharUnits CookieSize = CharUnits());
3245
3246 RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
3247 const CallExpr *TheCallExpr, bool IsDelete);
3248
3249 llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
3250 llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
3251 Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
3252
3253 /// Situations in which we might emit a check for the suitability of a
3254 /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
3255 /// compiler-rt.
3256 enum TypeCheckKind {
3257 /// Checking the operand of a load. Must be suitably sized and aligned.
3258 TCK_Load,
3259 /// Checking the destination of a store. Must be suitably sized and aligned.
3260 TCK_Store,
3261 /// Checking the bound value in a reference binding. Must be suitably sized
3262 /// and aligned, but is not required to refer to an object (until the
3263 /// reference is used), per core issue 453.
3264 TCK_ReferenceBinding,
3265 /// Checking the object expression in a non-static data member access. Must
3266 /// be an object within its lifetime.
3267 TCK_MemberAccess,
3268 /// Checking the 'this' pointer for a call to a non-static member function.
3269 /// Must be an object within its lifetime.
3270 TCK_MemberCall,
3271 /// Checking the 'this' pointer for a constructor call.
3272 TCK_ConstructorCall,
3273 /// Checking the operand of a static_cast to a derived pointer type. Must be
3274 /// null or an object within its lifetime.
3275 TCK_DowncastPointer,
3276 /// Checking the operand of a static_cast to a derived reference type. Must
3277 /// be an object within its lifetime.
3278 TCK_DowncastReference,
3279 /// Checking the operand of a cast to a base object. Must be suitably sized
3280 /// and aligned.
3281 TCK_Upcast,
3282 /// Checking the operand of a cast to a virtual base object. Must be an
3283 /// object within its lifetime.
3284 TCK_UpcastToVirtualBase,
3285 /// Checking the value assigned to a _Nonnull pointer. Must not be null.
3286 TCK_NonnullAssign,
3287 /// Checking the operand of a dynamic_cast or a typeid expression. Must be
3288 /// null or an object within its lifetime.
3289 TCK_DynamicOperation
3290 };
3291
3292 /// Determine whether the pointer type check \p TCK permits null pointers.
3293 static bool isNullPointerAllowed(TypeCheckKind TCK);
3294
3295 /// Determine whether the pointer type check \p TCK requires a vptr check.
3296 static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);
3297
3298 /// Whether any type-checking sanitizers are enabled. If \c false,
3299 /// calls to EmitTypeCheck can be skipped.
3300 bool sanitizePerformTypeCheck() const;
3301
  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV,
                     QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
                     llvm::Value *ArraySize = nullptr) {
    if (!sanitizePerformTypeCheck())
      return;
    EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
                  SkippedChecks, ArraySize);
  }
3310
  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr,
                     QualType Type, CharUnits Alignment = CharUnits::Zero(),
                     SanitizerSet SkippedChecks = SanitizerSet(),
                     llvm::Value *ArraySize = nullptr) {
    if (!sanitizePerformTypeCheck())
      return;
    EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
                  SkippedChecks, ArraySize);
  }
3320
3321 /// Emit a check that \p V is the address of storage of the
3322 /// appropriate size and alignment for an object of type \p Type
3323 /// (or if ArraySize is provided, for an array of that bound).
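  ///
  /// A typical call site looks like the following sketch (assuming an
  /// expression \c E about to be loaded from and its emitted pointer
  /// \c Ptr; the names are illustrative only):
  /// \code
  ///   EmitTypeCheck(TCK_Load, E->getExprLoc(), Ptr, E->getType(),
  ///                 getContext().getTypeAlignInChars(E->getType()));
  /// \endcode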
3324 void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
3325 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3326 SanitizerSet SkippedChecks = SanitizerSet(),
3327 llvm::Value *ArraySize = nullptr);
3328
  /// Emit a check that \p Base points into an array object, which
  /// we can access at index \p Index. \p Accessed should be \c false if
  /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
3332 void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
3333 QualType IndexType, bool Accessed);
3334 void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
3335 llvm::Value *Index, QualType IndexType,
3336 QualType IndexedType, bool Accessed);
3337
3338 /// Returns debug info, with additional annotation if
3339 /// CGM.getCodeGenOpts().SanitizeAnnotateDebugInfo[Ordinal] is enabled for
3340 /// any of the ordinals.
3341 llvm::DILocation *
3342 SanitizerAnnotateDebugInfo(ArrayRef<SanitizerKind::SanitizerOrdinal> Ordinals,
3343 SanitizerHandler Handler);
3344
3345 llvm::Value *GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD,
3346 const FieldDecl *CountDecl);
3347
3348 /// Build an expression accessing the "counted_by" field.
3349 llvm::Value *EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD,
3350 const FieldDecl *CountDecl);
3351
  /// Emit bounds checking for flexible array and pointer members with the
  /// counted_by attribute.
3354 void EmitCountedByBoundsChecking(const Expr *E, llvm::Value *Idx,
3355 Address Addr, QualType IdxTy,
3356 QualType ArrayTy, bool Accessed,
3357 bool FlexibleArray);
3358
3359 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3360 bool isInc, bool isPre);
3361 ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
3362 bool isInc, bool isPre);
3363
3364 /// Converts Location to a DebugLoc, if debug information is enabled.
3365 llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
3366
3367 /// Get the record field index as represented in debug info.
3368 unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex);
3369
3370 //===--------------------------------------------------------------------===//
3371 // Declaration Emission
3372 //===--------------------------------------------------------------------===//
3373
3374 /// EmitDecl - Emit a declaration.
3375 ///
3376 /// This function can be called with a null (unreachable) insert point.
3377 void EmitDecl(const Decl &D, bool EvaluateConditionDecl = false);
3378
3379 /// EmitVarDecl - Emit a local variable declaration.
3380 ///
3381 /// This function can be called with a null (unreachable) insert point.
3382 void EmitVarDecl(const VarDecl &D);
3383
3384 void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
3385 bool capturedByInit);
3386
3387 typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
3388 llvm::Value *Address);
3389
3390 /// Determine whether the given initializer is trivial in the sense
3391 /// that it requires no code to be generated.
3392 bool isTrivialInitializer(const Expr *Init);
3393
3394 /// EmitAutoVarDecl - Emit an auto variable declaration.
3395 ///
3396 /// This function can be called with a null (unreachable) insert point.
3397 void EmitAutoVarDecl(const VarDecl &D);
3398
3399 class AutoVarEmission {
3400 friend class CodeGenFunction;
3401
3402 const VarDecl *Variable;
3403
    /// The address of the alloca for languages with an explicit address space
    /// (e.g. OpenCL) or the alloca cast to a generic pointer for
    /// address-space-agnostic languages (e.g. C++). Invalid if the variable
    /// was emitted as a global constant.
3408 Address Addr;
3409
3410 llvm::Value *NRVOFlag;
3411
3412 /// True if the variable is a __block variable that is captured by an
3413 /// escaping block.
3414 bool IsEscapingByRef;
3415
3416 /// True if the variable is of aggregate type and has a constant
3417 /// initializer.
3418 bool IsConstantAggregate;
3419
3420 /// Non-null if we should use lifetime annotations.
3421 llvm::Value *SizeForLifetimeMarkers;
3422
3423 /// Address with original alloca instruction. Invalid if the variable was
3424 /// emitted as a global constant.
3425 RawAddress AllocaAddr;
3426
3427 struct Invalid {};
3428 AutoVarEmission(Invalid)
3429 : Variable(nullptr), Addr(Address::invalid()),
3430 AllocaAddr(RawAddress::invalid()) {}
3431
3432 AutoVarEmission(const VarDecl &variable)
3433 : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
3434 IsEscapingByRef(false), IsConstantAggregate(false),
3435 SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
3436
3437 bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
3438
3439 public:
3440 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
3441
3442 bool useLifetimeMarkers() const {
3443 return SizeForLifetimeMarkers != nullptr;
3444 }
3445 llvm::Value *getSizeForLifetimeMarkers() const {
3446 assert(useLifetimeMarkers());
3447 return SizeForLifetimeMarkers;
3448 }
3449
    /// Returns the raw, allocated address, which is not necessarily
    /// the address of the object itself. It is cast to the default
    /// address space for address-space-agnostic languages.
3453 Address getAllocatedAddress() const { return Addr; }
3454
3455 /// Returns the address for the original alloca instruction.
3456 RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
3457
3458 /// Returns the address of the object within this declaration.
3459 /// Note that this does not chase the forwarding pointer for
3460 /// __block decls.
    Address getObjectAddress(CodeGenFunction &CGF) const {
      if (!IsEscapingByRef)
        return Addr;

      return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
    }
3467 };
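
  /// EmitAutoVarAlloca, EmitAutoVarInit, and EmitAutoVarCleanups decompose
  /// EmitAutoVarDecl into phases, for callers that must interleave other work
  /// between them. A sketch of the equivalent three-phase form (assuming a
  /// local VarDecl \c D):
  /// \code
  ///   AutoVarEmission Emission = EmitAutoVarAlloca(D);
  ///   EmitAutoVarInit(Emission);
  ///   EmitAutoVarCleanups(Emission);
  /// \endcode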
3468 AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
3469 void EmitAutoVarInit(const AutoVarEmission &emission);
3470 void EmitAutoVarCleanups(const AutoVarEmission &emission);
3471 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
3472 QualType::DestructionKind dtorKind);
3473
3474 void MaybeEmitDeferredVarDeclInit(const VarDecl *var);
3475
  /// Emits the alloca and debug information for the size expressions for each
  /// dimension of an array. It registers the association of each
  /// (1-dimensional) QualType with its size expression's debug node, so that
  /// CGDebugInfo can reference that node when creating the DISubrange object
  /// to describe the array types.
3481 void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI, const VarDecl &D,
3482 bool EmitDebugInfo);
3483
3484 void EmitStaticVarDecl(const VarDecl &D,
3485 llvm::GlobalValue::LinkageTypes Linkage);
3486
3487 class ParamValue {
3488 union {
3489 Address Addr;
3490 llvm::Value *Value;
3491 };
3492
3493 bool IsIndirect;
3494
3495 ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
3496 ParamValue(Address A) : Addr(A), IsIndirect(true) {}
3497
3498 public:
3499 static ParamValue forDirect(llvm::Value *value) {
3500 return ParamValue(value);
3501 }
3502 static ParamValue forIndirect(Address addr) {
3503 assert(!addr.getAlignment().isZero());
3504 return ParamValue(addr);
3505 }
3506
3507 bool isIndirect() const { return IsIndirect; }
3508 llvm::Value *getAnyValue() const {
3509 if (!isIndirect())
3510 return Value;
3511 assert(!Addr.hasOffset() && "unexpected offset");
3512 return Addr.getBasePointer();
3513 }
3514
3515 llvm::Value *getDirectValue() const {
3516 assert(!isIndirect());
3517 return Value;
3518 }
3519
3520 Address getIndirectAddress() const {
3521 assert(isIndirect());
3522 return Addr;
3523 }
3524 };
3525
3526 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
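  ///
  /// A minimal sketch (illustrative names only): a scalar argument is wrapped
  /// with \c ParamValue::forDirect and a byval/indirect one with
  /// \c ParamValue::forIndirect before being handed off:
  /// \code
  ///   ParamValue PV = IsByRef ? ParamValue::forIndirect(ArgAddr)
  ///                           : ParamValue::forDirect(ArgValue);
  ///   EmitParmDecl(*PVD, PV, ArgNo);
  /// \endcode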
3527 void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
3528
3529 /// protectFromPeepholes - Protect a value that we're intending to
3530 /// store to the side, but which will probably be used later, from
3531 /// aggressive peepholing optimizations that might delete it.
3532 ///
3533 /// Pass the result to unprotectFromPeepholes to declare that
3534 /// protection is no longer required.
3535 ///
3536 /// There's no particular reason why this shouldn't apply to
3537 /// l-values, it's just that no existing peepholes work on pointers.
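  ///
  /// A usage sketch (the names are illustrative only):
  /// \code
  ///   PeepholeProtection Protection = protectFromPeepholes(RV);
  ///   // ... emit code during which RV must survive peepholing ...
  ///   unprotectFromPeepholes(Protection);
  /// \endcode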
3538 PeepholeProtection protectFromPeepholes(RValue rvalue);
3539 void unprotectFromPeepholes(PeepholeProtection protection);
3540
3541 void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
3542 SourceLocation Loc,
3543 SourceLocation AssumptionLoc,
3544 llvm::Value *Alignment,
3545 llvm::Value *OffsetValue,
3546 llvm::Value *TheCheck,
3547 llvm::Instruction *Assumption);
3548
3549 void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
3550 SourceLocation Loc, SourceLocation AssumptionLoc,
3551 llvm::Value *Alignment,
3552 llvm::Value *OffsetValue = nullptr);
3553
3554 void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
3555 SourceLocation AssumptionLoc,
3556 llvm::Value *Alignment,
3557 llvm::Value *OffsetValue = nullptr);
3558
3559 //===--------------------------------------------------------------------===//
3560 // Statement Emission
3561 //===--------------------------------------------------------------------===//
3562
3563 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
3564 void EmitStopPoint(const Stmt *S);
3565
3566 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
3567 /// this function even if there is no current insertion point.
3568 ///
3569 /// This function may clear the current insertion point; callers should use
3570 /// EnsureInsertPoint if they wish to subsequently generate code without first
3571 /// calling EmitBlock, EmitBranch, or EmitStmt.
3572 void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = {});
3573
3574 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
3575 /// necessarily require an insertion point or debug information; typically
3576 /// because the statement amounts to a jump or a container of other
3577 /// statements.
3578 ///
3579 /// \return True if the statement was handled.
3580 bool EmitSimpleStmt(const Stmt *S, ArrayRef<const Attr *> Attrs);
3581
3582 Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
3583 AggValueSlot AVS = AggValueSlot::ignored());
3584 Address
3585 EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast = false,
3586 AggValueSlot AVS = AggValueSlot::ignored());
3587
3588 /// EmitLabel - Emit the block for the given label. It is legal to call this
3589 /// function even if there is no current insertion point.
3590 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
3591
3592 void EmitLabelStmt(const LabelStmt &S);
3593 void EmitAttributedStmt(const AttributedStmt &S);
3594 void EmitGotoStmt(const GotoStmt &S);
3595 void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
3596 void EmitIfStmt(const IfStmt &S);
3597
3598 void EmitWhileStmt(const WhileStmt &S, ArrayRef<const Attr *> Attrs = {});
3599 void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = {});
3600 void EmitForStmt(const ForStmt &S, ArrayRef<const Attr *> Attrs = {});
3601 void EmitReturnStmt(const ReturnStmt &S);
3602 void EmitDeclStmt(const DeclStmt &S);
3603 void EmitBreakStmt(const BreakStmt &S);
3604 void EmitContinueStmt(const ContinueStmt &S);
3605 void EmitSwitchStmt(const SwitchStmt &S);
3606 void EmitDefaultStmt(const DefaultStmt &S, ArrayRef<const Attr *> Attrs);
3607 void EmitCaseStmt(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
3608 void EmitCaseStmtRange(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
3609 void EmitAsmStmt(const AsmStmt &S);
3610
3611 void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
3612 void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
3613 void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
3614 void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
3615 void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
3616
3617 void EmitCoroutineBody(const CoroutineBodyStmt &S);
3618 void EmitCoreturnStmt(const CoreturnStmt &S);
3619 RValue EmitCoawaitExpr(const CoawaitExpr &E,
3620 AggValueSlot aggSlot = AggValueSlot::ignored(),
3621 bool ignoreResult = false);
3622 LValue EmitCoawaitLValue(const CoawaitExpr *E);
3623 RValue EmitCoyieldExpr(const CoyieldExpr &E,
3624 AggValueSlot aggSlot = AggValueSlot::ignored(),
3625 bool ignoreResult = false);
3626 LValue EmitCoyieldLValue(const CoyieldExpr *E);
3627 RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
3628
3629 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3630 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3631
3632 void EmitCXXTryStmt(const CXXTryStmt &S);
3633 void EmitSEHTryStmt(const SEHTryStmt &S);
3634 void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
3635 void EnterSEHTryStmt(const SEHTryStmt &S);
3636 void ExitSEHTryStmt(const SEHTryStmt &S);
3637 void VolatilizeTryBlocks(llvm::BasicBlock *BB,
3638 llvm::SmallPtrSet<llvm::BasicBlock *, 10> &V);
3639
3640 void pushSEHCleanup(CleanupKind kind, llvm::Function *FinallyFunc);
3641 void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
3642 const Stmt *OutlinedStmt);
3643
3644 llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
3645 const SEHExceptStmt &Except);
3646
3647 llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
3648 const SEHFinallyStmt &Finally);
3649
3650 void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
3651 llvm::Value *ParentFP, llvm::Value *EntryEBP);
3652 llvm::Value *EmitSEHExceptionCode();
3653 llvm::Value *EmitSEHExceptionInfo();
3654 llvm::Value *EmitSEHAbnormalTermination();
3655
3656 /// Emit simple code for OpenMP directives in Simd-only mode.
3657 void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);
3658
3659 /// Scan the outlined statement for captures from the parent function. For
3660 /// each capture, mark the capture as escaped and emit a call to
3661 /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
3662 void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
3663 bool IsFilter);
3664
3665 /// Recovers the address of a local in a parent function. ParentVar is the
3666 /// address of the variable used in the immediate parent function. It can
3667 /// either be an alloca or a call to llvm.localrecover if there are nested
3668 /// outlined functions. ParentFP is the frame pointer of the outermost parent
3669 /// frame.
3670 Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
3671 Address ParentVar, llvm::Value *ParentFP);
3672
3673 void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
3674 ArrayRef<const Attr *> Attrs = {});
3675
3676 /// Controls insertion of cancellation exit blocks in worksharing constructs.
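  ///
  /// A usage sketch from a worksharing emitter (assumes a loop directive
  /// \c S providing \c hasCancel()):
  /// \code
  ///   OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(),
  ///                                   S.hasCancel());
  /// \endcode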
3677 class OMPCancelStackRAII {
3678 CodeGenFunction &CGF;
3679
3680 public:
3681 OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
3682 bool HasCancel)
3683 : CGF(CGF) {
3684 CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
3685 }
3686 ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
3687 };
3688
  /// Returns the calculated size of the specified type.
3690 llvm::Value *getTypeSize(QualType Ty);
3691 LValue InitCapturedStruct(const CapturedStmt &S);
3692 llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
3693 llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
3694 Address GenerateCapturedStmtArgument(const CapturedStmt &S);
3695 llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
3696 SourceLocation Loc);
3697 void GenerateOpenMPCapturedVars(const CapturedStmt &S,
3698 SmallVectorImpl<llvm::Value *> &CapturedVars);
3699 void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
3700 SourceLocation Loc);
  /// Perform element-by-element copying of arrays with type \a
  /// OriginalType from \a SrcAddr to \a DestAddr using the copying procedure
  /// generated by \a CopyGen.
3704 ///
3705 /// \param DestAddr Address of the destination array.
3706 /// \param SrcAddr Address of the source array.
3707 /// \param OriginalType Type of destination and source arrays.
3708 /// \param CopyGen Copying procedure that copies value of single array element
3709 /// to another single array element.
3710 void EmitOMPAggregateAssign(
3711 Address DestAddr, Address SrcAddr, QualType OriginalType,
3712 const llvm::function_ref<void(Address, Address)> CopyGen);
3713 /// Emit proper copying of data from one variable to another.
3714 ///
3715 /// \param OriginalType Original type of the copied variables.
3716 /// \param DestAddr Destination address.
3717 /// \param SrcAddr Source address.
3718 /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
3719 /// type of the base array element).
3720 /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
3721 /// the base array element).
  /// \param Copy Actual copying expression for copying data from \a SrcVD to
  /// \a DestVD.
3724 void EmitOMPCopy(QualType OriginalType, Address DestAddr, Address SrcAddr,
3725 const VarDecl *DestVD, const VarDecl *SrcVD,
3726 const Expr *Copy);
  /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
  /// \a X = \a E \a BO \a X.
3729 ///
3730 /// \param X Value to be updated.
3731 /// \param E Update value.
3732 /// \param BO Binary operation for update operation.
  /// \param IsXLHSInRHSPart true if \a X is the LHS in the RHS part of the
  /// update expression, false otherwise.
3735 /// \param AO Atomic ordering of the generated atomic instructions.
3736 /// \param CommonGen Code generator for complex expressions that cannot be
3737 /// expressed through atomicrmw instruction.
  /// \returns <true, OldAtomicValue> if a simple 'atomicrmw' instruction was
  /// generated, <false, RValue::get(nullptr)> otherwise.
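  ///
  /// For example, given a source-level construct like the following sketch:
  /// \code
  ///   #pragma omp atomic update
  ///   x = x + e; // X is the LHS of the RHS part, so IsXLHSInRHSPart is true
  /// \endcode
  /// a single 'atomicrmw add' can be emitted when the operand types allow it.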
3740 std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
3741 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3742 llvm::AtomicOrdering AO, SourceLocation Loc,
3743 const llvm::function_ref<RValue(RValue)> CommonGen);
3744 bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
3745 OMPPrivateScope &PrivateScope);
3746 void EmitOMPPrivateClause(const OMPExecutableDirective &D,
3747 OMPPrivateScope &PrivateScope);
3748 void EmitOMPUseDevicePtrClause(
3749 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
3750 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3751 CaptureDeviceAddrMap);
3752 void EmitOMPUseDeviceAddrClause(
3753 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
3754 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3755 CaptureDeviceAddrMap);
  /// Emit code for copyin clause in \a D directive. The following code is
  /// generated at the start of outlined functions for directives:
3758 /// \code
3759 /// threadprivate_var1 = master_threadprivate_var1;
3760 /// operator=(threadprivate_var2, master_threadprivate_var2);
3761 /// ...
3762 /// __kmpc_barrier(&loc, global_tid);
3763 /// \endcode
3764 ///
3765 /// \param D OpenMP directive possibly with 'copyin' clause(s).
3766 /// \returns true if at least one copyin variable is found, false otherwise.
3767 bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
3768 /// Emit initial code for lastprivate variables. If some variable is
3769 /// not also firstprivate, then the default initialization is used. Otherwise
3770 /// initialization of this variable is performed by EmitOMPFirstprivateClause
3771 /// method.
3772 ///
  /// \param D Directive that may have 'lastprivate' clauses.
3774 /// \param PrivateScope Private scope for capturing lastprivate variables for
3775 /// proper codegen in internal captured statement.
3776 ///
3777 /// \returns true if there is at least one lastprivate variable, false
3778 /// otherwise.
3779 bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
3780 OMPPrivateScope &PrivateScope);
3781 /// Emit final copying of lastprivate values to original variables at
3782 /// the end of the worksharing or simd directive.
3783 ///
  /// \param D Directive that has at least one 'lastprivate' clause.
  /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
  /// it is the last iteration of the loop code in the associated directive, or
  /// to 'i1 false' otherwise. If this item is nullptr, no final check is
  /// required.
3788 void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
3789 bool NoFinals,
3790 llvm::Value *IsLastIterCond = nullptr);
3791 /// Emit initial code for linear clauses.
3792 void EmitOMPLinearClause(const OMPLoopDirective &D,
3793 CodeGenFunction::OMPPrivateScope &PrivateScope);
3794 /// Emit final code for linear clauses.
3795 /// \param CondGen Optional conditional code for final part of codegen for
3796 /// linear clause.
3797 void EmitOMPLinearClauseFinal(
3798 const OMPLoopDirective &D,
3799 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3800 /// Emit initial code for reduction variables. Creates reduction copies
  /// and initializes them with the values according to the OpenMP standard.
3802 ///
3803 /// \param D Directive (possibly) with the 'reduction' clause.
3804 /// \param PrivateScope Private scope for capturing reduction variables for
3805 /// proper codegen in internal captured statement.
3806 ///
3807 void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
3808 OMPPrivateScope &PrivateScope,
3809 bool ForInscan = false);
3810 /// Emit final update of reduction values to original variables at
3811 /// the end of the directive.
3812 ///
  /// \param D Directive that has at least one 'reduction' clause.
3814 /// \param ReductionKind The kind of reduction to perform.
3815 void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
3816 const OpenMPDirectiveKind ReductionKind);
3817 /// Emit initial code for linear variables. Creates private copies
  /// and initializes them with the values according to the OpenMP standard.
3819 ///
3820 /// \param D Directive (possibly) with the 'linear' clause.
3821 /// \return true if at least one linear variable is found that should be
3822 /// initialized with the value of the original variable, false otherwise.
3823 bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);
3824
3825 typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3826 llvm::Function * /*OutlinedFn*/,
3827 const OMPTaskDataTy & /*Data*/)>
3828 TaskGenTy;
3829 void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
3830 const OpenMPDirectiveKind CapturedRegion,
3831 const RegionCodeGenTy &BodyGen,
3832 const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3833 struct OMPTargetDataInfo {
3834 Address BasePointersArray = Address::invalid();
3835 Address PointersArray = Address::invalid();
3836 Address SizesArray = Address::invalid();
3837 Address MappersArray = Address::invalid();
3838 unsigned NumberOfTargetItems = 0;
3839 explicit OMPTargetDataInfo() = default;
3840 OMPTargetDataInfo(Address BasePointersArray, Address PointersArray,
3841 Address SizesArray, Address MappersArray,
3842 unsigned NumberOfTargetItems)
3843 : BasePointersArray(BasePointersArray), PointersArray(PointersArray),
3844 SizesArray(SizesArray), MappersArray(MappersArray),
3845 NumberOfTargetItems(NumberOfTargetItems) {}
3846 };
3847 void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
3848 const RegionCodeGenTy &BodyGen,
3849 OMPTargetDataInfo &InputInfo);
3850 void processInReduction(const OMPExecutableDirective &S, OMPTaskDataTy &Data,
3851 CodeGenFunction &CGF, const CapturedStmt *CS,
3852 OMPPrivateScope &Scope);
3853 void EmitOMPMetaDirective(const OMPMetaDirective &S);
3854 void EmitOMPParallelDirective(const OMPParallelDirective &S);
3855 void EmitOMPSimdDirective(const OMPSimdDirective &S);
3856 void EmitOMPTileDirective(const OMPTileDirective &S);
3857 void EmitOMPStripeDirective(const OMPStripeDirective &S);
3858 void EmitOMPUnrollDirective(const OMPUnrollDirective &S);
3859 void EmitOMPReverseDirective(const OMPReverseDirective &S);
3860 void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S);
3861 void EmitOMPForDirective(const OMPForDirective &S);
3862 void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
3863 void EmitOMPScopeDirective(const OMPScopeDirective &S);
3864 void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
3865 void EmitOMPSectionDirective(const OMPSectionDirective &S);
3866 void EmitOMPSingleDirective(const OMPSingleDirective &S);
3867 void EmitOMPMasterDirective(const OMPMasterDirective &S);
3868 void EmitOMPMaskedDirective(const OMPMaskedDirective &S);
3869 void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
3870 void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
3871 void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
3872 void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
3873 void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S);
3874 void EmitOMPTaskDirective(const OMPTaskDirective &S);
3875 void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
3876 void EmitOMPErrorDirective(const OMPErrorDirective &S);
3877 void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
3878 void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
3879 void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
3880 void EmitOMPFlushDirective(const OMPFlushDirective &S);
3881 void EmitOMPDepobjDirective(const OMPDepobjDirective &S);
3882 void EmitOMPScanDirective(const OMPScanDirective &S);
3883 void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
3884 void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
3885 void EmitOMPTargetDirective(const OMPTargetDirective &S);
3886 void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
3887 void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S);
3888 void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S);
3889 void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S);
3890 void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S);
3891 void
3892 EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S);
3893 void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
3894 void
3895 EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
3896 void EmitOMPCancelDirective(const OMPCancelDirective &S);
3897 void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S);
3898 void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
3899 void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
3900 void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S);
3901 void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S);
3902 void
3903 EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S);
3904 void
3905 EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S);
3906 void EmitOMPParallelMasterTaskLoopDirective(
3907 const OMPParallelMasterTaskLoopDirective &S);
3908 void EmitOMPParallelMaskedTaskLoopDirective(
3909 const OMPParallelMaskedTaskLoopDirective &S);
3910 void EmitOMPParallelMasterTaskLoopSimdDirective(
3911 const OMPParallelMasterTaskLoopSimdDirective &S);
3912 void EmitOMPParallelMaskedTaskLoopSimdDirective(
3913 const OMPParallelMaskedTaskLoopSimdDirective &S);
3914 void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
3915 void EmitOMPDistributeParallelForDirective(
3916 const OMPDistributeParallelForDirective &S);
3917 void EmitOMPDistributeParallelForSimdDirective(
3918 const OMPDistributeParallelForSimdDirective &S);
3919 void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S);
3920 void EmitOMPTargetParallelForSimdDirective(
3921 const OMPTargetParallelForSimdDirective &S);
3922 void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S);
3923 void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S);
3924 void
3925 EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S);
3926 void EmitOMPTeamsDistributeParallelForSimdDirective(
3927 const OMPTeamsDistributeParallelForSimdDirective &S);
3928 void EmitOMPTeamsDistributeParallelForDirective(
3929 const OMPTeamsDistributeParallelForDirective &S);
3930 void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S);
3931 void EmitOMPTargetTeamsDistributeDirective(
3932 const OMPTargetTeamsDistributeDirective &S);
3933 void EmitOMPTargetTeamsDistributeParallelForDirective(
3934 const OMPTargetTeamsDistributeParallelForDirective &S);
3935 void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
3936 const OMPTargetTeamsDistributeParallelForSimdDirective &S);
3937 void EmitOMPTargetTeamsDistributeSimdDirective(
3938 const OMPTargetTeamsDistributeSimdDirective &S);
3939 void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S);
3940 void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S);
3941 void EmitOMPTargetParallelGenericLoopDirective(
3942 const OMPTargetParallelGenericLoopDirective &S);
3943 void EmitOMPTargetTeamsGenericLoopDirective(
3944 const OMPTargetTeamsGenericLoopDirective &S);
3945 void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S);
3946 void EmitOMPInteropDirective(const OMPInteropDirective &S);
3947 void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S);
3948 void EmitOMPAssumeDirective(const OMPAssumeDirective &S);
3949
3950 /// Emit device code for the target directive.
3951 static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
3952 StringRef ParentName,
3953 const OMPTargetDirective &S);
3954 static void
3955 EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3956 const OMPTargetParallelDirective &S);
3957 /// Emit device code for the target parallel for directive.
3958 static void EmitOMPTargetParallelForDeviceFunction(
3959 CodeGenModule &CGM, StringRef ParentName,
3960 const OMPTargetParallelForDirective &S);
3961 /// Emit device code for the target parallel for simd directive.
3962 static void EmitOMPTargetParallelForSimdDeviceFunction(
3963 CodeGenModule &CGM, StringRef ParentName,
3964 const OMPTargetParallelForSimdDirective &S);
3965 /// Emit device code for the target teams directive.
3966 static void
3967 EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3968 const OMPTargetTeamsDirective &S);
3969 /// Emit device code for the target teams distribute directive.
3970 static void EmitOMPTargetTeamsDistributeDeviceFunction(
3971 CodeGenModule &CGM, StringRef ParentName,
3972 const OMPTargetTeamsDistributeDirective &S);
3973 /// Emit device code for the target teams distribute simd directive.
3974 static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(
3975 CodeGenModule &CGM, StringRef ParentName,
3976 const OMPTargetTeamsDistributeSimdDirective &S);
3977 /// Emit device code for the target simd directive.
3978 static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
3979 StringRef ParentName,
3980 const OMPTargetSimdDirective &S);
3981 /// Emit device code for the target teams distribute parallel for simd
3982 /// directive.
3983 static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
3984 CodeGenModule &CGM, StringRef ParentName,
3985 const OMPTargetTeamsDistributeParallelForSimdDirective &S);
3986
3987 /// Emit device code for the target teams loop directive.
3988 static void EmitOMPTargetTeamsGenericLoopDeviceFunction(
3989 CodeGenModule &CGM, StringRef ParentName,
3990 const OMPTargetTeamsGenericLoopDirective &S);
3991
3992 /// Emit device code for the target parallel loop directive.
3993 static void EmitOMPTargetParallelGenericLoopDeviceFunction(
3994 CodeGenModule &CGM, StringRef ParentName,
3995 const OMPTargetParallelGenericLoopDirective &S);
3996
3997 static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
3998 CodeGenModule &CGM, StringRef ParentName,
3999 const OMPTargetTeamsDistributeParallelForDirective &S);
4000
4001 /// Emit the Stmt \p S and return its topmost canonical loop, if any.
  /// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
4003 /// future it is meant to be the number of loops expected in the loop nests
4004 /// (usually specified by the "collapse" clause) that are collapsed to a
4005 /// single loop by this function.
4006 llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
4007 int Depth);
4008
4009 /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
4010 void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S);
4011
4012 /// Emit inner loop of the worksharing/simd construct.
4013 ///
  /// \param S Directive for which the inner loop must be emitted.
  /// \param RequiresCleanup true, if the directive has some associated private
  /// variables.
  /// \param LoopCond Boolean condition for loop continuation.
  /// \param IncExpr Increment expression for the loop control variable.
  /// \param BodyGen Generator for the inner body of the inner loop.
  /// \param PostIncGen Generator for post-increment code (required for ordered
  /// loop directives).
4022 void EmitOMPInnerLoop(
4023 const OMPExecutableDirective &S, bool RequiresCleanup,
4024 const Expr *LoopCond, const Expr *IncExpr,
4025 const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
4026 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
4027
4028 JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
4029 /// Emit initial code for loop counters of loop-based directives.
4030 void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
4031 OMPPrivateScope &LoopScope);
4032
4033 /// Helper for the OpenMP loop directives.
4034 void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
4035
  /// Emit code for the worksharing loop-based directive.
  /// \return true if this construct has any lastprivate clause, false
  /// otherwise.
4039 bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
4040 const CodeGenLoopBoundsTy &CodeGenLoopBounds,
4041 const CodeGenDispatchBoundsTy &CGDispatchBounds);
4042
4043 /// Emit code for the distribute loop-based directive.
4044 void EmitOMPDistributeLoop(const OMPLoopDirective &S,
4045 const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
4046
4047 /// Helpers for the OpenMP loop directives.
4048 void EmitOMPSimdInit(const OMPLoopDirective &D);
4049 void EmitOMPSimdFinal(
4050 const OMPLoopDirective &D,
4051 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
4052
  /// Emits the lvalue for an expression with a possibly captured variable.
4054 LValue EmitOMPSharedLValue(const Expr *E);
4055
4056private:
4057 /// Helpers for blocks.
4058 llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
4059
4060 /// struct with the values to be passed to the OpenMP loop-related functions
4061 struct OMPLoopArguments {
4062 /// loop lower bound
4063 Address LB = Address::invalid();
4064 /// loop upper bound
4065 Address UB = Address::invalid();
4066 /// loop stride
4067 Address ST = Address::invalid();
4068 /// isLastIteration argument for runtime functions
4069 Address IL = Address::invalid();
4070 /// Chunk value generated by sema
4071 llvm::Value *Chunk = nullptr;
4072 /// EnsureUpperBound
4073 Expr *EUB = nullptr;
4074 /// IncrementExpression
4075 Expr *IncExpr = nullptr;
4076 /// Loop initialization
4077 Expr *Init = nullptr;
4078 /// Loop exit condition
4079 Expr *Cond = nullptr;
4080 /// Update of LB after a whole chunk has been executed
4081 Expr *NextLB = nullptr;
4082 /// Update of UB after a whole chunk has been executed
4083 Expr *NextUB = nullptr;
4084 /// Distinguish between the for distribute and sections
4085 OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
4086 OMPLoopArguments() = default;
4087 OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
4088 llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
4089 Expr *IncExpr = nullptr, Expr *Init = nullptr,
4090 Expr *Cond = nullptr, Expr *NextLB = nullptr,
4091 Expr *NextUB = nullptr)
4092 : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
4093 IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
4094 NextUB(NextUB) {}
4095 };
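  // A minimal sketch of populating the bounds before calling one of the
  // outer-loop emitters below; the lvalues here are illustrative:
  //   OMPLoopArguments LoopArgs(LBLVal.getAddress(), UBLVal.getAddress(),
  //                             STLVal.getAddress(), ILLVal.getAddress(),
  //                             /*Chunk=*/nullptr);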
  void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
                        const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
                        const OMPLoopArguments &LoopArgs,
                        const CodeGenLoopTy &CodeGenLoop,
                        const CodeGenOrderedTy &CodeGenOrdered);
  void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
                           bool IsMonotonic, const OMPLoopDirective &S,
                           OMPPrivateScope &LoopScope, bool Ordered,
                           const OMPLoopArguments &LoopArgs,
                           const CodeGenDispatchBoundsTy &CGDispatchBounds);
  void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
                                  const OMPLoopDirective &S,
                                  OMPPrivateScope &LoopScope,
                                  const OMPLoopArguments &LoopArgs,
                                  const CodeGenLoopTy &CodeGenLoopContent);
  /// Emit code for the sections directive.
  void EmitSections(const OMPExecutableDirective &S);

public:
  //===--------------------------------------------------------------------===//
  // OpenACC Emission
  //===--------------------------------------------------------------------===//
  void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // simply emitting its structured block, but in the future we will
    // implement some sort of IR.
    EmitStmt(S.getStructuredBlock());
  }

  void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // simply emitting its loop, but in the future we will implement
    // some sort of IR.
    EmitStmt(S.getLoop());
  }

  void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // simply emitting its loop, but in the future we will implement
    // some sort of IR.
    EmitStmt(S.getLoop());
  }

  void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // simply emitting its structured block, but in the future we will
    // implement some sort of IR.
    EmitStmt(S.getStructuredBlock());
  }

  void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
  }

  void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
  }

  void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // simply emitting its structured block, but in the future we will
    // implement some sort of IR.
    EmitStmt(S.getStructuredBlock());
  }

  void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
  }

  void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
  }

  void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
  }

  void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
  }

  void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
  }

  void EmitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // simply emitting its associated stmt, but in the future we will implement
    // some sort of IR.
    EmitStmt(S.getAssociatedStmt());
  }
  void EmitOpenACCCacheConstruct(const OpenACCCacheConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // but in the future we will implement some sort of IR.
  }

  //===--------------------------------------------------------------------===//
  // LValue Expression Emission
  //===--------------------------------------------------------------------===//

  /// Create a check that a scalar RValue is non-null.
  llvm::Value *EmitNonNullRValueCheck(RValue RV, QualType T);

  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
  RValue GetUndefRValue(QualType Ty);

  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
  /// and issue an ErrorUnsupported style diagnostic (using the
  /// provided Name).
  RValue EmitUnsupportedRValue(const Expr *E, const char *Name);

  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
  /// an ErrorUnsupported style diagnostic (using the provided Name).
  LValue EmitUnsupportedLValue(const Expr *E, const char *Name);

  /// EmitLValue - Emit code to compute a designator that specifies the location
  /// of the expression.
  ///
  /// This can return one of two things: a simple address or a bitfield
  /// reference. In either case, the LLVM Value* in the LValue structure is
  /// guaranteed to be an LLVM pointer type.
  ///
  /// If this returns a bitfield reference, nothing about the pointee type of
  /// the LLVM value is known: For example, it may not be a pointer to an
  /// integer.
  ///
  /// If this returns a normal address, and if the lvalue's C type is fixed
  /// size, this method guarantees that the returned pointer type will point to
  /// an LLVM type of the same size as the lvalue's type. If the lvalue has a
  /// variable length type, this is not possible.
  ///
  LValue EmitLValue(const Expr *E,
                    KnownNonNull_t IsKnownNonNull = NotKnownNonNull);

private:
  LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);

public:
  /// Same as EmitLValue but additionally we generate checking code to
  /// guard against undefined behavior. This is only suitable when we know
  /// that the address will be used to access the object.
  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);

  RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc);

  void EmitAtomicInit(Expr *E, LValue lvalue);

  bool LValueIsSuitableForInlineAtomic(LValue Src);

  RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
                        AggValueSlot Slot = AggValueSlot::ignored());

  RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
                        llvm::AtomicOrdering AO, bool IsVolatile = false,
                        AggValueSlot slot = AggValueSlot::ignored());

  void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);

  void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
                       bool IsVolatile, bool isInit);

  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
      LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());

  /// Emit an atomicrmw instruction, applying relevant metadata when
  /// applicable.
  llvm::AtomicRMWInst *emitAtomicRMWInst(
      llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val,
      llvm::AtomicOrdering Order = llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::SyncScope::ID SSID = llvm::SyncScope::System,
      const AtomicExpr *AE = nullptr);

  void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);

  /// EmitToMemory - Change a scalar value from its value
  /// representation to its in-memory representation.
  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);

  /// EmitFromMemory - Change a scalar value from its memory
  /// representation to its value representation.
  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
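  // For example, a C/C++ 'bool' is an i1 as a value but is stored in memory
  // as an i8; a minimal sketch of the round trip:
  //   llvm::Value *Mem = EmitToMemory(BoolV, getContext().BoolTy);  // i1 -> i8
  //   llvm::Value *Val = EmitFromMemory(Mem, getContext().BoolTy);  // i8 -> i1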

  /// Check if the scalar \p Value is within the valid range for the given
  /// type \p Ty.
  ///
  /// Returns true if a check is needed (even if the range is unknown).
  bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                            SourceLocation Loc);

  /// EmitLoadOfScalar - Load a scalar value from an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation.
  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
                                SourceLocation Loc,
                                AlignmentSource Source = AlignmentSource::Type,
                                bool isNontemporal = false) {
    return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
                            CGM.getTBAAAccessInfo(Ty), isNontemporal);
  }

  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
                                SourceLocation Loc, LValueBaseInfo BaseInfo,
                                TBAAAccessInfo TBAAInfo,
                                bool isNontemporal = false);

  /// EmitLoadOfScalar - Load a scalar value from an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation. The l-value must be a simple
  /// l-value.
  llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);

  /// EmitStoreOfScalar - Store a scalar value to an address, taking
  /// care to appropriately convert from the LLVM value representation
  /// to the memory representation.
  void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile,
                         QualType Ty,
                         AlignmentSource Source = AlignmentSource::Type,
                         bool isInit = false, bool isNontemporal = false) {
    EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
                      CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
  }

  void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile,
                         QualType Ty, LValueBaseInfo BaseInfo,
                         TBAAAccessInfo TBAAInfo, bool isInit = false,
                         bool isNontemporal = false);

  /// EmitStoreOfScalar - Store a scalar value to an address, taking
  /// care to appropriately convert from the LLVM value representation
  /// to the memory representation. The l-value must be a simple
  /// l-value. The isInit flag indicates whether this is an initialization.
  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                         bool isInit = false);

  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
  /// this method emits the address of the lvalue, then loads the result as an
  /// rvalue, returning the rvalue.
  RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
  RValue EmitLoadOfExtVectorElementLValue(LValue V);
  RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
  RValue EmitLoadOfGlobalRegLValue(LValue LV);

  /// Like EmitLoadOfLValue but also handles complex and aggregate types.
  RValue EmitLoadOfAnyValue(LValue V,
                            AggValueSlot Slot = AggValueSlot::ignored(),
                            SourceLocation Loc = {});

  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
  /// lvalue, where both are guaranteed to have the same type, and that type
  /// is 'Ty'.
  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
  void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);

  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
  /// as EmitStoreThroughLValue.
  ///
  /// \param Result [out] - If non-null, this will be set to a Value* for the
  /// bit-field contents after the store, appropriate for use as the result of
  /// an assignment to the bit-field.
  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                      llvm::Value **Result = nullptr);

  /// Emit an l-value for an assignment (simple or compound) of complex type.
  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
  LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
                                             llvm::Value *&Result);

  // Note: only available for agg return types
  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
  // Note: only available for agg return types
  LValue EmitCallExprLValue(const CallExpr *E,
                            llvm::CallBase **CallOrInvoke = nullptr);
  // Note: only available for agg return types
  LValue EmitVAArgExprLValue(const VAArgExpr *E);
  LValue EmitDeclRefLValue(const DeclRefExpr *E);
  LValue EmitStringLiteralLValue(const StringLiteral *E);
  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
  LValue EmitPredefinedLValue(const PredefinedExpr *E);
  LValue EmitUnaryOpLValue(const UnaryOperator *E);
  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
                                bool Accessed = false);
  llvm::Value *EmitMatrixIndexExpr(const Expr *E);
  LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E);
  LValue EmitArraySectionExpr(const ArraySectionExpr *E,
                              bool IsLowerBound = true);
  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
  LValue EmitMemberExpr(const MemberExpr *E);
  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
  LValue EmitInitListLValue(const InitListExpr *E);
  void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E);
  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
  LValue EmitCastLValue(const CastExpr *E);
  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
  LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E);

  std::pair<LValue, LValue> EmitHLSLOutArgLValues(const HLSLOutArgExpr *E,
                                                  QualType Ty);
  LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args,
                            QualType Ty);

  Address EmitExtVectorElementLValue(LValue V);

  RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);

  Address EmitArrayToPointerDecay(const Expr *Array,
                                  LValueBaseInfo *BaseInfo = nullptr,
                                  TBAAAccessInfo *TBAAInfo = nullptr);

  class ConstantEmission {
    llvm::PointerIntPair<llvm::Constant *, 1, bool> ValueAndIsReference;
    ConstantEmission(llvm::Constant *C, bool isReference)
        : ValueAndIsReference(C, isReference) {}

  public:
    ConstantEmission() {}
    static ConstantEmission forReference(llvm::Constant *C) {
      return ConstantEmission(C, true);
    }
    static ConstantEmission forValue(llvm::Constant *C) {
      return ConstantEmission(C, false);
    }

    explicit operator bool() const {
      return ValueAndIsReference.getOpaqueValue() != nullptr;
    }

    bool isReference() const { return ValueAndIsReference.getInt(); }
    LValue getReferenceLValue(CodeGenFunction &CGF, const Expr *RefExpr) const {
      assert(isReference());
      return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
                                            RefExpr->getType());
    }

    llvm::Constant *getValue() const {
      assert(!isReference());
      return ValueAndIsReference.getPointer();
    }
  };

  ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr);
  ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
  llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
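  // A hedged sketch of the intended pattern for these helpers ('DRE' is an
  // illustrative DeclRefExpr*):
  //   if (ConstantEmission CE = tryEmitAsConstant(DRE)) {
  //     if (CE.isReference())
  //       return CE.getReferenceLValue(*this, DRE);
  //     llvm::Value *V = emitScalarConstant(CE, DRE);  // use the folded value
  //   }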

  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
                                AggValueSlot slot = AggValueSlot::ignored());
  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);

  void FlattenAccessAndType(
      Address Addr, QualType AddrTy,
      SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList,
      SmallVectorImpl<QualType> &FlatTypes);

  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                              const ObjCIvarDecl *Ivar);
  llvm::Value *EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
                                           const ObjCIvarDecl *Ivar);
  LValue EmitLValueForField(LValue Base, const FieldDecl *Field,
                            bool IsInBounds = true);
  LValue EmitLValueForLambdaField(const FieldDecl *Field);
  LValue EmitLValueForLambdaField(const FieldDecl *Field,
                                  llvm::Value *ThisValue);

  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
  /// if the Field is a reference, this will return the address of the reference
  /// and not the address of the value stored in the reference.
  LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field);

  LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base,
                           const ObjCIvarDecl *Ivar, unsigned CVRQualifiers);

  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
  LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);

  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
  LValue EmitStmtExprLValue(const StmtExpr *E);
  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
  void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);

  //===--------------------------------------------------------------------===//
  // Scalar Expression Emission
  //===--------------------------------------------------------------------===//

  /// EmitCall - Generate a call of the given function, expecting the given
  /// result type, and using the given argument list which specifies both the
  /// LLVM arguments and the types they were derived from.
  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
                  ReturnValueSlot ReturnValue, const CallArgList &Args,
                  llvm::CallBase **CallOrInvoke, bool IsMustTail,
                  SourceLocation Loc,
                  bool IsVirtualFunctionPointerThunk = false);
  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
                  ReturnValueSlot ReturnValue, const CallArgList &Args,
                  llvm::CallBase **CallOrInvoke = nullptr,
                  bool IsMustTail = false) {
    return EmitCall(CallInfo, Callee, ReturnValue, Args, CallOrInvoke,
                    IsMustTail, SourceLocation());
  }
  RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
                  ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr,
                  llvm::CallBase **CallOrInvoke = nullptr,
                  CGFunctionInfo const **ResolvedFnInfo = nullptr);

  // If a Call or Invoke instruction was emitted for this CallExpr, this method
  // writes the pointer to `CallOrInvoke` if it's not null.
  RValue EmitCallExpr(const CallExpr *E,
                      ReturnValueSlot ReturnValue = ReturnValueSlot(),
                      llvm::CallBase **CallOrInvoke = nullptr);
  RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue,
                            llvm::CallBase **CallOrInvoke = nullptr);
  CGCallee EmitCallee(const Expr *E);

  void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
  void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl);

  llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
                                  const Twine &name = "");
  llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
                                  ArrayRef<llvm::Value *> args,
                                  const Twine &name = "");
  llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                          const Twine &name = "");
  llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                          ArrayRef<Address> args,
                                          const Twine &name = "");
  llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                          ArrayRef<llvm::Value *> args,
                                          const Twine &name = "");
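  // A minimal sketch of emitting a runtime call; "__example_rt_fn" is a
  // hypothetical runtime entry point, not part of any real runtime:
  //   llvm::FunctionType *FTy = llvm::FunctionType::get(
  //       Builder.getVoidTy(), {Builder.getPtrTy()}, /*isVarArg=*/false);
  //   llvm::FunctionCallee Fn =
  //       CGM.CreateRuntimeFunction(FTy, "__example_rt_fn");
  //   EmitRuntimeCall(Fn, {PtrV});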

  SmallVector<llvm::OperandBundleDef, 1>
  getBundlesForFunclet(llvm::Value *Callee);

  llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee,
                                   ArrayRef<llvm::Value *> Args,
                                   const Twine &Name = "");
  llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                          ArrayRef<llvm::Value *> args,
                                          const Twine &name = "");
  llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                          const Twine &name = "");
  void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                       ArrayRef<llvm::Value *> args);

  CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
                                     NestedNameSpecifier *Qual, llvm::Type *Ty);

  CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
                                               CXXDtorType Type,
                                               const CXXRecordDecl *RD);

  bool isPointerKnownNonNull(const Expr *E);
  /// Check whether the underlying base pointer is a constant null.
  bool isUnderlyingBasePointerConstantNull(const Expr *E);

  /// Create the discriminator from the storage address and the entity hash.
  llvm::Value *EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress,
                                                 llvm::Value *Discriminator);
  CGPointerAuthInfo EmitPointerAuthInfo(const PointerAuthSchema &Schema,
                                        llvm::Value *StorageAddress,
                                        GlobalDecl SchemaDecl,
                                        QualType SchemaType);

  llvm::Value *EmitPointerAuthSign(const CGPointerAuthInfo &Info,
                                   llvm::Value *Pointer);

  llvm::Value *EmitPointerAuthAuth(const CGPointerAuthInfo &Info,
                                   llvm::Value *Pointer);

  llvm::Value *emitPointerAuthResign(llvm::Value *Pointer, QualType PointerType,
                                     const CGPointerAuthInfo &CurAuthInfo,
                                     const CGPointerAuthInfo &NewAuthInfo,
                                     bool IsKnownNonNull);
  llvm::Value *emitPointerAuthResignCall(llvm::Value *Pointer,
                                         const CGPointerAuthInfo &CurInfo,
                                         const CGPointerAuthInfo &NewInfo);

  void EmitPointerAuthOperandBundle(
      const CGPointerAuthInfo &Info,
      SmallVectorImpl<llvm::OperandBundleDef> &Bundles);

  CGPointerAuthInfo EmitPointerAuthInfo(PointerAuthQualifier Qualifier,
                                        Address StorageAddress);
  llvm::Value *EmitPointerAuthQualify(PointerAuthQualifier Qualifier,
                                      llvm::Value *Pointer, QualType ValueType,
                                      Address StorageAddress,
                                      bool IsKnownNonNull);
  llvm::Value *EmitPointerAuthQualify(PointerAuthQualifier Qualifier,
                                      const Expr *PointerExpr,
                                      Address StorageAddress);
  llvm::Value *EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier,
                                        llvm::Value *Pointer,
                                        QualType PointerType,
                                        Address StorageAddress,
                                        bool IsKnownNonNull);
  void EmitPointerAuthCopy(PointerAuthQualifier Qualifier, QualType Type,
                           Address DestField, Address SrcField);

  std::pair<llvm::Value *, CGPointerAuthInfo>
  EmitOrigPointerRValue(const Expr *E);

  llvm::Value *authPointerToPointerCast(llvm::Value *ResultPtr,
                                        QualType SourceType, QualType DestType);
  Address authPointerToPointerCast(Address Ptr, QualType SourceType,
                                   QualType DestType);

  Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy);

  llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) {
    return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer();
  }

  // Return the copy constructor name with the prefix "__copy_constructor_"
  // removed.
  static std::string getNonTrivialCopyConstructorStr(QualType QT,
                                                     CharUnits Alignment,
                                                     bool IsVolatile,
                                                     ASTContext &Ctx);

  // Return the destructor name with the prefix "__destructor_" removed.
  static std::string getNonTrivialDestructorStr(QualType QT,
                                                CharUnits Alignment,
                                                bool IsVolatile,
                                                ASTContext &Ctx);

  // These functions emit calls to the special functions of non-trivial C
  // structs.
  void defaultInitNonTrivialCStructVar(LValue Dst);
  void callCStructDefaultConstructor(LValue Dst);
  void callCStructDestructor(LValue Dst);
  void callCStructCopyConstructor(LValue Dst, LValue Src);
  void callCStructMoveConstructor(LValue Dst, LValue Src);
  void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
  void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);

  RValue EmitCXXMemberOrOperatorCall(
      const CXXMethodDecl *Method, const CGCallee &Callee,
      ReturnValueSlot ReturnValue, llvm::Value *This,
      llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E,
      CallArgList *RtlArgs, llvm::CallBase **CallOrInvoke);
  RValue EmitCXXDestructorCall(GlobalDecl Dtor, const CGCallee &Callee,
                               llvm::Value *This, QualType ThisTy,
                               llvm::Value *ImplicitParam,
                               QualType ImplicitParamTy, const CallExpr *E,
                               llvm::CallBase **CallOrInvoke = nullptr);
  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
                               ReturnValueSlot ReturnValue,
                               llvm::CallBase **CallOrInvoke = nullptr);
  RValue EmitCXXMemberOrOperatorMemberCallExpr(
      const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
      bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
      const Expr *Base, llvm::CallBase **CallOrInvoke);
  // Compute the object pointer.
  Address EmitCXXMemberDataPointerAddress(
      const Expr *E, Address base, llvm::Value *memberPtr,
      const MemberPointerType *memberPtrType, bool IsInBounds,
      LValueBaseInfo *BaseInfo = nullptr, TBAAAccessInfo *TBAAInfo = nullptr);
  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                      ReturnValueSlot ReturnValue,
                                      llvm::CallBase **CallOrInvoke);

  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                       const CXXMethodDecl *MD,
                                       ReturnValueSlot ReturnValue,
                                       llvm::CallBase **CallOrInvoke);
  RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E);

  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                ReturnValueSlot ReturnValue,
                                llvm::CallBase **CallOrInvoke);

  RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E);
  RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E);

  RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
                         const CallExpr *E, ReturnValueSlot ReturnValue);

  RValue emitRotate(const CallExpr *E, bool IsRotateRight);

  /// Emit IR for __builtin_os_log_format.
  RValue emitBuiltinOSLogFormat(const CallExpr &E);

  /// Emit IR for __builtin_is_aligned.
  RValue EmitBuiltinIsAligned(const CallExpr *E);
  /// Emit IR for __builtin_align_up/__builtin_align_down.
  RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp);

  llvm::Function *generateBuiltinOSLogHelperFunction(
      const analyze_os_log::OSLogBufferLayout &Layout,
      CharUnits BufferAlignment);

  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue,
                           llvm::CallBase **CallOrInvoke);

  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns nullptr if
  /// the call is unhandled by the current target.
  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                     ReturnValueSlot ReturnValue);

  llvm::Value *
  EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
                                const llvm::CmpInst::Predicate Pred,
                                const llvm::Twine &Name = "");
  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                  ReturnValueSlot ReturnValue,
                                  llvm::Triple::ArchType Arch);
  llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                     ReturnValueSlot ReturnValue,
                                     llvm::Triple::ArchType Arch);
  llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                     ReturnValueSlot ReturnValue,
                                     llvm::Triple::ArchType Arch);
  llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy,
                                   QualType RTy);
  llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy,
                                   QualType RTy);

  llvm::Value *
  EmitCommonNeonBuiltinExpr(unsigned BuiltinID, unsigned LLVMIntrinsic,
                            unsigned AltLLVMIntrinsic, const char *NameHint,
                            unsigned Modifier, const CallExpr *E,
                            SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0,
                            Address PtrOp1, llvm::Triple::ArchType Arch);

  llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
                                          unsigned Modifier, llvm::Type *ArgTy,
                                          const CallExpr *E);
  llvm::Value *EmitNeonCall(llvm::Function *F,
                            SmallVectorImpl<llvm::Value *> &O, const char *name,
                            unsigned shift = 0, bool rightshift = false);
  llvm::Value *EmitFP8NeonCall(unsigned IID, ArrayRef<llvm::Type *> Tys,
                               SmallVectorImpl<llvm::Value *> &O,
                               const CallExpr *E, const char *name);
  llvm::Value *EmitFP8NeonCvtCall(unsigned IID, llvm::Type *Ty0,
                                  llvm::Type *Ty1, bool Extract,
                                  SmallVectorImpl<llvm::Value *> &Ops,
                                  const CallExpr *E, const char *name);
  llvm::Value *EmitFP8NeonFDOTCall(unsigned IID, bool ExtendLaneArg,
                                   llvm::Type *RetTy,
                                   SmallVectorImpl<llvm::Value *> &Ops,
                                   const CallExpr *E, const char *name);
  llvm::Value *EmitFP8NeonFMLACall(unsigned IID, bool ExtendLaneArg,
                                   llvm::Type *RetTy,
                                   SmallVectorImpl<llvm::Value *> &Ops,
                                   const CallExpr *E, const char *name);
  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
                             const llvm::ElementCount &Count);
  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
                                   bool negateForRightShift);
  llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
                                 llvm::Type *Ty, bool usgn, const char *name);
  llvm::Value *vectorWrapScalar16(llvm::Value *Op);
  /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
  /// access builtin. Only required if it can't be inferred from the base
  /// pointer operand.
  llvm::Type *SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags);

  SmallVector<llvm::Type *, 2>
  getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType,
                      ArrayRef<llvm::Value *> Ops);
  llvm::Type *getEltType(const SVETypeFlags &TypeFlags);
  llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
  llvm::ScalableVectorType *getSVEPredType(const SVETypeFlags &TypeFlags);
  llvm::Value *EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
                                    ArrayRef<llvm::Value *> Ops);
  llvm::Value *EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
                                  llvm::Type *ReturnType,
                                  ArrayRef<llvm::Value *> Ops);
  llvm::Value *EmitSVEAllTruePred(const SVETypeFlags &TypeFlags);
  llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
  llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
  llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
  llvm::Value *EmitSVEPMull(const SVETypeFlags &TypeFlags,
                            llvm::SmallVectorImpl<llvm::Value *> &Ops,
                            unsigned BuiltinID);
  llvm::Value *EmitSVEMovl(const SVETypeFlags &TypeFlags,
                           llvm::ArrayRef<llvm::Value *> Ops,
                           unsigned BuiltinID);
  llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
                                    llvm::ScalableVectorType *VTy);
  llvm::Value *EmitSVEPredicateTupleCast(llvm::Value *PredTuple,
                                         llvm::StructType *Ty);
  llvm::Value *EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
                                 llvm::SmallVectorImpl<llvm::Value *> &Ops,
                                 unsigned IntID);
  llvm::Value *EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
                                   llvm::SmallVectorImpl<llvm::Value *> &Ops,
                                   unsigned IntID);
  llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
                                 SmallVectorImpl<llvm::Value *> &Ops,
                                 unsigned BuiltinID, bool IsZExtReturn);
  llvm::Value *EmitSVEMaskedStore(const CallExpr *,
                                  SmallVectorImpl<llvm::Value *> &Ops,
                                  unsigned BuiltinID);
  llvm::Value *EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
                                   SmallVectorImpl<llvm::Value *> &Ops,
                                   unsigned BuiltinID);
  llvm::Value *EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
                                     SmallVectorImpl<llvm::Value *> &Ops,
                                     unsigned IntID);
  llvm::Value *EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
                                 SmallVectorImpl<llvm::Value *> &Ops,
                                 unsigned IntID);
  llvm::Value *EmitSVEStructStore(const SVETypeFlags &TypeFlags,
                                  SmallVectorImpl<llvm::Value *> &Ops,
                                  unsigned IntID);
  llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);

  llvm::Value *EmitSMELd1St1(const SVETypeFlags &TypeFlags,
                             llvm::SmallVectorImpl<llvm::Value *> &Ops,
                             unsigned IntID);
  llvm::Value *EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
                                llvm::SmallVectorImpl<llvm::Value *> &Ops,
                                unsigned IntID);
  llvm::Value *EmitSMEZero(const SVETypeFlags &TypeFlags,
                           llvm::SmallVectorImpl<llvm::Value *> &Ops,
                           unsigned IntID);
  llvm::Value *EmitSMELdrStr(const SVETypeFlags &TypeFlags,
                             llvm::SmallVectorImpl<llvm::Value *> &Ops,
                             unsigned IntID);

  void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E,
                                      SmallVectorImpl<llvm::Value *> &Ops,
                                      SVETypeFlags TypeFlags);

  llvm::Value *EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);

  llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                      llvm::Triple::ArchType Arch);
  llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);

  llvm::Value *BuildVector(ArrayRef<llvm::Value *> Ops);
  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                   ReturnValueSlot ReturnValue);

  // Returns a builtin function that the SPIR-V backend will expand into a spec
  // constant.
  llvm::Function *
  getSpecConstantFunction(const clang::QualType &SpecConstantType);

  llvm::Value *EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
                                           const CallExpr *E);
  llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
                                          const CallExpr *E);
  llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                    ReturnValueSlot ReturnValue);

  llvm::Value *EmitRISCVCpuSupports(const CallExpr *E);
  llvm::Value *EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs);
  llvm::Value *EmitRISCVCpuInit();
  llvm::Value *EmitRISCVCpuIs(const CallExpr *E);
  llvm::Value *EmitRISCVCpuIs(StringRef CPUStr);

  void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
                                      const CallExpr *E);
  void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
                               llvm::AtomicOrdering &AO,
                               llvm::SyncScope::ID &SSID);

  enum class MSVCIntrin;
  llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);

  llvm::Value *EmitBuiltinAvailable(const VersionTuple &Version);

  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
  llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
  llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
  llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
  llvm::Value *
  EmitObjCCollectionLiteral(const Expr *E,
                            const ObjCMethodDecl *MethodWithObjects);
  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
                             ReturnValueSlot Return = ReturnValueSlot());

  /// Retrieves the default cleanup kind for an ARC cleanup.
  /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
  CleanupKind getARCCleanupKind() {
    return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions ? NormalAndEHCleanup
                                                           : NormalCleanup;
  }

  // ARC primitives.
  void EmitARCInitWeak(Address addr, llvm::Value *value);
  void EmitARCDestroyWeak(Address addr);
  llvm::Value *EmitARCLoadWeak(Address addr);
  llvm::Value *EmitARCLoadWeakRetained(Address addr);
  llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
  void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
  void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
  void EmitARCCopyWeak(Address dst, Address src);
  void EmitARCMoveWeak(Address dst, Address src);
  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
                                  bool resultIgnored);
  llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
                                      bool resultIgnored);
  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
  void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
  void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
  llvm::Value *EmitARCAutorelease(llvm::Value *value);
  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
  llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);

  llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
  llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
                                      llvm::Type *returnType);
  void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);

  std::pair<LValue, llvm::Value *>
  EmitARCStoreAutoreleasing(const BinaryOperator *e);
  std::pair<LValue, llvm::Value *> EmitARCStoreStrong(const BinaryOperator *e,
                                                      bool ignored);
  std::pair<LValue, llvm::Value *>
  EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);

  llvm::Value *EmitObjCAlloc(llvm::Value *value, llvm::Type *returnType);
  llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
                                     llvm::Type *returnType);
  llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType);

  llvm::Value *EmitObjCThrowOperand(const Expr *expr);
  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);

  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
  llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
                                            bool allowUnsafeClaim);
  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
  llvm::Value *EmitARCUnsafeUnretainedScalarExpr(const Expr *expr);

  void EmitARCIntrinsicUse(ArrayRef<llvm::Value *> values);

  void EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values);

  static Destroyer destroyARCStrongImprecise;
  static Destroyer destroyARCStrongPrecise;
  static Destroyer destroyARCWeak;
  static Destroyer emitARCIntrinsicUse;
  static Destroyer destroyNonTrivialCStruct;

  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
  llvm::Value *EmitObjCAutoreleasePoolPush();
  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);

  /// Emits a reference binding to the passed in expression.
  RValue EmitReferenceBindingToExpr(const Expr *E);

  //===--------------------------------------------------------------------===//
  // Expression Emission
  //===--------------------------------------------------------------------===//

  // Expressions are broken into three classes: scalar, complex, aggregate.

  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
  /// scalar type, returning the result.
  llvm::Value *EmitScalarExpr(const Expr *E, bool IgnoreResultAssign = false);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
                                    QualType DstTy, SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc);

  /// EmitAggExpr - Emit the computation of the specified expression
  /// of aggregate type. The result is computed into the given slot,
  /// which may be null to indicate that the value is not needed.
  void EmitAggExpr(const Expr *E, AggValueSlot AS);
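  // A hedged sketch of evaluating an aggregate expression into a fresh
  // temporary slot (CreateAggTemp is declared elsewhere in this class):
  //   AggValueSlot Slot = CreateAggTemp(E->getType(), "agg.tmp");
  //   EmitAggExpr(E, Slot);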

  /// EmitAggExprToLValue - Emit the computation of the specified expression of
  /// aggregate type into a temporary LValue.
  LValue EmitAggExprToLValue(const Expr *E);

  enum ExprValueKind { EVK_RValue, EVK_NonRValue };

  /// EmitAggFinalDestCopy - Emit a copy of the specified aggregate into the
  /// destination address.
  void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src,
                            ExprValueKind SrcKind);

  /// Create a store to \arg Dst from \arg Src, truncating the stored value
  /// to at most \arg DstSize bytes.
  void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize,
                          bool DstIsVolatile);

  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
  /// make sure it survives garbage collection until this point.
  void EmitExtendGCLifetime(llvm::Value *object);

  /// EmitComplexExpr - Emit the computation of the specified expression of
  /// complex type, returning the result.
  ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
                                bool IgnoreImag = false);

  /// EmitComplexExprIntoLValue - Emit the given expression of complex
  /// type and place its result into the specified l-value.
  void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);

  /// EmitStoreOfComplex - Store a complex number into the specified l-value.
  void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);

  /// EmitLoadOfComplex - Load a complex number from the specified l-value.
  ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);

  ComplexPairTy EmitPromotedComplexExpr(const Expr *E, QualType PromotionType);
  llvm::Value *EmitPromotedScalarExpr(const Expr *E, QualType PromotionType);
  ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType);
  ComplexPairTy EmitUnPromotedValue(ComplexPairTy result,
                                    QualType PromotionType);

  Address emitAddrOfRealComponent(Address complex, QualType complexType);
  Address emitAddrOfImagComponent(Address complex, QualType complexType);

  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
  /// global variable that has already been created for it. If the initializer
  /// has a different type than GV does, this may free GV and return a different
  /// one. Otherwise it just returns GV.
  llvm::GlobalVariable *AddInitializerToStaticVarDecl(const VarDecl &D,
                                                      llvm::GlobalVariable *GV);

  // Emit an @llvm.invariant.start call for the given memory region.
  void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);

  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
  /// variable with global storage.
  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
                                bool PerformInit);

  llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
                                   llvm::Constant *Addr);

  llvm::Function *createTLSAtExitStub(const VarDecl &VD,
                                      llvm::FunctionCallee Dtor,
                                      llvm::Constant *Addr,
                                      llvm::FunctionCallee &AtExit);

  /// Call atexit() with a function that passes the given argument to
  /// the given function.
  void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
                                    llvm::Constant *addr);

  /// Registers the dtor using 'llvm.global_dtors' for platforms that do not
  /// support an 'atexit()' function.
  void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn,
                                  llvm::Constant *addr);

  /// Call atexit() with function dtorStub.
  void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);

  /// Call unatexit() with function dtorStub.
  llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub);

  /// Emit code in this function to perform a guarded variable
  /// initialization. Guarded initializations are used when it's not
  /// possible to prove that an initialization will be done exactly
  /// once, e.g. with a static local variable or a static data member
  /// of a class template.
  void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
                          bool PerformInit);

  enum class GuardKind { VariableGuard, TlsGuard };

  /// Emit a branch to select whether or not to perform guarded initialization.
  void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                llvm::BasicBlock *InitBlock,
                                llvm::BasicBlock *NoInitBlock, GuardKind Kind,
                                const VarDecl *D);

  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
  /// variables.
  void
  GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                            ArrayRef<llvm::Function *> CXXThreadLocals,
                            ConstantAddress Guard = ConstantAddress::invalid());

  /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
  /// variables.
  void GenerateCXXGlobalCleanUpFunc(
      llvm::Function *Fn,
      ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                          llvm::Constant *>>
          DtorsOrStermFinalizers);

  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D,
                                        llvm::GlobalVariable *Addr,
                                        bool PerformInit);

  void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);

  void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);

  void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);

  RValue EmitAtomicExpr(AtomicExpr *E);

  void EmitFakeUse(Address Addr);

  //===--------------------------------------------------------------------===//
  // Annotations Emission
  //===--------------------------------------------------------------------===//

  /// Emit an annotation call (intrinsic).
  llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
                                  llvm::Value *AnnotatedVal,
                                  StringRef AnnotationStr,
                                  SourceLocation Location,
                                  const AnnotateAttr *Attr);

  /// Emit local annotations for the local variable V, declared by D.
  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);

  /// Emit field annotations for the given field and value. Returns the
  /// annotation result.
  Address EmitFieldAnnotations(const FieldDecl *D, Address V);

  //===--------------------------------------------------------------------===//
  // Internal Helpers
  //===--------------------------------------------------------------------===//

  /// ContainsLabel - Return true if the statement contains a label in it. If
  /// this statement is not executed normally, then as long as it contains no
  /// label we can simply remove the unreachable code.
  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);

  /// containsBreak - Return true if the statement contains a break out of it.
  /// If the statement (recursively) contains a switch or loop with a break
  /// inside of it, this is fine.
  static bool containsBreak(const Stmt *S);

  /// Determine if the given statement might introduce a declaration into the
  /// current scope, by being a (possibly-labelled) DeclStmt.
  static bool mightAddDeclToScope(const Stmt *S);

  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  /// to a constant, or if it does but contains a label, return false. If it
  /// constant folds, return true and set the boolean result in Result.
  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result,
                                    bool AllowLabels = false);

  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  /// to a constant, or if it does but contains a label, return false. If it
  /// constant folds, return true and set the folded value.
  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
                                    bool AllowLabels = false);
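  // A minimal sketch of the intended use when emitting an 'if' whose
  // condition folds to a constant ('Then'/'Else' are illustrative):
  //   bool CondConstant;
  //   if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
  //     // Emit only the branch that is actually taken.
  //     EmitStmt(CondConstant ? Then : Else);
  //   }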

  /// Ignore parentheses and logical-NOT to track conditions consistently.
  static const Expr *stripCond(const Expr *C);

  /// isInstrumentedCondition - Determine whether the given condition is an
  /// instrumentable condition (i.e. no "&&" or "||").
  static bool isInstrumentedCondition(const Expr *C);

  /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
  /// increments a profile counter based on the semantics of the given logical
  /// operator opcode. This is used to instrument branch condition coverage
  /// for logical operators.
  void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp,
                                llvm::BasicBlock *TrueBlock,
                                llvm::BasicBlock *FalseBlock,
                                uint64_t TrueCount = 0,
                                Stmt::Likelihood LH = Stmt::LH_None,
                                const Expr *CntrIdx = nullptr);

  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
  /// if statement) to the specified blocks. Based on the condition, this might
  /// try to simplify the codegen of the conditional based on the branch.
  /// TrueCount should be the number of times we expect the condition to
  /// evaluate to true based on PGO data.
  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
                            llvm::BasicBlock *FalseBlock, uint64_t TrueCount,
                            Stmt::Likelihood LH = Stmt::LH_None,
                            const Expr *ConditionalOp = nullptr,
                            const VarDecl *ConditionalDecl = nullptr);

  /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
  /// nonnull, if \p LHS is marked _Nonnull.
  void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);

  /// An enumeration which makes it easier to specify whether or not an
  /// operation is a subtraction.
  enum { NotSubtraction = false, IsSubtraction = true };

  /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
  /// detect undefined behavior when the pointer overflow sanitizer is enabled.
  /// \p SignedIndices indicates whether any of the GEP indices are signed.
  /// \p IsSubtraction indicates whether the expression used to form the GEP
  /// is a subtraction.
  llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
                                      ArrayRef<llvm::Value *> IdxList,
                                      bool SignedIndices, bool IsSubtraction,
                                      SourceLocation Loc,
                                      const Twine &Name = "");

  Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
                                 llvm::Type *elementType, bool SignedIndices,
                                 bool IsSubtraction, SourceLocation Loc,
                                 CharUnits Align, const Twine &Name = "");
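  // A hedged sketch of forming 'P - N' with the overflow check, using the
  // enumerators above for readability ('ElemTy', 'PtrV', 'NegN' illustrative):
  //   llvm::Value *Diff =
  //       EmitCheckedInBoundsGEP(ElemTy, PtrV, NegN, /*SignedIndices=*/true,
  //                              IsSubtraction, Loc, "sub.ptr");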

  /// Specifies which type of sanitizer check to apply when handling a
  /// particular builtin.
  enum BuiltinCheckKind {
    BCK_CTZPassedZero,
    BCK_CLZPassedZero,
    BCK_AssumePassedFalse,
  };

  /// Emits an argument for a call to a builtin. If the builtin sanitizer is
  /// enabled, a runtime check specified by \p Kind is also emitted.
  llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);

  /// Emits an argument for a call to a `__builtin_assume`. If the builtin
  /// sanitizer is enabled, a runtime check is also emitted.
  llvm::Value *EmitCheckedArgForAssume(const Expr *E);

  /// Emit a description of a type in a format suitable for passing to
  /// a runtime sanitizer handler.
  llvm::Constant *EmitCheckTypeDescriptor(QualType T);

  /// Convert a value into a format suitable for passing to a runtime
  /// sanitizer handler.
  llvm::Value *EmitCheckValue(llvm::Value *V);

  /// Emit a description of a source location in a format suitable for
  /// passing to a runtime sanitizer handler.
  llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);

  void EmitKCFIOperandBundle(const CGCallee &Callee,
                             SmallVectorImpl<llvm::OperandBundleDef> &Bundles);

  /// Create a basic block that will either trap or call a handler function in
  /// the UBSan runtime with the provided arguments, and create a conditional
  /// branch to it.
  void
  EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
                Checked,
            SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
            ArrayRef<llvm::Value *> DynamicArgs);
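  // A hedged sketch of a single-condition check: trap or call the UBSan
  // handler when 'Cond' is false ('Cond', 'StaticArgs' and 'DynArgs' are
  // illustrative values assembled by the caller):
  //   EmitCheck(std::make_pair(Cond, SanitizerKind::SO_Null),
  //             SanitizerHandler::TypeMismatch, StaticArgs, DynArgs);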

  /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
  /// if \p Cond is false.
  void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal,
                            llvm::Value *Cond, llvm::ConstantInt *TypeId,
                            llvm::Value *Ptr,
                            ArrayRef<llvm::Constant *> StaticArgs);

  /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
  /// checking is enabled. Otherwise, just emit an unreachable instruction.
  void EmitUnreachable(SourceLocation Loc);

  /// Create a basic block that will call the trap intrinsic, and emit a
  /// conditional branch to it, for the -ftrapv checks.
  void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID,
                     bool NoMerge = false);

  /// Emit a call to trap or debugtrap and attach function attribute
  /// "trap-func-name" if specified.
  llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
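  /// For intuition (illustrative): with -ftrapv, a signed addition such as
  ///
  /// \code
  ///   int f(int a, int b) { return a + b; }
  /// \endcode
  ///
  /// is emitted via llvm.sadd.with.overflow, and the overflow bit is fed to
  /// EmitTrapCheck, which branches to a trapping block.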

  /// Emit a stub for the cross-DSO CFI check function.
  void EmitCfiCheckStub();

  /// Emit a cross-DSO CFI failure handling function.
  void EmitCfiCheckFail();

  /// Create a check for a function parameter that may potentially be
  /// declared as non-null.
  void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
                           AbstractCallee AC, unsigned ParmNum);

  void EmitNonNullArgCheck(Address Addr, QualType ArgType,
                           SourceLocation ArgLoc, AbstractCallee AC,
                           unsigned ParmNum);

  /// EmitWritebacks - Emit the writebacks queued up in the given call
  /// argument list.
  void EmitWritebacks(const CallArgList &Args);

  /// EmitCallArg - Emit a single call argument.
  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);

  /// EmitDelegateCallArg - We are performing a delegate call; that
  /// is, the current function is delegating to another one. Produce
  /// an r-value suitable for passing the given parameter.
  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
                           SourceLocation loc);

  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
  /// point operation, expressed as the maximum relative error in ulp.
  void SetFPAccuracy(llvm::Value *Val, float Accuracy);

  /// Set the minimum required accuracy of the given sqrt operation
  /// based on CodeGenOpts.
  void SetSqrtFPAccuracy(llvm::Value *Val);

  /// Set the minimum required accuracy of the given division operation based
  /// on CodeGenOpts.
  void SetDivFPAccuracy(llvm::Value *Val);
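  /// These accuracy requirements are encoded as !fpmath metadata on the
  /// instruction. For example (IR shown for illustration), SetFPAccuracy(V,
  /// 2.5f) on a float division yields roughly:
  ///
  /// \code
  ///   %div = fdiv float %a, %b, !fpmath !0
  ///   !0 = !{float 2.500000e+00}
  /// \endcode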

  /// Set the codegen fast-math flags.
  void SetFastMathFlags(FPOptions FPFeatures);

  // Truncate or extend a boolean vector to the requested number of elements.
  llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
                                     unsigned NumElementsDst,
                                     const llvm::Twine &Name = "");

  void maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty,
                               SourceLocation Loc);

private:
  // Emits a convergence_loop instruction for the given |BB|, with the
  // enclosing convergence token as its parent convergence instruction.
  llvm::ConvergenceControlInst *emitConvergenceLoopToken(llvm::BasicBlock *BB);

  // Adds a convergence_ctrl token, with the enclosing convergence token as
  // its parent convergence instruction, to the call |Input|.
  llvm::CallBase *addConvergenceControlToken(llvm::CallBase *Input);

  // Finds the convergence_entry instruction in |F|, or emits one if none
  // exists. Returns the convergence instruction.
  llvm::ConvergenceControlInst *
  getOrEmitConvergenceEntryToken(llvm::Function *F);

private:
  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
  void EmitReturnOfRValue(RValue RV, QualType Ty);

  void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);

  llvm::SmallVector<std::pair<llvm::WeakTrackingVH, llvm::Value *>, 4>
      DeferredReplacements;

  /// Set the address of a local variable.
  void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
    assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
    LocalDeclMap.insert({VD, Addr});
  }

  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
  ///
  /// \param AI - The first function argument of the expansion.
  void ExpandTypeFromArgs(QualType Ty, LValue Dst,
                          llvm::Function::arg_iterator &AI);

  /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for
  /// \arg Ty, into individual arguments on the provided vector \arg
  /// IRCallArgs, starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
  void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
                        SmallVectorImpl<llvm::Value *> &IRCallArgs,
                        unsigned &IRCallArgPos);
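  /// As a sketch of what Expand means (hypothetical C input and the rough
  /// shape of the resulting IR signature):
  ///
  /// \code
  ///   struct Point { int x, y; };
  ///   void f(struct Point p); // expanded: define void @f(i32 %0, i32 %1)
  /// \endcode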

  std::pair<llvm::Value *, llvm::Type *>
  EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
               std::string &ConstraintStr);

  std::pair<llvm::Value *, llvm::Type *>
  EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
                     QualType InputType, std::string &ConstraintStr,
                     SourceLocation Loc);

  /// Attempts to statically evaluate the object size of E. If that
  /// fails, emits code to compute the size of E at run time. This is
  /// pass_object_size aware.
  ///
  /// If \p EmittedE is non-null, this will use that instead of re-emitting E.
  llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                               llvm::IntegerType *ResType,
                                               llvm::Value *EmittedE,
                                               bool IsDynamic);

  /// Emits the size of E, as required by __builtin_object_size. This
  /// function is aware of pass_object_size parameters, and will act
  /// accordingly if E is a parameter with the pass_object_size attribute.
  llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                     llvm::IntegerType *ResType,
                                     llvm::Value *EmittedE, bool IsDynamic);
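  /// A pass_object_size-aware example (hypothetical user code): inside
  /// \c fill, __builtin_object_size(buf, 0) resolves to the size forwarded
  /// implicitly by each caller rather than being folded to -1:
  ///
  /// \code
  ///   void fill(char *buf __attribute__((pass_object_size(0))), char c);
  /// \endcode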

  llvm::Value *emitCountedBySize(const Expr *E, llvm::Value *EmittedE,
                                 unsigned Type, llvm::IntegerType *ResType);

  llvm::Value *emitCountedByMemberSize(const MemberExpr *E, const Expr *Idx,
                                       llvm::Value *EmittedE,
                                       QualType CastedArrayElementTy,
                                       unsigned Type,
                                       llvm::IntegerType *ResType);

  llvm::Value *emitCountedByPointerSize(const ImplicitCastExpr *E,
                                        const Expr *Idx, llvm::Value *EmittedE,
                                        QualType CastedArrayElementTy,
                                        unsigned Type,
                                        llvm::IntegerType *ResType);

  void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
                                       Address Loc);

public:
  enum class EvaluationOrder {
    ///! No language constraints on evaluation order.
    Default,
    ///! Language semantics require left-to-right evaluation.
    ForceLeftToRight,
    ///! Language semantics require right-to-left evaluation.
    ForceRightToLeft
  };
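  // For intuition (illustrative): C++17 fixes the operand order of some
  // overloaded operators, which is where the forced orders arise, e.g.
  //
  //   s = f();          // overloaded '=': RHS first (ForceRightToLeft)
  //   os << f() << g(); // overloaded '<<': left-to-right (ForceLeftToRight)
  //   h(f(), g());      // ordinary call: Default, order unspecified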

  // Wrapper for function prototype sources. Wraps either a FunctionProtoType
  // or an ObjCMethodDecl.
  struct PrototypeWrapper {
    llvm::PointerUnion<const FunctionProtoType *, const ObjCMethodDecl *> P;

    PrototypeWrapper(const FunctionProtoType *FT) : P(FT) {}
    PrototypeWrapper(const ObjCMethodDecl *MD) : P(MD) {}
  };

  void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype,
                    llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
                    AbstractCallee AC = AbstractCallee(),
                    unsigned ParamsToSkip = 0,
                    EvaluationOrder Order = EvaluationOrder::Default);

  /// EmitPointerWithAlignment - Given an expression with a pointer type,
  /// emit the value and compute our best estimate of the alignment of the
  /// pointee.
  ///
  /// \param BaseInfo - If non-null, this will be initialized with
  /// information about the source of the alignment and the may-alias
  /// attribute. Note that this function will conservatively fall back on
  /// the type when it doesn't recognize the expression and may-alias will
  /// be set to false.
  ///
  /// One reasonable way to use this information is when there's a language
  /// guarantee that the pointer must be aligned to some stricter value, and
  /// we're simply trying to ensure that sufficiently obvious uses of under-
  /// aligned objects don't get miscompiled; for example, a placement new
  /// into the address of a local variable. In such a case, it's quite
  /// reasonable to just ignore the returned alignment when it isn't from an
  /// explicit source.
  Address
  EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr,
                           TBAAAccessInfo *TBAAInfo = nullptr,
                           KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
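  /// The placement-new case mentioned above, spelled out (illustrative): the
  /// computed alignment of \c buf is only 1, but the new-expression is only
  /// valid if the address is suitably aligned for int, so that language
  /// guarantee may be trusted instead:
  ///
  /// \code
  ///   char buf[sizeof(int)];
  ///   new (buf) int(42);
  /// \endcode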

  /// If \p E references a parameter with pass_object_size info or a constant
  /// array size modifier, emit the object size divided by the size of
  /// \p EltTy. Otherwise return null.
  llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);

  void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);

  struct FMVResolverOption {
    llvm::Function *Function;
    llvm::SmallVector<StringRef, 8> Features;
    std::optional<StringRef> Architecture;

    FMVResolverOption(llvm::Function *F, ArrayRef<StringRef> Feats,
                      std::optional<StringRef> Arch = std::nullopt)
        : Function(F), Features(Feats), Architecture(Arch) {}
  };

  // Emits the body of a multiversion function's resolver. Assumes that the
  // options are already sorted in the proper order, with the 'default' option
  // last (if it exists).
  void EmitMultiVersionResolver(llvm::Function *Resolver,
                                ArrayRef<FMVResolverOption> Options);
  void EmitX86MultiVersionResolver(llvm::Function *Resolver,
                                   ArrayRef<FMVResolverOption> Options);
  void EmitAArch64MultiVersionResolver(llvm::Function *Resolver,
                                       ArrayRef<FMVResolverOption> Options);
  void EmitRISCVMultiVersionResolver(llvm::Function *Resolver,
                                     ArrayRef<FMVResolverOption> Options);
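  // A source-level construct that gives rise to such a resolver
  // (illustrative):
  //
  //   __attribute__((target_clones("avx2", "default")))
  //   int dot(const int *a, const int *b, int n);
  //
  // The emitted resolver selects the best clone using the target's CPU
  // detection hooks (e.g. EmitX86CpuSupports below).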

private:
  QualType getVarArgType(const Expr *Arg);

  void EmitDeclMetadata();

  BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
                                       const AutoVarEmission &emission);

  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);

  llvm::Value *GetValueForARMHint(unsigned BuiltinID);
  llvm::Value *EmitX86CpuIs(const CallExpr *E);
  llvm::Value *EmitX86CpuIs(StringRef CPUStr);
  llvm::Value *EmitX86CpuSupports(const CallExpr *E);
  llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
  llvm::Value *EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask);
  llvm::Value *EmitX86CpuInit();
  llvm::Value *FormX86ResolverCondition(const FMVResolverOption &RO);
  llvm::Value *EmitAArch64CpuInit();
  llvm::Value *FormAArch64ResolverCondition(const FMVResolverOption &RO);
  llvm::Value *EmitAArch64CpuSupports(const CallExpr *E);
  llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
};

inline DominatingLLVMValue::saved_type
DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
  if (!needsSaving(value))
    return saved_type(value, false);

  // Otherwise, we need an alloca.
  auto align = CharUnits::fromQuantity(
      CGF.CGM.getDataLayout().getPrefTypeAlign(value->getType()));
  Address alloca =
      CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
  CGF.Builder.CreateStore(value, alloca);

  return saved_type(alloca.emitRawPointer(CGF), true);
}

inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
                                                 saved_type value) {
  // If the value says it wasn't saved, trust that it's still dominating.
  if (!value.getInt())
    return value.getPointer();

  // Otherwise, it should be an alloca instruction, as set up in save().
  auto alloca = cast<llvm::AllocaInst>(value.getPointer());
  return CGF.Builder.CreateAlignedLoad(alloca->getAllocatedType(), alloca,
                                       alloca->getAlign());
}
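
// A minimal sketch of how save/restore pair up around a conditionally
// evaluated cleanup (illustrative; Ptr stands for any dominating value):
//
//   DominatingLLVMValue::saved_type Saved =
//       DominatingLLVMValue::save(CGF, Ptr);  // spills iff needsSaving(Ptr)
//   ...                                       // conditional control flow
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);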

} // end namespace CodeGen

// Map the LangOption for floating point exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind);
} // end namespace clang

#endif