1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGObjCRuntime.h"
21#include "CGOpenMPRuntime.h"
22#include "CGRecordLayout.h"
23#include "CodeGenFunction.h"
24#include "CodeGenModule.h"
25#include "CodeGenPGO.h"
26#include "ConstantEmitter.h"
27#include "TargetInfo.h"
28#include "clang/AST/ASTContext.h"
29#include "clang/AST/ASTLambda.h"
30#include "clang/AST/Attr.h"
31#include "clang/AST/DeclObjC.h"
32#include "clang/AST/InferAlloc.h"
33#include "clang/AST/NSAPI.h"
34#include "clang/AST/ParentMapContext.h"
35#include "clang/AST/StmtVisitor.h"
36#include "clang/Basic/Builtins.h"
37#include "clang/Basic/CodeGenOptions.h"
38#include "clang/Basic/Module.h"
39#include "clang/Basic/SourceManager.h"
40#include "llvm/ADT/STLExtras.h"
41#include "llvm/ADT/ScopeExit.h"
42#include "llvm/ADT/StringExtras.h"
43#include "llvm/IR/Constants.h"
44#include "llvm/IR/DataLayout.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/LLVMContext.h"
47#include "llvm/IR/MDBuilder.h"
48#include "llvm/IR/MatrixBuilder.h"
49#include "llvm/Support/ConvertUTF.h"
50#include "llvm/Support/Endian.h"
51#include "llvm/Support/MathExtras.h"
52#include "llvm/Support/Path.h"
53#include "llvm/Support/xxhash.h"
54#include "llvm/Transforms/Utils/SanitizerStats.h"
55
56#include <numeric>
57#include <optional>
58#include <string>
59
60using namespace clang;
61using namespace CodeGen;
62
63namespace clang {
64// TODO: consider deprecating ClSanitizeGuardChecks; functionality is subsumed
65// by -fsanitize-skip-hot-cutoff
66llvm::cl::opt<bool> ClSanitizeGuardChecks(
67 "ubsan-guard-checks", llvm::cl::Optional,
68 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
69
70} // namespace clang
71
72//===--------------------------------------------------------------------===//
73// Defines for metadata
74//===--------------------------------------------------------------------===//
75
// These values must be kept in sync with the corresponding values in the
// UBSan runtime library.
77enum VariableTypeDescriptorKind : uint16_t {
78 /// An integer type.
79 TK_Integer = 0x0000,
80 /// A floating-point type.
81 TK_Float = 0x0001,
  /// A _BitInt(N) type.
83 TK_BitInt = 0x0002,
84 /// Any other type. The value representation is unspecified.
85 TK_Unknown = 0xffff
86};
87
88//===--------------------------------------------------------------------===//
89// Miscellaneous Helper Methods
90//===--------------------------------------------------------------------===//
91
92static llvm::StringRef GetUBSanTrapForHandler(SanitizerHandler ID) {
93 switch (ID) {
94#define SANITIZER_CHECK(Enum, Name, Version, Msg) \
95 case SanitizerHandler::Enum: \
96 return Msg;
97 LIST_SANITIZER_CHECKS
98#undef SANITIZER_CHECK
99 }
100 llvm_unreachable("unhandled switch case");
101}
102
/// CreateTempAllocaWithoutCast - This creates an alloca and inserts it into
/// the entry block, without casting the result to the expected address space.
105RawAddress
106CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
107 const Twine &Name,
108 llvm::Value *ArraySize) {
109 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
110 Alloca->setAlignment(Align.getAsAlign());
111 return RawAddress(Alloca, Ty, Align, KnownNonNull);
112}
113
114RawAddress CodeGenFunction::MaybeCastStackAddressSpace(RawAddress Alloca,
115 LangAS DestLangAS,
116 llvm::Value *ArraySize) {
117
118 llvm::Value *V = Alloca.getPointer();
  // An alloca always yields a pointer in the alloca address space, which may
  // differ from the address space expected by the language. For example, in
  // C++, automatic variables live in the default address space. Therefore,
  // cast the alloca to the default address space when necessary.
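  // For illustration (assuming an AMDGPU-like target, where allocas live in
  // addrspace(5)): the alloca is created as '%tmp = alloca i32, addrspace(5)'
  // and the pointer handed back from here is
  // 'addrspacecast ptr addrspace(5) %tmp to ptr'.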
123
124 unsigned DestAddrSpace = getContext().getTargetAddressSpace(AS: DestLangAS);
125 if (DestAddrSpace != Alloca.getAddressSpace()) {
126 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
127 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
128 // otherwise alloca is inserted at the current insertion point of the
129 // builder.
130 if (!ArraySize)
131 Builder.SetInsertPoint(getPostAllocaInsertPoint());
132 V = getTargetHooks().performAddrSpaceCast(
133 CGF&: *this, V, SrcAddr: getASTAllocaAddressSpace(), DestTy: Builder.getPtrTy(AddrSpace: DestAddrSpace),
134 /*IsNonNull=*/true);
135 }
136
137 return RawAddress(V, Alloca.getElementType(), Alloca.getAlignment(),
138 KnownNonNull);
139}
140
141RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, LangAS DestLangAS,
142 CharUnits Align, const Twine &Name,
143 llvm::Value *ArraySize,
144 RawAddress *AllocaAddr) {
145 RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
146 if (AllocaAddr)
147 *AllocaAddr = Alloca;
148 return MaybeCastStackAddressSpace(Alloca, DestLangAS, ArraySize);
149}
150
151/// CreateTempAlloca - This creates an alloca and inserts it into the entry
152/// block if \p ArraySize is nullptr, otherwise inserts it at the current
153/// insertion point of the builder.
154llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
155 const Twine &Name,
156 llvm::Value *ArraySize) {
157 llvm::AllocaInst *Alloca;
158 if (ArraySize)
159 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
160 else
161 Alloca =
162 new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
163 ArraySize, Name, AllocaInsertPt->getIterator());
164 if (SanOpts.Mask & SanitizerKind::Address) {
165 Alloca->addAnnotationMetadata(Annotations: {"alloca_name_altered", Name.str()});
166 }
167 if (Allocas) {
168 Allocas->Add(I: Alloca);
169 }
170 return Alloca;
171}
172
173/// CreateDefaultAlignTempAlloca - This creates an alloca with the
174/// default alignment of the corresponding LLVM type, which is *not*
175/// guaranteed to be related in any way to the expected alignment of
176/// an AST type that might have been lowered to Ty.
177RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
178 const Twine &Name) {
179 CharUnits Align =
180 CharUnits::fromQuantity(Quantity: CGM.getDataLayout().getPrefTypeAlign(Ty));
181 return CreateTempAlloca(Ty, align: Align, Name);
182}
183
184RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
185 CharUnits Align = getContext().getTypeAlignInChars(T: Ty);
186 return CreateTempAlloca(Ty: ConvertType(T: Ty), align: Align, Name);
187}
188
189RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
190 RawAddress *Alloca) {
191 // FIXME: Should we prefer the preferred type alignment here?
192 return CreateMemTemp(T: Ty, Align: getContext().getTypeAlignInChars(T: Ty), Name, Alloca);
193}
194
195RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
196 const Twine &Name,
197 RawAddress *Alloca) {
198 RawAddress Result = CreateTempAlloca(Ty: ConvertTypeForMem(T: Ty), align: Align, Name,
199 /*ArraySize=*/nullptr, Alloca);
200
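  // Illustrative example for the constant-matrix case below: a matrix such as
  //   typedef float f2x2 __attribute__((matrix_type(2, 2)));
  // is lowered in memory as '[4 x float]', but whole-matrix loads and stores
  // operate on a '<4 x float>' vector, so the temporary's element type is
  // rewritten to the vector type here.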
201 if (Ty->isConstantMatrixType()) {
202 auto *ArrayTy = cast<llvm::ArrayType>(Val: Result.getElementType());
203 auto *VectorTy = llvm::FixedVectorType::get(ElementType: ArrayTy->getElementType(),
204 NumElts: ArrayTy->getNumElements());
205
206 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
207 KnownNonNull);
208 }
209 return Result;
210}
211
212RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
213 CharUnits Align,
214 const Twine &Name) {
215 return CreateTempAllocaWithoutCast(Ty: ConvertTypeForMem(T: Ty), Align, Name);
216}
217
218RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
219 const Twine &Name) {
220 return CreateMemTempWithoutCast(Ty, Align: getContext().getTypeAlignInChars(T: Ty),
221 Name);
222}
223
224/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
225/// expression and compare the result against zero, returning an Int1Ty value.
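/// Illustrative note: member pointers cannot simply be compared against zero.
/// Under the Itanium C++ ABI, for example, a null pointer-to-data-member is
/// represented as -1, so a source-level test such as
///   int S::*pm = ...;
///   if (pm) { ... }
/// is lowered via the C++ ABI's EmitMemberPointerIsNotNull below.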
226llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
227 PGO->setCurrentStmt(E);
228 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
229 llvm::Value *MemPtr = EmitScalarExpr(E);
230 return CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF&: *this, MemPtr, MPT);
231 }
232
233 QualType BoolTy = getContext().BoolTy;
234 SourceLocation Loc = E->getExprLoc();
235 CGFPOptionsRAII FPOptsRAII(*this, E);
236 if (!E->getType()->isAnyComplexType())
237 return EmitScalarConversion(Src: EmitScalarExpr(E), SrcTy: E->getType(), DstTy: BoolTy, Loc);
238
239 return EmitComplexToScalarConversion(Src: EmitComplexExpr(E), SrcTy: E->getType(), DstTy: BoolTy,
240 Loc);
241}
242
243/// EmitIgnoredExpr - Emit code to compute the specified expression,
244/// ignoring the result.
245void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
246 if (E->isPRValue())
247 return (void)EmitAnyExpr(E, aggSlot: AggValueSlot::ignored(), ignoreResult: true);
248
  // If this is a conditional operator whose result is a bitfield, special-case
  // its emission. The normal EmitLValue path is particularly difficult to
  // codegen here, since creating a single LValue for two differently sized
  // bitfield operands is not really doable.
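  // Illustrative C++ example of such an expression (result unused):
  //   struct S { int a : 3; int b : 7; } s;
  //   cond ? s.a : s.b;   // a bitfield glvalue with two possible widths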
253 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
254 Val: E->IgnoreParenNoopCasts(Ctx: getContext()))) {
255 if (CondOp->getObjectKind() == OK_BitField)
256 return EmitIgnoredConditionalOperator(E: CondOp);
257 }
258
259 // Just emit it as an l-value and drop the result.
260 EmitLValue(E);
261}
262
263/// EmitAnyExpr - Emit code to compute the specified expression which
264/// can have any type. The result is returned as an RValue struct.
265/// If this is an aggregate expression, AggSlot indicates where the
266/// result should be returned.
267RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
268 AggValueSlot aggSlot,
269 bool ignoreResult) {
270 switch (getEvaluationKind(T: E->getType())) {
271 case TEK_Scalar:
272 return RValue::get(V: EmitScalarExpr(E, IgnoreResultAssign: ignoreResult));
273 case TEK_Complex:
274 return RValue::getComplex(C: EmitComplexExpr(E, IgnoreReal: ignoreResult, IgnoreImag: ignoreResult));
275 case TEK_Aggregate:
276 if (!ignoreResult && aggSlot.isIgnored())
277 aggSlot = CreateAggTemp(T: E->getType(), Name: "agg-temp");
278 EmitAggExpr(E, AS: aggSlot);
279 return aggSlot.asRValue();
280 }
281 llvm_unreachable("bad evaluation kind");
282}
283
284/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
285/// always be accessible even if no aggregate location is provided.
286RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
287 AggValueSlot AggSlot = AggValueSlot::ignored();
288
289 if (hasAggregateEvaluationKind(T: E->getType()))
290 AggSlot = CreateAggTemp(T: E->getType(), Name: "agg.tmp");
291 return EmitAnyExpr(E, aggSlot: AggSlot);
292}
293
294/// EmitAnyExprToMem - Evaluate an expression into a given memory
295/// location.
296void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
297 Address Location,
298 Qualifiers Quals,
299 bool IsInit) {
300 // FIXME: This function should take an LValue as an argument.
301 switch (getEvaluationKind(T: E->getType())) {
302 case TEK_Complex:
303 EmitComplexExprIntoLValue(E, dest: MakeAddrLValue(Addr: Location, T: E->getType()),
304 /*isInit*/ false);
305 return;
306
307 case TEK_Aggregate: {
308 EmitAggExpr(E, AS: AggValueSlot::forAddr(addr: Location, quals: Quals,
309 isDestructed: AggValueSlot::IsDestructed_t(IsInit),
310 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
311 isAliased: AggValueSlot::IsAliased_t(!IsInit),
312 mayOverlap: AggValueSlot::MayOverlap));
313 return;
314 }
315
316 case TEK_Scalar: {
317 RValue RV = RValue::get(V: EmitScalarExpr(E, /*Ignore*/ IgnoreResultAssign: false));
318 LValue LV = MakeAddrLValue(Addr: Location, T: E->getType());
319 EmitStoreThroughLValue(Src: RV, Dst: LV);
320 return;
321 }
322 }
323 llvm_unreachable("bad evaluation kind");
324}
325
326void CodeGenFunction::EmitInitializationToLValue(
327 const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
328 QualType Type = LV.getType();
329 switch (getEvaluationKind(T: Type)) {
330 case TEK_Complex:
331 EmitComplexExprIntoLValue(E, dest: LV, /*isInit*/ true);
332 return;
333 case TEK_Aggregate:
334 EmitAggExpr(E, AS: AggValueSlot::forLValue(LV, isDestructed: AggValueSlot::IsDestructed,
335 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
336 isAliased: AggValueSlot::IsNotAliased,
337 mayOverlap: AggValueSlot::MayOverlap, isZeroed: IsZeroed));
338 return;
339 case TEK_Scalar:
340 if (LV.isSimple())
341 EmitScalarInit(init: E, /*D=*/nullptr, lvalue: LV, /*Captured=*/capturedByInit: false);
342 else
343 EmitStoreThroughLValue(Src: RValue::get(V: EmitScalarExpr(E)), Dst: LV);
344 return;
345 }
346 llvm_unreachable("bad evaluation kind");
347}
348
349static void
350pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
351 const Expr *E, Address ReferenceTemporary) {
352 // Objective-C++ ARC:
353 // If we are binding a reference to a temporary that has ownership, we
354 // need to perform retain/release operations on the temporary.
355 //
356 // FIXME: This should be looking at E, not M.
357 if (auto Lifetime = M->getType().getObjCLifetime()) {
358 switch (Lifetime) {
359 case Qualifiers::OCL_None:
360 case Qualifiers::OCL_ExplicitNone:
361 // Carry on to normal cleanup handling.
362 break;
363
364 case Qualifiers::OCL_Autoreleasing:
365 // Nothing to do; cleaned up by an autorelease pool.
366 return;
367
368 case Qualifiers::OCL_Strong:
369 case Qualifiers::OCL_Weak:
370 switch (StorageDuration Duration = M->getStorageDuration()) {
371 case SD_Static:
372 // Note: we intentionally do not register a cleanup to release
373 // the object on program termination.
374 return;
375
376 case SD_Thread:
377 // FIXME: We should probably register a cleanup in this case.
378 return;
379
380 case SD_Automatic:
381 case SD_FullExpression:
382 CodeGenFunction::Destroyer *Destroy;
383 CleanupKind CleanupKind;
384 if (Lifetime == Qualifiers::OCL_Strong) {
385 const ValueDecl *VD = M->getExtendingDecl();
386 bool Precise = isa_and_nonnull<VarDecl>(Val: VD) &&
387 VD->hasAttr<ObjCPreciseLifetimeAttr>();
388 CleanupKind = CGF.getARCCleanupKind();
389 Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
390 : &CodeGenFunction::destroyARCStrongImprecise;
391 } else {
392 // __weak objects always get EH cleanups; otherwise, exceptions
393 // could cause really nasty crashes instead of mere leaks.
394 CleanupKind = NormalAndEHCleanup;
395 Destroy = &CodeGenFunction::destroyARCWeak;
396 }
397 if (Duration == SD_FullExpression)
398 CGF.pushDestroy(kind: CleanupKind, addr: ReferenceTemporary,
399 type: M->getType(), destroyer: *Destroy,
400 useEHCleanupForArray: CleanupKind & EHCleanup);
401 else
402 CGF.pushLifetimeExtendedDestroy(kind: CleanupKind, addr: ReferenceTemporary,
403 type: M->getType(),
404 destroyer: *Destroy, useEHCleanupForArray: CleanupKind & EHCleanup);
405 return;
406
407 case SD_Dynamic:
408 llvm_unreachable("temporary cannot have dynamic storage duration");
409 }
410 llvm_unreachable("unknown storage duration");
411 }
412 }
413
414 QualType::DestructionKind DK = E->getType().isDestructedType();
415 if (DK != QualType::DK_none) {
416 switch (M->getStorageDuration()) {
417 case SD_Static:
418 case SD_Thread: {
419 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
420 if (const auto *ClassDecl =
421 E->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
422 ClassDecl && !ClassDecl->hasTrivialDestructor())
423 // Get the destructor for the reference temporary.
424 ReferenceTemporaryDtor = ClassDecl->getDestructor();
425
426 if (!ReferenceTemporaryDtor)
427 return;
428
429 llvm::FunctionCallee CleanupFn;
430 llvm::Constant *CleanupArg;
431 if (E->getType()->isArrayType()) {
432 CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
433 addr: ReferenceTemporary, type: E->getType(), destroyer: CodeGenFunction::destroyCXXObject,
434 useEHCleanupForArray: CGF.getLangOpts().Exceptions,
435 VD: dyn_cast_or_null<VarDecl>(Val: M->getExtendingDecl()));
436 CleanupArg = llvm::Constant::getNullValue(Ty: CGF.Int8PtrTy);
437 } else {
438 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
439 GD: GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
440 CleanupArg =
441 cast<llvm::Constant>(Val: ReferenceTemporary.emitRawPointer(CGF));
442 }
443 CGF.CGM.getCXXABI().registerGlobalDtor(
444 CGF, D: *cast<VarDecl>(Val: M->getExtendingDecl()), Dtor: CleanupFn, Addr: CleanupArg);
445 } break;
446 case SD_FullExpression:
447 CGF.pushDestroy(dtorKind: DK, addr: ReferenceTemporary, type: E->getType());
448 break;
449 case SD_Automatic:
450 CGF.pushLifetimeExtendedDestroy(dtorKind: DK, addr: ReferenceTemporary, type: E->getType());
451 break;
452 case SD_Dynamic:
453 llvm_unreachable("temporary cannot have dynamic storage duration");
454 }
455 }
456}
457
458static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
459 const MaterializeTemporaryExpr *M,
460 const Expr *Inner,
461 RawAddress *Alloca = nullptr) {
462 auto &TCG = CGF.getTargetHooks();
463 switch (M->getStorageDuration()) {
464 case SD_FullExpression:
465 case SD_Automatic: {
    // If we have a constant temporary array or record, try to promote it into
    // a constant global under the same rules under which a normal constant
    // would have been promoted. This is easier on the optimizer and generally
    // emits fewer instructions.
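    // Illustrative example (assuming -fmerge-all-constants): binding a
    // reference to a constant aggregate temporary, e.g.
    //   const int (&r)[3] = {1, 2, 3};
    // can emit the initializer as a private unnamed constant global
    // ".ref.tmp" instead of an alloca plus element-wise stores.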
470 QualType Ty = Inner->getType();
471 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
472 (Ty->isArrayType() || Ty->isRecordType()) &&
473 Ty.isConstantStorage(Ctx: CGF.getContext(), ExcludeCtor: true, ExcludeDtor: false))
474 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(E: Inner, T: Ty)) {
475 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
476 auto *GV = new llvm::GlobalVariable(
477 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
478 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
479 llvm::GlobalValue::NotThreadLocal,
480 CGF.getContext().getTargetAddressSpace(AS));
481 CharUnits alignment = CGF.getContext().getTypeAlignInChars(T: Ty);
482 GV->setAlignment(alignment.getAsAlign());
483 llvm::Constant *C = GV;
484 if (AS != LangAS::Default)
485 C = TCG.performAddrSpaceCast(
486 CGM&: CGF.CGM, V: GV, SrcAddr: AS,
487 DestTy: llvm::PointerType::get(
488 C&: CGF.getLLVMContext(),
489 AddressSpace: CGF.getContext().getTargetAddressSpace(AS: LangAS::Default)));
490 // FIXME: Should we put the new global into a COMDAT?
491 return RawAddress(C, GV->getValueType(), alignment);
492 }
493 return CGF.CreateMemTemp(Ty, Name: "ref.tmp", Alloca);
494 }
495 case SD_Thread:
496 case SD_Static:
497 return CGF.CGM.GetAddrOfGlobalTemporary(E: M, Inner);
498
499 case SD_Dynamic:
500 llvm_unreachable("temporary can't have dynamic storage duration");
501 }
502 llvm_unreachable("unknown storage duration");
503}
504
505/// Helper method to check if the underlying ABI is AAPCS
506static bool isAAPCS(const TargetInfo &TargetInfo) {
507 return TargetInfo.getABI().starts_with(Prefix: "aapcs");
508}
509
510LValue CodeGenFunction::
511EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
512 const Expr *E = M->getSubExpr();
513
514 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
515 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
516 "Reference should never be pseudo-strong!");
517
  // FIXME: Ideally this would use EmitAnyExprToMem; however, we cannot do so,
  // as that would cause the lifetime adjustment to be lost for ARC.
520 auto ownership = M->getType().getObjCLifetime();
521 if (ownership != Qualifiers::OCL_None &&
522 ownership != Qualifiers::OCL_ExplicitNone) {
523 RawAddress Object = createReferenceTemporary(CGF&: *this, M, Inner: E);
524 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Val: Object.getPointer())) {
525 llvm::Type *Ty = ConvertTypeForMem(T: E->getType());
526 Object = Object.withElementType(ElemTy: Ty);
527
528 // createReferenceTemporary will promote the temporary to a global with a
529 // constant initializer if it can. It can only do this to a value of
530 // ARC-manageable type if the value is global and therefore "immune" to
531 // ref-counting operations. Therefore we have no need to emit either a
532 // dynamic initialization or a cleanup and we can just return the address
533 // of the temporary.
534 if (Var->hasInitializer())
535 return MakeAddrLValue(Addr: Object, T: M->getType(), Source: AlignmentSource::Decl);
536
537 Var->setInitializer(CGM.EmitNullConstant(T: E->getType()));
538 }
539 LValue RefTempDst = MakeAddrLValue(Addr: Object, T: M->getType(),
540 Source: AlignmentSource::Decl);
541
542 switch (getEvaluationKind(T: E->getType())) {
543 default: llvm_unreachable("expected scalar or aggregate expression");
544 case TEK_Scalar:
545 EmitScalarInit(init: E, D: M->getExtendingDecl(), lvalue: RefTempDst, capturedByInit: false);
546 break;
547 case TEK_Aggregate: {
548 EmitAggExpr(E, AS: AggValueSlot::forAddr(addr: Object,
549 quals: E->getType().getQualifiers(),
550 isDestructed: AggValueSlot::IsDestructed,
551 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
552 isAliased: AggValueSlot::IsNotAliased,
553 mayOverlap: AggValueSlot::DoesNotOverlap));
554 break;
555 }
556 }
557
558 pushTemporaryCleanup(CGF&: *this, M, E, ReferenceTemporary: Object);
559 return RefTempDst;
560 }
561
562 SmallVector<const Expr *, 2> CommaLHSs;
563 SmallVector<SubobjectAdjustment, 2> Adjustments;
564 E = E->skipRValueSubobjectAdjustments(CommaLHS&: CommaLHSs, Adjustments);
565
566 for (const auto &Ignored : CommaLHSs)
567 EmitIgnoredExpr(E: Ignored);
568
569 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(Val: E)) {
570 if (opaque->getType()->isRecordType()) {
571 assert(Adjustments.empty());
572 return EmitOpaqueValueLValue(e: opaque);
573 }
574 }
575
576 // Create and initialize the reference temporary.
577 RawAddress Alloca = Address::invalid();
578 RawAddress Object = createReferenceTemporary(CGF&: *this, M, Inner: E, Alloca: &Alloca);
579 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
580 Val: Object.getPointer()->stripPointerCasts())) {
581 llvm::Type *TemporaryType = ConvertTypeForMem(T: E->getType());
582 Object = Object.withElementType(ElemTy: TemporaryType);
583 // If the temporary is a global and has a constant initializer or is a
584 // constant temporary that we promoted to a global, we may have already
585 // initialized it.
586 if (!Var->hasInitializer()) {
587 Var->setInitializer(CGM.EmitNullConstant(T: E->getType()));
588 QualType RefType = M->getType().withoutLocalFastQualifiers();
589 if (RefType.getPointerAuth()) {
590 // Use the qualifier of the reference temporary to sign the pointer.
591 LValue LV = MakeRawAddrLValue(V: Object.getPointer(), T: RefType,
592 Alignment: Object.getAlignment());
593 EmitScalarInit(init: E, D: M->getExtendingDecl(), lvalue: LV, capturedByInit: false);
594 } else {
595 EmitAnyExprToMem(E, Location: Object, Quals: Qualifiers(), /*IsInit*/ true);
596 }
597 }
598 } else {
599 switch (M->getStorageDuration()) {
600 case SD_Automatic:
601 if (EmitLifetimeStart(Addr: Alloca.getPointer())) {
602 pushCleanupAfterFullExpr<CallLifetimeEnd>(Kind: NormalEHLifetimeMarker,
603 A: Alloca);
604 }
605 break;
606
607 case SD_FullExpression: {
608 if (!ShouldEmitLifetimeMarkers)
609 break;
610
      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks. However, when inside an "await.suspend"
      // block, we should always avoid the conditional cleanup, because it
      // creates a boolean marker that lives across await_suspend, and
      // await_suspend may destroy the coroutine frame.
618 ConditionalEvaluation *OldConditional = nullptr;
619 CGBuilderTy::InsertPoint OldIP;
620 if (isInConditionalBranch() && !E->getType().isDestructedType() &&
621 ((!SanOpts.has(K: SanitizerKind::HWAddress) &&
622 !SanOpts.has(K: SanitizerKind::Memory) &&
623 !SanOpts.has(K: SanitizerKind::MemtagStack) &&
624 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
625 inSuspendBlock())) {
626 OldConditional = OutermostConditional;
627 OutermostConditional = nullptr;
628
629 OldIP = Builder.saveIP();
630 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
631 Builder.restoreIP(IP: CGBuilderTy::InsertPoint(
632 Block, llvm::BasicBlock::iterator(Block->back())));
633 }
634
635 if (EmitLifetimeStart(Addr: Alloca.getPointer())) {
636 pushFullExprCleanup<CallLifetimeEnd>(kind: NormalEHLifetimeMarker, A: Alloca);
637 }
638
639 if (OldConditional) {
640 OutermostConditional = OldConditional;
641 Builder.restoreIP(IP: OldIP);
642 }
643 break;
644 }
645
646 default:
647 break;
648 }
649 EmitAnyExprToMem(E, Location: Object, Quals: Qualifiers(), /*IsInit*/true);
650 }
651 pushTemporaryCleanup(CGF&: *this, M, E, ReferenceTemporary: Object);
652
653 // Perform derived-to-base casts and/or field accesses, to get from the
654 // temporary object we created (and, potentially, for which we extended
655 // the lifetime) to the subobject we're binding the reference to.
656 for (SubobjectAdjustment &Adjustment : llvm::reverse(C&: Adjustments)) {
657 switch (Adjustment.Kind) {
658 case SubobjectAdjustment::DerivedToBaseAdjustment:
659 Object =
660 GetAddressOfBaseClass(Value: Object, Derived: Adjustment.DerivedToBase.DerivedClass,
661 PathBegin: Adjustment.DerivedToBase.BasePath->path_begin(),
662 PathEnd: Adjustment.DerivedToBase.BasePath->path_end(),
663 /*NullCheckValue=*/ false, Loc: E->getExprLoc());
664 break;
665
666 case SubobjectAdjustment::FieldAdjustment: {
667 LValue LV = MakeAddrLValue(Addr: Object, T: E->getType(), Source: AlignmentSource::Decl);
668 LV = EmitLValueForField(Base: LV, Field: Adjustment.Field);
669 assert(LV.isSimple() &&
670 "materialized temporary field is not a simple lvalue");
671 Object = LV.getAddress();
672 break;
673 }
674
675 case SubobjectAdjustment::MemberPointerAdjustment: {
676 llvm::Value *Ptr = EmitScalarExpr(E: Adjustment.Ptr.RHS);
677 Object = EmitCXXMemberDataPointerAddress(
678 E, base: Object, memberPtr: Ptr, memberPtrType: Adjustment.Ptr.MPT, /*IsInBounds=*/true);
679 break;
680 }
681 }
682 }
683
684 return MakeAddrLValue(Addr: Object, T: M->getType(), Source: AlignmentSource::Decl);
685}
686
687RValue
688CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
689 // Emit the expression as an lvalue.
690 LValue LV = EmitLValue(E);
691 assert(LV.isSimple());
692 llvm::Value *Value = LV.getPointer(CGF&: *this);
693
694 if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
695 // C++11 [dcl.ref]p5 (as amended by core issue 453):
696 // If a glvalue to which a reference is directly bound designates neither
697 // an existing object or function of an appropriate type nor a region of
698 // storage of suitable size and alignment to contain an object of the
699 // reference's type, the behavior is undefined.
700 QualType Ty = E->getType();
701 EmitTypeCheck(TCK: TCK_ReferenceBinding, Loc: E->getExprLoc(), V: Value, Type: Ty);
702 }
703
704 return RValue::get(V: Value);
705}
706
707
708/// getAccessedFieldNo - Given an encoded value and a result number, return the
709/// input field number being accessed.
710unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
711 const llvm::Constant *Elts) {
712 return cast<llvm::ConstantInt>(Val: Elts->getAggregateElement(Elt: Idx))
713 ->getZExtValue();
714}
715
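/// Mix a pointer-derived value into an accumulated hash. Informally, a sketch
/// of what the IR built below computes:
///   A0   = Ptr * 0xbf58476d1ce4e5b9   // the same multiplier used by splitmix64
///   A1   = A0 ^ (A0 >> 31)
///   Acc' = Acc ^ A1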
716static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
717 llvm::Value *Ptr) {
718 llvm::Value *A0 =
719 Builder.CreateMul(LHS: Ptr, RHS: Builder.getInt64(C: 0xbf58476d1ce4e5b9u));
720 llvm::Value *A1 =
721 Builder.CreateXor(LHS: A0, RHS: Builder.CreateLShr(LHS: A0, RHS: Builder.getInt64(C: 31)));
722 return Builder.CreateXor(LHS: Acc, RHS: A1);
723}
724
725bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
726 return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
727 TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
728}
729
730bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
731 CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
732 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
733 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
734 TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
735 TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
736}
737
738bool CodeGenFunction::sanitizePerformTypeCheck() const {
739 return SanOpts.has(K: SanitizerKind::Null) ||
740 SanOpts.has(K: SanitizerKind::Alignment) ||
741 SanOpts.has(K: SanitizerKind::ObjectSize) ||
742 SanOpts.has(K: SanitizerKind::Vptr);
743}
744
745void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
746 llvm::Value *Ptr, QualType Ty,
747 CharUnits Alignment,
748 SanitizerSet SkippedChecks,
749 llvm::Value *ArraySize) {
750 if (!sanitizePerformTypeCheck())
751 return;
752
753 // Don't check pointers outside the default address space. The null check
754 // isn't correct, the object-size check isn't supported by LLVM, and we can't
755 // communicate the addresses to the runtime handler for the vptr check.
756 if (Ptr->getType()->getPointerAddressSpace())
757 return;
758
759 // Don't check pointers to volatile data. The behavior here is implementation-
760 // defined.
761 if (Ty.isVolatileQualified())
762 return;
763
764 // Quickly determine whether we have a pointer to an alloca. It's possible
765 // to skip null checks, and some alignment checks, for these pointers. This
766 // can reduce compile-time significantly.
767 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Val: Ptr->stripPointerCasts());
768
769 llvm::Value *IsNonNull = nullptr;
770 bool IsGuaranteedNonNull =
771 SkippedChecks.has(K: SanitizerKind::Null) || PtrToAlloca;
772
773 llvm::BasicBlock *Done = nullptr;
774 bool DoneViaNullSanitize = false;
775
776 {
777 auto CheckHandler = SanitizerHandler::TypeMismatch;
778 SanitizerDebugLocation SanScope(this,
779 {SanitizerKind::SO_Null,
780 SanitizerKind::SO_ObjectSize,
781 SanitizerKind::SO_Alignment},
782 CheckHandler);
783
784 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 3>
785 Checks;
786
787 llvm::Value *True = llvm::ConstantInt::getTrue(Context&: getLLVMContext());
788 bool AllowNullPointers = isNullPointerAllowed(TCK);
789 if ((SanOpts.has(K: SanitizerKind::Null) || AllowNullPointers) &&
790 !IsGuaranteedNonNull) {
791 // The glvalue must not be an empty glvalue.
792 IsNonNull = Builder.CreateIsNotNull(Arg: Ptr);
793
794 // The IR builder can constant-fold the null check if the pointer points
795 // to a constant.
796 IsGuaranteedNonNull = IsNonNull == True;
797
798 // Skip the null check if the pointer is known to be non-null.
799 if (!IsGuaranteedNonNull) {
800 if (AllowNullPointers) {
801 // When performing pointer casts, it's OK if the value is null.
802 // Skip the remaining checks in that case.
803 Done = createBasicBlock(name: "null");
804 DoneViaNullSanitize = true;
805 llvm::BasicBlock *Rest = createBasicBlock(name: "not.null");
806 Builder.CreateCondBr(Cond: IsNonNull, True: Rest, False: Done);
807 EmitBlock(BB: Rest);
808 } else {
809 Checks.push_back(Elt: std::make_pair(x&: IsNonNull, y: SanitizerKind::SO_Null));
810 }
811 }
812 }
813
814 if (SanOpts.has(K: SanitizerKind::ObjectSize) &&
815 !SkippedChecks.has(K: SanitizerKind::ObjectSize) &&
816 !Ty->isIncompleteType()) {
817 uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
818 llvm::Value *Size = llvm::ConstantInt::get(Ty: IntPtrTy, V: TySize);
819 if (ArraySize)
820 Size = Builder.CreateMul(LHS: Size, RHS: ArraySize);
821
822 // Degenerate case: new X[0] does not need an objectsize check.
823 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Val: Size);
824 if (!ConstantSize || !ConstantSize->isNullValue()) {
825 // The glvalue must refer to a large enough storage region.
        // FIXME: If Address Sanitizer is enabled, insert dynamic
        // instrumentation to check this.
829 // FIXME: Get object address space
830 llvm::Type *Tys[2] = {IntPtrTy, Int8PtrTy};
831 llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::objectsize, Tys);
832 llvm::Value *Min = Builder.getFalse();
833 llvm::Value *NullIsUnknown = Builder.getFalse();
834 llvm::Value *Dynamic = Builder.getFalse();
835 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
836 LHS: Builder.CreateCall(Callee: F, Args: {Ptr, Min, NullIsUnknown, Dynamic}), RHS: Size);
837 Checks.push_back(
838 Elt: std::make_pair(x&: LargeEnough, y: SanitizerKind::SO_ObjectSize));
839 }
840 }
841
842 llvm::MaybeAlign AlignVal;
843 llvm::Value *PtrAsInt = nullptr;
844
845 if (SanOpts.has(K: SanitizerKind::Alignment) &&
846 !SkippedChecks.has(K: SanitizerKind::Alignment)) {
847 AlignVal = Alignment.getAsMaybeAlign();
848 if (!Ty->isIncompleteType() && !AlignVal)
849 AlignVal = CGM.getNaturalTypeAlignment(T: Ty, BaseInfo: nullptr, TBAAInfo: nullptr,
850 /*ForPointeeType=*/forPointeeType: true)
851 .getAsMaybeAlign();
852
853 // The glvalue must be suitably aligned.
854 if (AlignVal && *AlignVal > llvm::Align(1) &&
855 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
856 PtrAsInt = Builder.CreatePtrToInt(V: Ptr, DestTy: IntPtrTy);
857 llvm::Value *Align = Builder.CreateAnd(
858 LHS: PtrAsInt, RHS: llvm::ConstantInt::get(Ty: IntPtrTy, V: AlignVal->value() - 1));
859 llvm::Value *Aligned =
860 Builder.CreateICmpEQ(LHS: Align, RHS: llvm::ConstantInt::get(Ty: IntPtrTy, V: 0));
861 if (Aligned != True)
862 Checks.push_back(
863 Elt: std::make_pair(x&: Aligned, y: SanitizerKind::SO_Alignment));
864 }
865 }
866
867 if (Checks.size() > 0) {
868 llvm::Constant *StaticData[] = {
869 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(T: Ty),
870 llvm::ConstantInt::get(Ty: Int8Ty, V: AlignVal ? llvm::Log2(A: *AlignVal) : 1),
871 llvm::ConstantInt::get(Ty: Int8Ty, V: TCK)};
872 EmitCheck(Checked: Checks, Check: CheckHandler, StaticArgs: StaticData, DynamicArgs: PtrAsInt ? PtrAsInt : Ptr);
873 }
874 }
875
876 // If possible, check that the vptr indicates that there is a subobject of
877 // type Ty at offset zero within this object.
878 //
879 // C++11 [basic.life]p5,6:
880 // [For storage which does not refer to an object within its lifetime]
881 // The program has undefined behavior if:
882 // -- the [pointer or glvalue] is used to access a non-static data member
883 // or call a non-static member function
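  // Illustrative example of what -fsanitize=vptr catches here:
  //   struct A { virtual void f(); };
  //   struct B { virtual void g(); };
  //   static_cast<A *>(static_cast<void *>(new B))->f();  // no A at offset 0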
884 if (SanOpts.has(K: SanitizerKind::Vptr) &&
885 !SkippedChecks.has(K: SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
886 SanitizerDebugLocation SanScope(this, {SanitizerKind::SO_Vptr},
887 SanitizerHandler::DynamicTypeCacheMiss);
888
889 // Ensure that the pointer is non-null before loading it. If there is no
890 // compile-time guarantee, reuse the run-time null check or emit a new one.
891 if (!IsGuaranteedNonNull) {
892 if (!IsNonNull)
893 IsNonNull = Builder.CreateIsNotNull(Arg: Ptr);
894 if (!Done)
895 Done = createBasicBlock(name: "vptr.null");
896 llvm::BasicBlock *VptrNotNull = createBasicBlock(name: "vptr.not.null");
897 Builder.CreateCondBr(Cond: IsNonNull, True: VptrNotNull, False: Done);
898 EmitBlock(BB: VptrNotNull);
899 }
900
901 // Compute a deterministic hash of the mangled name of the type.
902 SmallString<64> MangledName;
903 llvm::raw_svector_ostream Out(MangledName);
904 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T: Ty.getUnqualifiedType(),
905 Out);
906
907 // Contained in NoSanitizeList based on the mangled type.
908 if (!CGM.getContext().getNoSanitizeList().containsType(Mask: SanitizerKind::Vptr,
909 MangledTypeName: Out.str())) {
910 // Load the vptr, and mix it with TypeHash.
911 llvm::Value *TypeHash =
912 llvm::ConstantInt::get(Ty: Int64Ty, V: xxh3_64bits(data: Out.str()));
913
914 llvm::Type *VPtrTy = llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: 0);
915 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
916 llvm::Value *VPtrVal = GetVTablePtr(This: VPtrAddr, VTableTy: VPtrTy,
917 VTableClass: Ty->getAsCXXRecordDecl(),
918 AuthMode: VTableAuthMode::UnsafeUbsanStrip);
919 VPtrVal = Builder.CreateBitOrPointerCast(V: VPtrVal, DestTy: IntPtrTy);
920
921 llvm::Value *Hash =
922 emitHashMix(Builder, Acc: TypeHash, Ptr: Builder.CreateZExt(V: VPtrVal, DestTy: Int64Ty));
923 Hash = Builder.CreateTrunc(V: Hash, DestTy: IntPtrTy);
924
925 // Look the hash up in our cache.
926 const int CacheSize = 128;
927 llvm::Type *HashTable = llvm::ArrayType::get(ElementType: IntPtrTy, NumElements: CacheSize);
928 llvm::Value *Cache = CGM.CreateRuntimeVariable(Ty: HashTable,
929 Name: "__ubsan_vptr_type_cache");
930 llvm::Value *Slot = Builder.CreateAnd(LHS: Hash,
931 RHS: llvm::ConstantInt::get(Ty: IntPtrTy,
932 V: CacheSize-1));
933 llvm::Value *Indices[] = { Builder.getInt32(C: 0), Slot };
934 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
935 Ty: IntPtrTy, Addr: Builder.CreateInBoundsGEP(Ty: HashTable, Ptr: Cache, IdxList: Indices),
936 Align: getPointerAlign());
937
938 // If the hash isn't in the cache, call a runtime handler to perform the
939 // hard work of checking whether the vptr is for an object of the right
940 // type. This will either fill in the cache and return, or produce a
941 // diagnostic.
942 llvm::Value *EqualHash = Builder.CreateICmpEQ(LHS: CacheVal, RHS: Hash);
943 llvm::Constant *StaticData[] = {
944 EmitCheckSourceLocation(Loc),
945 EmitCheckTypeDescriptor(T: Ty),
946 CGM.GetAddrOfRTTIDescriptor(Ty: Ty.getUnqualifiedType()),
947 llvm::ConstantInt::get(Ty: Int8Ty, V: TCK)
948 };
949 llvm::Value *DynamicData[] = { Ptr, Hash };
950 EmitCheck(Checked: std::make_pair(x&: EqualHash, y: SanitizerKind::SO_Vptr),
951 Check: SanitizerHandler::DynamicTypeCacheMiss, StaticArgs: StaticData,
952 DynamicArgs: DynamicData);
953 }
954 }
955
956 if (Done) {
957 SanitizerDebugLocation SanScope(
958 this,
959 {DoneViaNullSanitize ? SanitizerKind::SO_Null : SanitizerKind::SO_Vptr},
960 DoneViaNullSanitize ? SanitizerHandler::TypeMismatch
961 : SanitizerHandler::DynamicTypeCacheMiss);
962 Builder.CreateBr(Dest: Done);
963 EmitBlock(BB: Done);
964 }
965}
966
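/// Illustrative example of the pass_object_size case handled below:
///   void fill(int *p __attribute__((pass_object_size(0))), int n);
/// The caller passes an implicit size-in-bytes argument alongside 'p';
/// dividing it by sizeof(int) recovers an element count usable as an array
/// bound.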
967llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
968 QualType EltTy) {
969 ASTContext &C = getContext();
970 uint64_t EltSize = C.getTypeSizeInChars(T: EltTy).getQuantity();
971 if (!EltSize)
972 return nullptr;
973
974 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(Val: E->IgnoreParenImpCasts());
975 if (!ArrayDeclRef)
976 return nullptr;
977
978 auto *ParamDecl = dyn_cast<ParmVarDecl>(Val: ArrayDeclRef->getDecl());
979 if (!ParamDecl)
980 return nullptr;
981
982 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
983 if (!POSAttr)
984 return nullptr;
985
986 // Don't load the size if it's a lower bound.
987 int POSType = POSAttr->getType();
988 if (POSType != 0 && POSType != 1)
989 return nullptr;
990
991 // Find the implicit size parameter.
992 auto PassedSizeIt = SizeArguments.find(Val: ParamDecl);
993 if (PassedSizeIt == SizeArguments.end())
994 return nullptr;
995
996 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
997 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
998 Address AddrOfSize = LocalDeclMap.find(Val: PassedSizeDecl)->second;
999 llvm::Value *SizeInBytes = EmitLoadOfScalar(Addr: AddrOfSize, /*Volatile=*/false,
1000 Ty: C.getSizeType(), Loc: E->getExprLoc());
1001 llvm::Value *SizeOfElement =
1002 llvm::ConstantInt::get(Ty: SizeInBytes->getType(), V: EltSize);
1003 return Builder.CreateUDiv(LHS: SizeInBytes, RHS: SizeOfElement);
1004}
1005
/// If Base is known to point to the start of an array, return the length of
/// that array. Return nullptr if the length cannot be determined.
1008static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
1009 const Expr *Base,
1010 QualType &IndexedType,
1011 LangOptions::StrictFlexArraysLevelKind
1012 StrictFlexArraysLevel) {
1013 // For the vector indexing extension, the bound is the number of elements.
1014 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
1015 IndexedType = Base->getType();
1016 return CGF.Builder.getInt32(C: VT->getNumElements());
1017 }
1018
1019 Base = Base->IgnoreParens();
1020
1021 if (const auto *CE = dyn_cast<CastExpr>(Val: Base)) {
1022 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
1023 !CE->getSubExpr()->isFlexibleArrayMemberLike(Context: CGF.getContext(),
1024 StrictFlexArraysLevel)) {
1025 CodeGenFunction::SanitizerScope SanScope(&CGF);
1026
1027 IndexedType = CE->getSubExpr()->getType();
1028 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
1029 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT))
1030 return CGF.Builder.getInt(AI: CAT->getSize());
1031
1032 if (const auto *VAT = dyn_cast<VariableArrayType>(Val: AT))
1033 return CGF.getVLASize(vla: VAT).NumElts;
1034 // Ignore pass_object_size here. It's not applicable on decayed pointers.
1035 }
1036 }
1037
1038 CodeGenFunction::SanitizerScope SanScope(&CGF);
1039
1040 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
1041 if (llvm::Value *POS = CGF.LoadPassedObjectSize(E: Base, EltTy)) {
1042 IndexedType = Base->getType();
1043 return POS;
1044 }
1045
1046 return nullptr;
1047}
1048
1049namespace {
1050
1051/// \p StructAccessBase returns the base \p Expr of a field access. It returns
1052/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
1053///
///   p in p->a.b.c
1055///
1056/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
1057/// looking for:
1058///
1059/// struct s {
1060/// struct s *ptr;
1061/// int count;
1062/// char array[] __attribute__((counted_by(count)));
1063/// };
1064///
1065/// If we have an expression like \p p->ptr->array[index], we want the
1066/// \p MemberExpr for \p p->ptr instead of \p p.
1067class StructAccessBase
1068 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
1069 const RecordDecl *ExpectedRD;
1070
1071 bool IsExpectedRecordDecl(const Expr *E) const {
1072 QualType Ty = E->getType();
1073 if (Ty->isPointerType())
1074 Ty = Ty->getPointeeType();
1075 return ExpectedRD == Ty->getAsRecordDecl();
1076 }
1077
1078public:
1079 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
1080
1081 //===--------------------------------------------------------------------===//
1082 // Visitor Methods
1083 //===--------------------------------------------------------------------===//
1084
1085 // NOTE: If we build C++ support for counted_by, then we'll have to handle
1086 // horrors like this:
1087 //
1088 // struct S {
1089 // int x, y;
1090 // int blah[] __attribute__((counted_by(x)));
1091 // } s;
1092 //
1093 // int foo(int index, int val) {
1094 // int (S::*IHatePMDs)[] = &S::blah;
1095 // (s.*IHatePMDs)[index] = val;
1096 // }
1097
1098 const Expr *Visit(const Expr *E) {
1099 return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(S: E);
1100 }
1101
1102 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1103
1104 // These are the types we expect to return (in order of most to least
1105 // likely):
1106 //
1107 // 1. DeclRefExpr - This is the expression for the base of the structure.
1108 // It's exactly what we want to build an access to the \p counted_by
1109 // field.
  // 2. MemberExpr - This is the expression that has the same \p RecordDecl
  //    as the flexible array member's lexical enclosing \p RecordDecl. This
  //    allows us to catch things like: "p->p->array"
1113 // 3. CompoundLiteralExpr - This is for people who create something
1114 // heretical like (struct foo has a flexible array member):
1115 //
1116 // (struct foo){ 1, 2 }.blah[idx];
1117 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1118 return IsExpectedRecordDecl(E) ? E : nullptr;
1119 }
1120 const Expr *VisitMemberExpr(const MemberExpr *E) {
1121 if (IsExpectedRecordDecl(E) && E->isArrow())
1122 return E;
1123 const Expr *Res = Visit(E: E->getBase());
1124 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1125 }
1126 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1127 return IsExpectedRecordDecl(E) ? E : nullptr;
1128 }
1129 const Expr *VisitCallExpr(const CallExpr *E) {
1130 return IsExpectedRecordDecl(E) ? E : nullptr;
1131 }
1132
1133 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1134 if (IsExpectedRecordDecl(E))
1135 return E;
1136 return Visit(E: E->getBase());
1137 }
1138 const Expr *VisitCastExpr(const CastExpr *E) {
1139 if (E->getCastKind() == CK_LValueToRValue)
1140 return IsExpectedRecordDecl(E) ? E : nullptr;
1141 return Visit(E: E->getSubExpr());
1142 }
1143 const Expr *VisitParenExpr(const ParenExpr *E) {
1144 return Visit(E: E->getSubExpr());
1145 }
1146 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1147 return Visit(E: E->getSubExpr());
1148 }
1149 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1150 return Visit(E: E->getSubExpr());
1151 }
1152};
1153
1154} // end anonymous namespace
1155
1156using RecIndicesTy = SmallVector<llvm::Value *, 8>;
1157
1158static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
1159 const FieldDecl *Field,
1160 RecIndicesTy &Indices) {
1161 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1162 int64_t FieldNo = -1;
1163 for (const FieldDecl *FD : RD->fields()) {
1164 if (!Layout.containsFieldDecl(FD))
1165 // This could happen if the field has a struct type that's empty. I don't
1166 // know why either.
1167 continue;
1168
1169 FieldNo = Layout.getLLVMFieldNo(FD);
1170 if (FD == Field) {
1171 Indices.emplace_back(Args: CGF.Builder.getInt32(C: FieldNo));
1172 return true;
1173 }
1174
1175 QualType Ty = FD->getType();
1176 if (Ty->isRecordType()) {
1177 if (getGEPIndicesToField(CGF, RD: Ty->getAsRecordDecl(), Field, Indices)) {
1178 if (RD->isUnion())
1179 FieldNo = 0;
1180 Indices.emplace_back(Args: CGF.Builder.getInt32(C: FieldNo));
1181 return true;
1182 }
1183 }
1184 }
1185
1186 return false;
1187}
1188
1189llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
1190 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1191 const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
1192
1193 // Find the base struct expr (i.e. p in p->a.b.c.d).
1194 const Expr *StructBase = StructAccessBase(RD).Visit(E: Base);
1195 if (!StructBase || StructBase->HasSideEffects(Ctx: getContext()))
1196 return nullptr;
1197
1198 llvm::Value *Res = nullptr;
1199 if (StructBase->getType()->isPointerType()) {
1200 LValueBaseInfo BaseInfo;
1201 TBAAAccessInfo TBAAInfo;
1202 Address Addr = EmitPointerWithAlignment(Addr: StructBase, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo);
1203 Res = Addr.emitRawPointer(CGF&: *this);
1204 } else if (StructBase->isLValue()) {
1205 LValue LV = EmitLValue(E: StructBase);
1206 Address Addr = LV.getAddress();
1207 Res = Addr.emitRawPointer(CGF&: *this);
1208 } else {
1209 return nullptr;
1210 }
1211
1212 RecIndicesTy Indices;
1213 getGEPIndicesToField(CGF&: *this, RD, Field: CountDecl, Indices);
1214 if (Indices.empty())
1215 return nullptr;
1216
1217 Indices.push_back(Elt: Builder.getInt32(C: 0));
1218 CanQualType T = CGM.getContext().getCanonicalTagType(TD: RD);
1219 return Builder.CreateInBoundsGEP(Ty: ConvertType(T), Ptr: Res,
1220 IdxList: RecIndicesTy(llvm::reverse(C&: Indices)),
1221 Name: "counted_by.gep");
1222}
1223
1224/// This method is typically called in contexts where we can't generate
1225/// side-effects, like in __builtin_dynamic_object_size. When finding
1226/// expressions, only choose those that have either already been emitted or can
1227/// be loaded without side-effects.
1228///
1229/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1230/// within the top-level struct.
1231/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
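/// Illustrative example:
///   struct S { int count; char fam[] __attribute__((counted_by(count))); };
/// For an access to 'p->fam', this emits a load of 'p->count' through the GEP
/// built by GetCountedByFieldExprGEP above.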
1232llvm::Value *CodeGenFunction::EmitLoadOfCountedByField(
1233 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1234 if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
1235 return Builder.CreateAlignedLoad(Ty: ConvertType(T: CountDecl->getType()), Addr: GEP,
1236 Align: getIntAlign(), Name: "counted_by.load");
1237 return nullptr;
1238}
1239
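/// Illustrative example for the bounds check emitted below
/// (-fsanitize=array-bounds):
///   int a[10];
///   a[i] = 0;   // guarded by a check that (unsigned)i < 10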
1240void CodeGenFunction::EmitBoundsCheck(const Expr *ArrayExpr,
1241 const Expr *ArrayExprBase,
1242 llvm::Value *IndexVal, QualType IndexType,
1243 bool Accessed) {
1244 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1245 "should not be called unless adding bounds checks");
1246 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1247 getLangOpts().getStrictFlexArraysLevel();
1248 QualType ArrayExprBaseType;
1249 llvm::Value *BoundsVal = getArrayIndexingBound(
1250 CGF&: *this, Base: ArrayExprBase, IndexedType&: ArrayExprBaseType, StrictFlexArraysLevel);
1251
1252 EmitBoundsCheckImpl(ArrayExpr, ArrayBaseType: ArrayExprBaseType, IndexVal, IndexType,
1253 BoundsVal, BoundsType: getContext().getSizeType(), Accessed);
1254}
1255
1256void CodeGenFunction::EmitBoundsCheckImpl(const Expr *ArrayExpr,
1257 QualType ArrayBaseType,
1258 llvm::Value *IndexVal,
1259 QualType IndexType,
1260 llvm::Value *BoundsVal,
1261 QualType BoundsType, bool Accessed) {
1262 if (!BoundsVal)
1263 return;
1264
1265 auto CheckKind = SanitizerKind::SO_ArrayBounds;
1266 auto CheckHandler = SanitizerHandler::OutOfBounds;
1267 SanitizerDebugLocation SanScope(this, {CheckKind}, CheckHandler);
1268
1269 // All hail the C implicit type conversion rules!!!
1270 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1271 bool BoundsSigned = BoundsType->isSignedIntegerOrEnumerationType();
1272
1273 const ASTContext &Ctx = getContext();
1274 llvm::Type *Ty = ConvertType(
1275 T: Ctx.getTypeSize(T: IndexType) >= Ctx.getTypeSize(T: BoundsType) ? IndexType
1276 : BoundsType);
1277
1278 llvm::Value *IndexInst = Builder.CreateIntCast(V: IndexVal, DestTy: Ty, isSigned: IndexSigned);
1279 llvm::Value *BoundsInst = Builder.CreateIntCast(V: BoundsVal, DestTy: Ty, isSigned: false);
1280
1281 llvm::Constant *StaticData[] = {
1282 EmitCheckSourceLocation(Loc: ArrayExpr->getExprLoc()),
1283 EmitCheckTypeDescriptor(T: ArrayBaseType),
1284 EmitCheckTypeDescriptor(T: IndexType),
1285 };
1286
1287 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(LHS: IndexInst, RHS: BoundsInst)
1288 : Builder.CreateICmpULE(LHS: IndexInst, RHS: BoundsInst);
1289
1290 if (BoundsSigned) {
    // Don't allow a negative bound.
1292 llvm::Value *Cmp = Builder.CreateICmpSGT(
1293 LHS: BoundsVal, RHS: llvm::ConstantInt::get(Ty: BoundsVal->getType(), V: 0));
1294 Check = Builder.CreateAnd(LHS: Cmp, RHS: Check);
1295 }
1296
1297 EmitCheck(Checked: std::make_pair(x&: Check, y&: CheckKind), Check: CheckHandler, StaticArgs: StaticData,
1298 DynamicArgs: IndexInst);
1299}
1300
1301llvm::MDNode *CodeGenFunction::buildAllocToken(QualType AllocType) {
1302 auto ATMD = infer_alloc::getAllocTokenMetadata(T: AllocType, Ctx: getContext());
1303 if (!ATMD)
1304 return nullptr;
1305
1306 llvm::MDBuilder MDB(getLLVMContext());
1307 auto *TypeNameMD = MDB.createString(Str: ATMD->TypeName);
1308 auto *ContainsPtrC = Builder.getInt1(V: ATMD->ContainsPointer);
1309 auto *ContainsPtrMD = MDB.createConstant(C: ContainsPtrC);
1310
1311 // Format: !{<type-name>, <contains-pointer>}
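  // For example (illustrative), for an allocation of a type named 'Foo' that
  // contains a pointer member, this yields metadata printed roughly as:
  //   !{!"Foo", i1 true}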
1312 return llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: {TypeNameMD, ContainsPtrMD});
1313}
1314
1315void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, QualType AllocType) {
1316 assert(SanOpts.has(SanitizerKind::AllocToken) &&
1317 "Only needed with -fsanitize=alloc-token");
1318 CB->setMetadata(KindID: llvm::LLVMContext::MD_alloc_token,
1319 Node: buildAllocToken(AllocType));
1320}
1321
1322llvm::MDNode *CodeGenFunction::buildAllocToken(const CallExpr *E) {
1323 QualType AllocType = infer_alloc::inferPossibleType(E, Ctx: getContext(), CastE: CurCast);
1324 if (!AllocType.isNull())
1325 return buildAllocToken(AllocType);
1326 return nullptr;
1327}
1328
1329void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, const CallExpr *E) {
1330 assert(SanOpts.has(SanitizerKind::AllocToken) &&
1331 "Only needed with -fsanitize=alloc-token");
1332 if (llvm::MDNode *MDN = buildAllocToken(E))
1333 CB->setMetadata(KindID: llvm::LLVMContext::MD_alloc_token, Node: MDN);
1334}
1335
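/// Illustrative example for EmitComplexPrePostIncDec below:
///   _Complex double z = ...;
///   ++z;   // adds 1.0 to the real part; the imaginary part is unchanged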
1336CodeGenFunction::ComplexPairTy CodeGenFunction::
1337EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1338 bool isInc, bool isPre) {
1339 ComplexPairTy InVal = EmitLoadOfComplex(src: LV, loc: E->getExprLoc());
1340
1341 llvm::Value *NextVal;
1342 if (isa<llvm::IntegerType>(Val: InVal.first->getType())) {
1343 uint64_t AmountVal = isInc ? 1 : -1;
1344 NextVal = llvm::ConstantInt::get(Ty: InVal.first->getType(), V: AmountVal, IsSigned: true);
1345
1346 // Add the inc/dec to the real part.
1347 NextVal = Builder.CreateAdd(LHS: InVal.first, RHS: NextVal, Name: isInc ? "inc" : "dec");
1348 } else {
1349 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1350 llvm::APFloat FVal(getContext().getFloatTypeSemantics(T: ElemTy), 1);
1351 if (!isInc)
1352 FVal.changeSign();
1353 NextVal = llvm::ConstantFP::get(Context&: getLLVMContext(), V: FVal);
1354
1355 // Add the inc/dec to the real part.
1356 NextVal = Builder.CreateFAdd(L: InVal.first, R: NextVal, Name: isInc ? "inc" : "dec");
1357 }
1358
1359 ComplexPairTy IncVal(NextVal, InVal.second);
1360
1361 // Store the updated result through the lvalue.
1362 EmitStoreOfComplex(V: IncVal, dest: LV, /*init*/ isInit: false);
1363 if (getLangOpts().OpenMP)
1364 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF&: *this,
1365 LHS: E->getSubExpr());
1366
1367 // If this is a postinc, return the value read from memory, otherwise use the
1368 // updated value.
1369 return isPre ? IncVal : InVal;
1370}
1371
1372void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
1373 CodeGenFunction *CGF) {
1374 // Bind VLAs in the cast type.
1375 if (CGF && E->getType()->isVariablyModifiedType())
1376 CGF->EmitVariablyModifiedType(Ty: E->getType());
1377
1378 if (CGDebugInfo *DI = getModuleDebugInfo())
1379 DI->EmitExplicitCastType(Ty: E->getType());
1380}
1381
1382//===----------------------------------------------------------------------===//
1383// LValue Expression Emission
1384//===----------------------------------------------------------------------===//
1385
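/// Illustrative note for getArrayElementAlign below: given a 16-byte aligned
/// 'float a[N]' (element size 4), a constant index of 1 yields offset 4 and
/// hence alignment 4; an unknown index conservatively falls back to the
/// per-element alignment, here min(16, 4) = 4.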
1386static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx,
1387 CharUnits eltSize) {
1388 // If we have a constant index, we can use the exact offset of the
1389 // element we're accessing.
1390 if (auto *constantIdx = dyn_cast<llvm::ConstantInt>(Val: idx)) {
1391 CharUnits offset = constantIdx->getZExtValue() * eltSize;
1392 return arrayAlign.alignmentAtOffset(offset);
1393 }
1394
1395 // Otherwise, use the worst-case alignment for any element.
1396 return arrayAlign.alignmentOfArrayElement(elementSize: eltSize);
1397}
1398
1399/// Emit pointer + index arithmetic.
1400static Address emitPointerArithmetic(CodeGenFunction &CGF,
1401 const BinaryOperator *BO,
1402 LValueBaseInfo *BaseInfo,
1403 TBAAAccessInfo *TBAAInfo,
1404 KnownNonNull_t IsKnownNonNull) {
1405 assert(BO->isAdditiveOp() && "Expect an addition or subtraction.");
1406 Expr *pointerOperand = BO->getLHS();
1407 Expr *indexOperand = BO->getRHS();
1408 bool isSubtraction = BO->getOpcode() == BO_Sub;
1409
1410 Address BaseAddr = Address::invalid();
1411 llvm::Value *index = nullptr;
1412 // In a subtraction, the LHS is always the pointer.
1413 // Note: do not change the evaluation order.
1414 if (!isSubtraction && !pointerOperand->getType()->isAnyPointerType()) {
1415 std::swap(a&: pointerOperand, b&: indexOperand);
1416 index = CGF.EmitScalarExpr(E: indexOperand);
1417 BaseAddr = CGF.EmitPointerWithAlignment(Addr: pointerOperand, BaseInfo, TBAAInfo,
1418 IsKnownNonNull: NotKnownNonNull);
1419 } else {
1420 BaseAddr = CGF.EmitPointerWithAlignment(Addr: pointerOperand, BaseInfo, TBAAInfo,
1421 IsKnownNonNull: NotKnownNonNull);
1422 index = CGF.EmitScalarExpr(E: indexOperand);
1423 }
1424
1425 llvm::Value *pointer = BaseAddr.getBasePointer();
1426 llvm::Value *Res = CGF.EmitPointerArithmetic(
1427 BO, pointerOperand, pointer, indexOperand, index, isSubtraction);
1428 QualType PointeeTy = BO->getType()->getPointeeType();
1429 CharUnits Align =
1430 getArrayElementAlign(arrayAlign: BaseAddr.getAlignment(), idx: index,
1431 eltSize: CGF.getContext().getTypeSizeInChars(T: PointeeTy));
1432 return Address(Res, CGF.ConvertTypeForMem(T: PointeeTy), Align,
1433 CGF.CGM.getPointerAuthInfoForPointeeType(type: PointeeTy),
1434 /*Offset=*/nullptr, IsKnownNonNull);
1435}
1436
1437static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
1438 TBAAAccessInfo *TBAAInfo,
1439 KnownNonNull_t IsKnownNonNull,
1440 CodeGenFunction &CGF) {
1441 // We allow this with ObjC object pointers because of fragile ABIs.
1442 assert(E->getType()->isPointerType() ||
1443 E->getType()->isObjCObjectPointerType());
1444 E = E->IgnoreParens();
1445
1446 // Casts:
1447 if (const CastExpr *CE = dyn_cast<CastExpr>(Val: E)) {
1448 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(Val: CE))
1449 CGF.CGM.EmitExplicitCastExprType(E: ECE, CGF: &CGF);
1450
1451 switch (CE->getCastKind()) {
1452 // Non-converting casts (but not C's implicit conversion from void*).
1453 case CK_BitCast:
1454 case CK_NoOp:
1455 case CK_AddressSpaceConversion:
1456 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1457 if (PtrTy->getPointeeType()->isVoidType())
1458 break;
1459
1460 LValueBaseInfo InnerBaseInfo;
1461 TBAAAccessInfo InnerTBAAInfo;
1462 Address Addr = CGF.EmitPointerWithAlignment(
1463 Addr: CE->getSubExpr(), BaseInfo: &InnerBaseInfo, TBAAInfo: &InnerTBAAInfo, IsKnownNonNull);
1464 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1465 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1466
1467 if (isa<ExplicitCastExpr>(Val: CE)) {
1468 LValueBaseInfo TargetTypeBaseInfo;
1469 TBAAAccessInfo TargetTypeTBAAInfo;
1470 CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
1471 T: E->getType(), BaseInfo: &TargetTypeBaseInfo, TBAAInfo: &TargetTypeTBAAInfo);
1472 if (TBAAInfo)
1473 *TBAAInfo =
1474 CGF.CGM.mergeTBAAInfoForCast(SourceInfo: *TBAAInfo, TargetInfo: TargetTypeTBAAInfo);
1475 // If the source l-value is opaque, honor the alignment of the
1476 // casted-to type.
1477 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1478 if (BaseInfo)
1479 BaseInfo->mergeForCast(Info: TargetTypeBaseInfo);
1480 Addr.setAlignment(Align);
1481 }
1482 }
1483
1484 if (CGF.SanOpts.has(K: SanitizerKind::CFIUnrelatedCast) &&
1485 CE->getCastKind() == CK_BitCast) {
1486 if (auto PT = E->getType()->getAs<PointerType>())
1487 CGF.EmitVTablePtrCheckForCast(T: PT->getPointeeType(), Derived: Addr,
1488 /*MayBeNull=*/true,
1489 TCK: CodeGenFunction::CFITCK_UnrelatedCast,
1490 Loc: CE->getBeginLoc());
1491 }
1492
1493 llvm::Type *ElemTy =
1494 CGF.ConvertTypeForMem(T: E->getType()->getPointeeType());
1495 Addr = Addr.withElementType(ElemTy);
1496 if (CE->getCastKind() == CK_AddressSpaceConversion)
1497 Addr = CGF.Builder.CreateAddrSpaceCast(
1498 Addr, Ty: CGF.ConvertType(T: E->getType()), ElementTy: ElemTy);
1499
1500 return CGF.authPointerToPointerCast(Ptr: Addr, SourceType: CE->getSubExpr()->getType(),
1501 DestType: CE->getType());
1502 }
1503 break;
1504
1505 // Array-to-pointer decay.
1506 case CK_ArrayToPointerDecay:
1507 return CGF.EmitArrayToPointerDecay(Array: CE->getSubExpr(), BaseInfo, TBAAInfo);
1508
1509 // Derived-to-base conversions.
1510 case CK_UncheckedDerivedToBase:
1511 case CK_DerivedToBase: {
1512 // TODO: Support accesses to members of base classes in TBAA. For now, we
1513 // conservatively pretend that the complete object is of the base class
1514 // type.
1515 if (TBAAInfo)
1516 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(AccessType: E->getType());
1517 Address Addr = CGF.EmitPointerWithAlignment(
1518 Addr: CE->getSubExpr(), BaseInfo, TBAAInfo: nullptr,
1519 IsKnownNonNull: (KnownNonNull_t)(IsKnownNonNull ||
1520 CE->getCastKind() == CK_UncheckedDerivedToBase));
1521 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1522 return CGF.GetAddressOfBaseClass(
1523 Value: Addr, Derived, PathBegin: CE->path_begin(), PathEnd: CE->path_end(),
1524 NullCheckValue: CGF.ShouldNullCheckClassCastValue(Cast: CE), Loc: CE->getExprLoc());
1525 }
1526
1527 // TODO: Is there any reason to treat base-to-derived conversions
1528 // specially?
1529 default:
1530 break;
1531 }
1532 }
1533
1534 // Unary &.
1535 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: E)) {
1536 if (UO->getOpcode() == UO_AddrOf) {
1537 LValue LV = CGF.EmitLValue(E: UO->getSubExpr(), IsKnownNonNull);
1538 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1539 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1540 return LV.getAddress();
1541 }
1542 }
1543
1544 // std::addressof and variants.
1545 if (auto *Call = dyn_cast<CallExpr>(Val: E)) {
1546 switch (Call->getBuiltinCallee()) {
1547 default:
1548 break;
1549 case Builtin::BIaddressof:
1550 case Builtin::BI__addressof:
1551 case Builtin::BI__builtin_addressof: {
1552 LValue LV = CGF.EmitLValue(E: Call->getArg(Arg: 0), IsKnownNonNull);
1553 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1554 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1555 return LV.getAddress();
1556 }
1557 }
1558 }
1559
1560 // Pointer arithmetic: pointer +/- index.
1561 if (auto *BO = dyn_cast<BinaryOperator>(Val: E)) {
1562 if (BO->isAdditiveOp())
1563 return emitPointerArithmetic(CGF, BO, BaseInfo, TBAAInfo, IsKnownNonNull);
1564 }
1565
1566 // TODO: conditional operators, comma.
1567
1568 // Otherwise, use the alignment of the type.
1569 return CGF.makeNaturalAddressForPointer(
1570 Ptr: CGF.EmitScalarExpr(E), T: E->getType()->getPointeeType(), Alignment: CharUnits(),
1571 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1572}
1573
1574/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1575/// derive a more accurate bound on the alignment of the pointer.
1576Address CodeGenFunction::EmitPointerWithAlignment(
1577 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1578 KnownNonNull_t IsKnownNonNull) {
1579 Address Addr =
1580 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, CGF&: *this);
1581 if (IsKnownNonNull && !Addr.isKnownNonNull())
1582 Addr.setKnownNonNull();
1583 return Addr;
1584}
1585
1586llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
1587 llvm::Value *V = RV.getScalarVal();
1588 if (auto MPT = T->getAs<MemberPointerType>())
1589 return CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF&: *this, MemPtr: V, MPT);
1590 return Builder.CreateICmpNE(LHS: V, RHS: llvm::Constant::getNullValue(Ty: V->getType()));
1591}
1592
1593RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
1594 if (Ty->isVoidType())
1595 return RValue::get(V: nullptr);
1596
1597 switch (getEvaluationKind(T: Ty)) {
1598 case TEK_Complex: {
1599 llvm::Type *EltTy =
1600 ConvertType(T: Ty->castAs<ComplexType>()->getElementType());
1601 llvm::Value *U = llvm::UndefValue::get(T: EltTy);
1602 return RValue::getComplex(C: std::make_pair(x&: U, y&: U));
1603 }
1604
1605 // If this is a use of an undefined aggregate type, the aggregate must have an
1606 // identifiable address. Just because the contents of the value are undefined
1607 // doesn't mean that the address can't be taken and compared.
1608 case TEK_Aggregate: {
1609 Address DestPtr = CreateMemTemp(Ty, Name: "undef.agg.tmp");
1610 return RValue::getAggregate(addr: DestPtr);
1611 }
1612
1613 case TEK_Scalar:
1614 return RValue::get(V: llvm::UndefValue::get(T: ConvertType(T: Ty)));
1615 }
1616 llvm_unreachable("bad evaluation kind");
1617}
1618
1619RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
1620 const char *Name) {
1621 ErrorUnsupported(S: E, Type: Name);
1622 return GetUndefRValue(Ty: E->getType());
1623}
1624
1625LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
1626 const char *Name) {
1627 ErrorUnsupported(S: E, Type: Name);
1628 llvm::Type *ElTy = ConvertType(T: E->getType());
1629 llvm::Type *Ty = DefaultPtrTy;
1630 return MakeAddrLValue(
1631 Addr: Address(llvm::UndefValue::get(T: Ty), ElTy, CharUnits::One()), T: E->getType());
1632}
1633
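/// IsWrappedCXXThis - Return whether \p Obj is a 'this' expression, possibly
/// wrapped in parentheses, casts, or __extension__. For example, the base in
/// '((Base *)this)->member' still counts as 'this', whereas the result of a
/// dynamic_cast does not, since it may be null.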
1634bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1635 const Expr *Base = Obj;
1636 while (!isa<CXXThisExpr>(Val: Base)) {
1637 // The result of a dynamic_cast can be null.
1638 if (isa<CXXDynamicCastExpr>(Val: Base))
1639 return false;
1640
1641 if (const auto *CE = dyn_cast<CastExpr>(Val: Base)) {
1642 Base = CE->getSubExpr();
1643 } else if (const auto *PE = dyn_cast<ParenExpr>(Val: Base)) {
1644 Base = PE->getSubExpr();
1645 } else if (const auto *UO = dyn_cast<UnaryOperator>(Val: Base)) {
1646 if (UO->getOpcode() == UO_Extension)
1647 Base = UO->getSubExpr();
1648 else
1649 return false;
1650 } else {
1651 return false;
1652 }
1653 }
1654 return true;
1655}
1656
1657LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1658 LValue LV;
1659 if (SanOpts.has(K: SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(Val: E))
1660 LV = EmitArraySubscriptExpr(E: cast<ArraySubscriptExpr>(Val: E), /*Accessed*/true);
1661 else
1662 LV = EmitLValue(E);
1663 if (!isa<DeclRefExpr>(Val: E) && !LV.isBitField() && LV.isSimple()) {
1664 SanitizerSet SkippedChecks;
1665 if (const auto *ME = dyn_cast<MemberExpr>(Val: E)) {
1666 bool IsBaseCXXThis = IsWrappedCXXThis(Obj: ME->getBase());
1667 if (IsBaseCXXThis)
1668 SkippedChecks.set(K: SanitizerKind::Alignment, Value: true);
1669 if (IsBaseCXXThis || isa<DeclRefExpr>(Val: ME->getBase()))
1670 SkippedChecks.set(K: SanitizerKind::Null, Value: true);
1671 }
1672 EmitTypeCheck(TCK, Loc: E->getExprLoc(), LV, Type: E->getType(), SkippedChecks);
1673 }
1674 return LV;
1675}
1676
1677/// EmitLValue - Emit code to compute a designator that specifies the location
1678/// of the expression.
1679///
1680/// This can return one of two things: a simple address or a bitfield reference.
1681/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1682/// an LLVM pointer type.
1683///
1684/// If this returns a bitfield reference, nothing about the pointee type of the
1685/// LLVM value is known: For example, it may not be a pointer to an integer.
1686///
1687/// If this returns a normal address, and if the lvalue's C type is fixed size,
1688/// this method guarantees that the returned pointer type will point to an LLVM
1689/// type of the same size as the lvalue's type. If the lvalue has a variable
1690/// length type, this is not possible.
1691///
1692LValue CodeGenFunction::EmitLValue(const Expr *E,
1693 KnownNonNull_t IsKnownNonNull) {
1694  // Run with sufficient stack space so that deeply nested expressions don't
1695  // cause a stack overflow.
1696 LValue LV;
1697 CGM.runWithSufficientStackSpace(
1698 Loc: E->getExprLoc(), Fn: [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });
1699
1700 if (IsKnownNonNull && !LV.isKnownNonNull())
1701 LV.setKnownNonNull();
1702 return LV;
1703}
1704
1705static QualType getConstantExprReferredType(const FullExpr *E,
1706 const ASTContext &Ctx) {
1707 const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1708 if (isa<OpaqueValueExpr>(Val: SE))
1709 return SE->getType();
1710 return cast<CallExpr>(Val: SE)->getCallReturnType(Ctx)->getPointeeType();
1711}
1712
1713LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1714 KnownNonNull_t IsKnownNonNull) {
1715 ApplyDebugLocation DL(*this, E);
1716 switch (E->getStmtClass()) {
1717 default: return EmitUnsupportedLValue(E, Name: "l-value expression");
1718
1719 case Expr::ObjCPropertyRefExprClass:
1720 llvm_unreachable("cannot emit a property reference directly");
1721
1722 case Expr::ObjCSelectorExprClass:
1723 return EmitObjCSelectorLValue(E: cast<ObjCSelectorExpr>(Val: E));
1724 case Expr::ObjCIsaExprClass:
1725 return EmitObjCIsaExpr(E: cast<ObjCIsaExpr>(Val: E));
1726 case Expr::BinaryOperatorClass:
1727 return EmitBinaryOperatorLValue(E: cast<BinaryOperator>(Val: E));
1728 case Expr::CompoundAssignOperatorClass: {
1729 QualType Ty = E->getType();
1730 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1731 Ty = AT->getValueType();
1732 if (!Ty->isAnyComplexType())
1733 return EmitCompoundAssignmentLValue(E: cast<CompoundAssignOperator>(Val: E));
1734 return EmitComplexCompoundAssignmentLValue(E: cast<CompoundAssignOperator>(Val: E));
1735 }
1736 case Expr::CallExprClass:
1737 case Expr::CXXMemberCallExprClass:
1738 case Expr::CXXOperatorCallExprClass:
1739 case Expr::UserDefinedLiteralClass:
1740 return EmitCallExprLValue(E: cast<CallExpr>(Val: E));
1741 case Expr::CXXRewrittenBinaryOperatorClass:
1742 return EmitLValue(E: cast<CXXRewrittenBinaryOperator>(Val: E)->getSemanticForm(),
1743 IsKnownNonNull);
1744 case Expr::VAArgExprClass:
1745 return EmitVAArgExprLValue(E: cast<VAArgExpr>(Val: E));
1746 case Expr::DeclRefExprClass:
1747 return EmitDeclRefLValue(E: cast<DeclRefExpr>(Val: E));
1748 case Expr::ConstantExprClass: {
1749 const ConstantExpr *CE = cast<ConstantExpr>(Val: E);
1750 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1751 QualType RetType = getConstantExprReferredType(E: CE, Ctx: getContext());
1752 return MakeNaturalAlignAddrLValue(V: Result, T: RetType);
1753 }
1754 return EmitLValue(E: cast<ConstantExpr>(Val: E)->getSubExpr(), IsKnownNonNull);
1755 }
1756 case Expr::ParenExprClass:
1757 return EmitLValue(E: cast<ParenExpr>(Val: E)->getSubExpr(), IsKnownNonNull);
1758 case Expr::GenericSelectionExprClass:
1759 return EmitLValue(E: cast<GenericSelectionExpr>(Val: E)->getResultExpr(),
1760 IsKnownNonNull);
1761 case Expr::PredefinedExprClass:
1762 return EmitPredefinedLValue(E: cast<PredefinedExpr>(Val: E));
1763 case Expr::StringLiteralClass:
1764 return EmitStringLiteralLValue(E: cast<StringLiteral>(Val: E));
1765 case Expr::ObjCEncodeExprClass:
1766 return EmitObjCEncodeExprLValue(E: cast<ObjCEncodeExpr>(Val: E));
1767 case Expr::PseudoObjectExprClass:
1768 return EmitPseudoObjectLValue(e: cast<PseudoObjectExpr>(Val: E));
1769 case Expr::InitListExprClass:
1770 return EmitInitListLValue(E: cast<InitListExpr>(Val: E));
1771 case Expr::CXXTemporaryObjectExprClass:
1772 case Expr::CXXConstructExprClass:
1773 return EmitCXXConstructLValue(E: cast<CXXConstructExpr>(Val: E));
1774 case Expr::CXXBindTemporaryExprClass:
1775 return EmitCXXBindTemporaryLValue(E: cast<CXXBindTemporaryExpr>(Val: E));
1776 case Expr::CXXUuidofExprClass:
1777 return EmitCXXUuidofLValue(E: cast<CXXUuidofExpr>(Val: E));
1778 case Expr::LambdaExprClass:
1779 return EmitAggExprToLValue(E);
1780
1781 case Expr::ExprWithCleanupsClass: {
1782 const auto *cleanups = cast<ExprWithCleanups>(Val: E);
1783 RunCleanupsScope Scope(*this);
1784 LValue LV = EmitLValue(E: cleanups->getSubExpr(), IsKnownNonNull);
1785 if (LV.isSimple()) {
1786 // Defend against branches out of gnu statement expressions surrounded by
1787 // cleanups.
1788 Address Addr = LV.getAddress();
1789 llvm::Value *V = Addr.getBasePointer();
1790 Scope.ForceCleanup(ValuesToReload: {&V});
1791 Addr.replaceBasePointer(P: V);
1792 return LValue::MakeAddr(Addr, type: LV.getType(), Context&: getContext(),
1793 BaseInfo: LV.getBaseInfo(), TBAAInfo: LV.getTBAAInfo());
1794 }
1795 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1796 // bitfield lvalue or some other non-simple lvalue?
1797 return LV;
1798 }
1799
1800 case Expr::CXXDefaultArgExprClass: {
1801 auto *DAE = cast<CXXDefaultArgExpr>(Val: E);
1802 CXXDefaultArgExprScope Scope(*this, DAE);
1803 return EmitLValue(E: DAE->getExpr(), IsKnownNonNull);
1804 }
1805 case Expr::CXXDefaultInitExprClass: {
1806 auto *DIE = cast<CXXDefaultInitExpr>(Val: E);
1807 CXXDefaultInitExprScope Scope(*this, DIE);
1808 return EmitLValue(E: DIE->getExpr(), IsKnownNonNull);
1809 }
1810 case Expr::CXXTypeidExprClass:
1811 return EmitCXXTypeidLValue(E: cast<CXXTypeidExpr>(Val: E));
1812
1813 case Expr::ObjCMessageExprClass:
1814 return EmitObjCMessageExprLValue(E: cast<ObjCMessageExpr>(Val: E));
1815 case Expr::ObjCIvarRefExprClass:
1816 return EmitObjCIvarRefLValue(E: cast<ObjCIvarRefExpr>(Val: E));
1817 case Expr::StmtExprClass:
1818 return EmitStmtExprLValue(E: cast<StmtExpr>(Val: E));
1819 case Expr::UnaryOperatorClass:
1820 return EmitUnaryOpLValue(E: cast<UnaryOperator>(Val: E));
1821 case Expr::ArraySubscriptExprClass:
1822 return EmitArraySubscriptExpr(E: cast<ArraySubscriptExpr>(Val: E));
1823 case Expr::MatrixSingleSubscriptExprClass:
1824 return EmitMatrixSingleSubscriptExpr(E: cast<MatrixSingleSubscriptExpr>(Val: E));
1825 case Expr::MatrixSubscriptExprClass:
1826 return EmitMatrixSubscriptExpr(E: cast<MatrixSubscriptExpr>(Val: E));
1827 case Expr::ArraySectionExprClass:
1828 return EmitArraySectionExpr(E: cast<ArraySectionExpr>(Val: E));
1829 case Expr::ExtVectorElementExprClass:
1830 return EmitExtVectorElementExpr(E: cast<ExtVectorElementExpr>(Val: E));
1831 case Expr::CXXThisExprClass:
1832 return MakeAddrLValue(Addr: LoadCXXThisAddress(), T: E->getType());
1833 case Expr::MemberExprClass:
1834 return EmitMemberExpr(E: cast<MemberExpr>(Val: E));
1835 case Expr::CompoundLiteralExprClass:
1836 return EmitCompoundLiteralLValue(E: cast<CompoundLiteralExpr>(Val: E));
1837 case Expr::ConditionalOperatorClass:
1838 return EmitConditionalOperatorLValue(E: cast<ConditionalOperator>(Val: E));
1839 case Expr::BinaryConditionalOperatorClass:
1840 return EmitConditionalOperatorLValue(E: cast<BinaryConditionalOperator>(Val: E));
1841 case Expr::ChooseExprClass:
1842 return EmitLValue(E: cast<ChooseExpr>(Val: E)->getChosenSubExpr(), IsKnownNonNull);
1843 case Expr::OpaqueValueExprClass:
1844 return EmitOpaqueValueLValue(e: cast<OpaqueValueExpr>(Val: E));
1845 case Expr::SubstNonTypeTemplateParmExprClass:
1846 return EmitLValue(E: cast<SubstNonTypeTemplateParmExpr>(Val: E)->getReplacement(),
1847 IsKnownNonNull);
1848 case Expr::ImplicitCastExprClass:
1849 case Expr::CStyleCastExprClass:
1850 case Expr::CXXFunctionalCastExprClass:
1851 case Expr::CXXStaticCastExprClass:
1852 case Expr::CXXDynamicCastExprClass:
1853 case Expr::CXXReinterpretCastExprClass:
1854 case Expr::CXXConstCastExprClass:
1855 case Expr::CXXAddrspaceCastExprClass:
1856 case Expr::ObjCBridgedCastExprClass:
1857 return EmitCastLValue(E: cast<CastExpr>(Val: E));
1858
1859 case Expr::MaterializeTemporaryExprClass:
1860 return EmitMaterializeTemporaryExpr(M: cast<MaterializeTemporaryExpr>(Val: E));
1861
1862 case Expr::CoawaitExprClass:
1863 return EmitCoawaitLValue(E: cast<CoawaitExpr>(Val: E));
1864 case Expr::CoyieldExprClass:
1865 return EmitCoyieldLValue(E: cast<CoyieldExpr>(Val: E));
1866 case Expr::PackIndexingExprClass:
1867 return EmitLValue(E: cast<PackIndexingExpr>(Val: E)->getSelectedExpr());
1868 case Expr::HLSLOutArgExprClass:
1869 llvm_unreachable("cannot emit a HLSL out argument directly");
1870 }
1871}
1872
1873/// Given an object of the given canonical type, can we safely copy a
1874/// value out of it based on its initializer?
1875static bool isConstantEmittableObjectType(QualType type) {
1876 assert(type.isCanonical());
1877 assert(!type->isReferenceType());
1878
1879 // Must be const-qualified but non-volatile.
1880 Qualifiers qs = type.getLocalQualifiers();
1881 if (!qs.hasConst() || qs.hasVolatile()) return false;
1882
1883 // Otherwise, all object types satisfy this except C++ classes with
1884 // mutable subobjects or non-trivial copy/destroy behavior.
1885 if (const auto *RT = dyn_cast<RecordType>(Val&: type))
1886 if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: RT->getDecl())) {
1887 RD = RD->getDefinitionOrSelf();
1888 if (RD->hasMutableFields() || !RD->isTrivial())
1889 return false;
1890 }
1891
1892 return true;
1893}
1894
1895/// Can we constant-emit a load of a reference to a variable of the
1896/// given type? This is different from predicates like
1897/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1898/// in situations that don't necessarily satisfy the language's rules
1899/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
1900/// to do this with const float variables even if those variables
1901/// aren't marked 'constexpr'.
1902enum ConstantEmissionKind {
1903 CEK_None,
1904 CEK_AsReferenceOnly,
1905 CEK_AsValueOrReference,
1906 CEK_AsValueOnly
1907};
1908static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
1909 type = type.getCanonicalType();
1910 if (const auto *ref = dyn_cast<ReferenceType>(Val&: type)) {
1911 if (isConstantEmittableObjectType(type: ref->getPointeeType()))
1912 return CEK_AsValueOrReference;
1913 return CEK_AsReferenceOnly;
1914 }
1915 if (isConstantEmittableObjectType(type))
1916 return CEK_AsValueOnly;
1917 return CEK_None;
1918}
1919
1920/// Try to emit a reference to the given value without producing it as
1921/// an l-value. This is just an optimization, but it avoids us needing
1922/// to emit global copies of variables if they're named without triggering
1923/// a formal use in a context where we can't emit a direct reference to them,
1924/// for instance if a block or lambda or a member of a local class uses a
1925/// const int variable or constexpr variable from an enclosing function.
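///
/// A typical case (illustrative only):
///   void f() {
///     const int n = 3;
///     auto g = [] { return n; };  // no capture needed; 'n' is emitted as 3
///   }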
1926CodeGenFunction::ConstantEmission
1927CodeGenFunction::tryEmitAsConstant(const DeclRefExpr *RefExpr) {
1928 const ValueDecl *Value = RefExpr->getDecl();
1929
1930 // The value needs to be an enum constant or a constant variable.
1931 ConstantEmissionKind CEK;
1932 if (isa<ParmVarDecl>(Val: Value)) {
1933 CEK = CEK_None;
1934 } else if (const auto *var = dyn_cast<VarDecl>(Val: Value)) {
1935 CEK = checkVarTypeForConstantEmission(type: var->getType());
1936 } else if (isa<EnumConstantDecl>(Val: Value)) {
1937 CEK = CEK_AsValueOnly;
1938 } else {
1939 CEK = CEK_None;
1940 }
1941 if (CEK == CEK_None) return ConstantEmission();
1942
1943 Expr::EvalResult result;
1944 bool resultIsReference;
1945 QualType resultType;
1946
1947 // It's best to evaluate all the way as an r-value if that's permitted.
1948 if (CEK != CEK_AsReferenceOnly &&
1949 RefExpr->EvaluateAsRValue(Result&: result, Ctx: getContext())) {
1950 resultIsReference = false;
1951 resultType = RefExpr->getType().getUnqualifiedType();
1952
1953 // Otherwise, try to evaluate as an l-value.
1954 } else if (CEK != CEK_AsValueOnly &&
1955 RefExpr->EvaluateAsLValue(Result&: result, Ctx: getContext())) {
1956 resultIsReference = true;
1957 resultType = Value->getType();
1958
1959 // Failure.
1960 } else {
1961 return ConstantEmission();
1962 }
1963
1964 // In any case, if the initializer has side-effects, abandon ship.
1965 if (result.HasSideEffects)
1966 return ConstantEmission();
1967
1968 // In CUDA/HIP device compilation, a lambda may capture a reference variable
1969 // referencing a global host variable by copy. In this case the lambda should
1970 // make a copy of the value of the global host variable. The DRE of the
1971  // captured reference variable cannot be emitted as a load from the host
1972  // global variable as a compile-time constant, since the host variable is not
1973  // accessible on the device. Instead, the DRE of the captured reference
1974  // variable has to be loaded from the lambda's captures.
1975 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1976 RefExpr->refersToEnclosingVariableOrCapture()) {
1977 auto *MD = dyn_cast_or_null<CXXMethodDecl>(Val: CurCodeDecl);
1978 if (isLambdaMethod(DC: MD) && MD->getOverloadedOperator() == OO_Call) {
1979 const APValue::LValueBase &base = result.Val.getLValueBase();
1980 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1981 if (const VarDecl *VD = dyn_cast<const VarDecl>(Val: D)) {
1982 if (!VD->hasAttr<CUDADeviceAttr>()) {
1983 return ConstantEmission();
1984 }
1985 }
1986 }
1987 }
1988 }
1989
1990 // Emit as a constant.
1991 llvm::Constant *C = ConstantEmitter(*this).emitAbstract(
1992 loc: RefExpr->getLocation(), value: result.Val, T: resultType);
1993
1994 // Make sure we emit a debug reference to the global variable.
1995 // This should probably fire even for
1996 if (isa<VarDecl>(Val: Value)) {
1997 if (!getContext().DeclMustBeEmitted(D: cast<VarDecl>(Val: Value)))
1998 EmitDeclRefExprDbgValue(E: RefExpr, Init: result.Val);
1999 } else {
2000 assert(isa<EnumConstantDecl>(Value));
2001 EmitDeclRefExprDbgValue(E: RefExpr, Init: result.Val);
2002 }
2003
2004 // If we emitted a reference constant, we need to dereference that.
2005 if (resultIsReference)
2006 return ConstantEmission::forReference(C);
2007
2008 return ConstantEmission::forValue(C);
2009}
2010
2011static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
2012 const MemberExpr *ME) {
2013 if (auto *VD = dyn_cast<VarDecl>(Val: ME->getMemberDecl())) {
2014 // Try to emit static variable member expressions as DREs.
2015 return DeclRefExpr::Create(
2016 Context: CGF.getContext(), QualifierLoc: NestedNameSpecifierLoc(), TemplateKWLoc: SourceLocation(), D: VD,
2017 /*RefersToEnclosingVariableOrCapture=*/false, NameLoc: ME->getExprLoc(),
2018 T: ME->getType(), VK: ME->getValueKind(), FoundD: nullptr, TemplateArgs: nullptr, NOUR: ME->isNonOdrUse());
2019 }
2020 return nullptr;
2021}
2022
2023CodeGenFunction::ConstantEmission
2024CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
2025 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(CGF&: *this, ME))
2026 return tryEmitAsConstant(RefExpr: DRE);
2027 return ConstantEmission();
2028}
2029
2030llvm::Value *CodeGenFunction::emitScalarConstant(
2031 const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
2032 assert(Constant && "not a constant");
2033 if (Constant.isReference())
2034 return EmitLoadOfLValue(V: Constant.getReferenceLValue(CGF&: *this, RefExpr: E),
2035 Loc: E->getExprLoc())
2036 .getScalarVal();
2037 return Constant.getValue();
2038}
2039
2040llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
2041 SourceLocation Loc) {
2042 return EmitLoadOfScalar(Addr: lvalue.getAddress(), Volatile: lvalue.isVolatile(),
2043 Ty: lvalue.getType(), Loc, BaseInfo: lvalue.getBaseInfo(),
2044 TBAAInfo: lvalue.getTBAAInfo(), isNontemporal: lvalue.isNontemporal());
2045}
2046
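// Compute the valid value range for a load of the given type, when such a
// range can be assumed. For example, a 'bool' gets the half-open range
// [0, 2); under -fstrict-enums, a C++ enum without a fixed underlying type
// gets the range spanning its enumerators (via EnumDecl::getValueRange).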
2047static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
2048 llvm::APInt &Min, llvm::APInt &End,
2049 bool StrictEnums, bool IsBool) {
2050 const auto *ED = Ty->getAsEnumDecl();
2051 bool IsRegularCPlusPlusEnum =
2052 CGF.getLangOpts().CPlusPlus && StrictEnums && ED && !ED->isFixed();
2053 if (!IsBool && !IsRegularCPlusPlusEnum)
2054 return false;
2055
2056 if (IsBool) {
2057 Min = llvm::APInt(CGF.getContext().getTypeSize(T: Ty), 0);
2058 End = llvm::APInt(CGF.getContext().getTypeSize(T: Ty), 2);
2059 } else {
2060 ED->getValueRange(Max&: End, Min);
2061 }
2062 return true;
2063}
2064
2065llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
2066 llvm::APInt Min, End;
2067 if (!getRangeForType(CGF&: *this, Ty, Min, End, StrictEnums: CGM.getCodeGenOpts().StrictEnums,
2068 IsBool: Ty->hasBooleanRepresentation() && !Ty->isVectorType()))
2069 return nullptr;
2070
2071 llvm::MDBuilder MDHelper(getLLVMContext());
2072 return MDHelper.createRange(Lo: Min, Hi: End);
2073}
2074
2075void CodeGenFunction::maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty,
2076 SourceLocation Loc) {
2077 if (EmitScalarRangeCheck(Value: Load, Ty, Loc)) {
2078 // In order to prevent the optimizer from throwing away the check, don't
2079 // attach range metadata to the load.
2080 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2081 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2082 Load->setMetadata(KindID: llvm::LLVMContext::MD_range, Node: RangeInfo);
2083 Load->setMetadata(KindID: llvm::LLVMContext::MD_noundef,
2084 Node: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: {}));
2085 }
2086 }
2087}
2088
2089bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
2090 SourceLocation Loc) {
2091 bool HasBoolCheck = SanOpts.has(K: SanitizerKind::Bool);
2092 bool HasEnumCheck = SanOpts.has(K: SanitizerKind::Enum);
2093 if (!HasBoolCheck && !HasEnumCheck)
2094 return false;
2095
2096 bool IsBool = (Ty->hasBooleanRepresentation() && !Ty->isVectorType()) ||
2097 NSAPI(CGM.getContext()).isObjCBOOLType(T: Ty);
2098 bool NeedsBoolCheck = HasBoolCheck && IsBool;
2099 bool NeedsEnumCheck = HasEnumCheck && Ty->isEnumeralType();
2100 if (!NeedsBoolCheck && !NeedsEnumCheck)
2101 return false;
2102
2103 // Single-bit booleans don't need to be checked. Special-case this to avoid
2104 // a bit width mismatch when handling bitfield values. This is handled by
2105 // EmitFromMemory for the non-bitfield case.
2106 if (IsBool &&
2107 cast<llvm::IntegerType>(Val: Value->getType())->getBitWidth() == 1)
2108 return false;
2109
2110 if (NeedsEnumCheck &&
2111 getContext().isTypeIgnoredBySanitizer(Mask: SanitizerKind::Enum, Ty))
2112 return false;
2113
2114 llvm::APInt Min, End;
2115 if (!getRangeForType(CGF&: *this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
2116 return true;
2117
2118 SanitizerKind::SanitizerOrdinal Kind =
2119 NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;
2120
2121 auto &Ctx = getLLVMContext();
2122 auto CheckHandler = SanitizerHandler::LoadInvalidValue;
2123 SanitizerDebugLocation SanScope(this, {Kind}, CheckHandler);
2124 llvm::Value *Check;
2125 --End;
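  // After the decrement, End is the inclusive upper bound: when Min is 0 the
  // check is simply 'icmp ule Value, End', otherwise it is the conjunction of
  // 'icmp sle Value, End' and 'icmp sge Value, Min'.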
2126 if (!Min) {
2127 Check = Builder.CreateICmpULE(LHS: Value, RHS: llvm::ConstantInt::get(Context&: Ctx, V: End));
2128 } else {
2129 llvm::Value *Upper =
2130 Builder.CreateICmpSLE(LHS: Value, RHS: llvm::ConstantInt::get(Context&: Ctx, V: End));
2131 llvm::Value *Lower =
2132 Builder.CreateICmpSGE(LHS: Value, RHS: llvm::ConstantInt::get(Context&: Ctx, V: Min));
2133 Check = Builder.CreateAnd(LHS: Upper, RHS: Lower);
2134 }
2135 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
2136 EmitCheckTypeDescriptor(T: Ty)};
2137 EmitCheck(Checked: std::make_pair(x&: Check, y&: Kind), Check: CheckHandler, StaticArgs, DynamicArgs: Value);
2138 return true;
2139}
2140
2141llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
2142 QualType Ty,
2143 SourceLocation Loc,
2144 LValueBaseInfo BaseInfo,
2145 TBAAAccessInfo TBAAInfo,
2146 bool isNontemporal) {
2147 if (auto *GV = dyn_cast<llvm::GlobalValue>(Val: Addr.getBasePointer()))
2148 if (GV->isThreadLocal())
2149 Addr = Addr.withPointer(NewPointer: Builder.CreateThreadLocalAddress(Ptr: GV),
2150 IsKnownNonNull: NotKnownNonNull);
2151
2152 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2153 // Boolean vectors use `iN` as storage type.
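    // For example, a 3-element boolean ext_vector is typically stored as an
    // i8: load the i8, bitcast it to <8 x i1>, then shuffle it down to
    // <3 x i1>.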
2154 if (ClangVecTy->isPackedVectorBoolType(ctx: getContext())) {
2155 llvm::Type *ValTy = ConvertType(T: Ty);
2156 unsigned ValNumElems =
2157 cast<llvm::FixedVectorType>(Val: ValTy)->getNumElements();
2158 // Load the `iP` storage object (P is the padded vector size).
2159 auto *RawIntV = Builder.CreateLoad(Addr, IsVolatile: Volatile, Name: "load_bits");
2160 const auto *RawIntTy = RawIntV->getType();
2161 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
2162 // Bitcast iP --> <P x i1>.
2163 auto *PaddedVecTy = llvm::FixedVectorType::get(
2164 ElementType: Builder.getInt1Ty(), NumElts: RawIntTy->getPrimitiveSizeInBits());
2165 llvm::Value *V = Builder.CreateBitCast(V: RawIntV, DestTy: PaddedVecTy);
2166 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2167 V = emitBoolVecConversion(SrcVec: V, NumElementsDst: ValNumElems, Name: "extractvec");
2168
2169 return EmitFromMemory(Value: V, Ty);
2170 }
2171
2172 // Handles vectors of sizes that are likely to be expanded to a larger size
2173 // to optimize performance.
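    // For example, a <3 x float> is commonly widened to <4 x float> in memory;
    // in that case we load the wide vector and shuffle it back down to the
    // original three elements.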
2174 auto *VTy = cast<llvm::FixedVectorType>(Val: Addr.getElementType());
2175 auto *NewVecTy =
2176 CGM.getABIInfo().getOptimalVectorMemoryType(T: VTy, Opt: getLangOpts());
2177
2178 if (VTy != NewVecTy) {
2179 Address Cast = Addr.withElementType(ElemTy: NewVecTy);
2180 llvm::Value *V = Builder.CreateLoad(Addr: Cast, IsVolatile: Volatile, Name: "loadVecN");
2181 unsigned OldNumElements = VTy->getNumElements();
2182 SmallVector<int, 16> Mask(OldNumElements);
2183 std::iota(first: Mask.begin(), last: Mask.end(), value: 0);
2184 V = Builder.CreateShuffleVector(V, Mask, Name: "extractVec");
2185 return EmitFromMemory(Value: V, Ty);
2186 }
2187 }
2188
2189 // Atomic operations have to be done on integral types.
2190 LValue AtomicLValue =
2191 LValue::MakeAddr(Addr, type: Ty, Context&: getContext(), BaseInfo, TBAAInfo);
2192 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(Src: AtomicLValue)) {
2193 return EmitAtomicLoad(LV: AtomicLValue, SL: Loc).getScalarVal();
2194 }
2195
2196 Addr =
2197 Addr.withElementType(ElemTy: convertTypeForLoadStore(ASTTy: Ty, LLVMTy: Addr.getElementType()));
2198
2199 llvm::LoadInst *Load = Builder.CreateLoad(Addr, IsVolatile: Volatile);
2200 if (isNontemporal) {
2201 llvm::MDNode *Node = llvm::MDNode::get(
2202 Context&: Load->getContext(), MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 1)));
2203 Load->setMetadata(KindID: llvm::LLVMContext::MD_nontemporal, Node);
2204 }
2205
2206 CGM.DecorateInstructionWithTBAA(Inst: Load, TBAAInfo);
2207
2208 maybeAttachRangeForLoad(Load, Ty, Loc);
2209
2210 return EmitFromMemory(Value: Load, Ty);
2211}
2212
2213/// Converts a scalar value from its primary IR type (as returned
2214/// by ConvertType) to its load/store type (as returned by
2215/// convertTypeForLoadStore).
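/// For example, a 'bool' is an i1 as a primary IR value but is stored in
/// memory as an i8, so it is zero-extended here; EmitFromMemory performs the
/// inverse truncation.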
2216llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2217 if (auto *AtomicTy = Ty->getAs<AtomicType>())
2218 Ty = AtomicTy->getValueType();
2219
2220 if (Ty->isExtVectorBoolType() || Ty->isConstantMatrixBoolType()) {
2221 llvm::Type *StoreTy = convertTypeForLoadStore(ASTTy: Ty, LLVMTy: Value->getType());
2222
2223 if (Value->getType() == StoreTy)
2224 return Value;
2225
2226 if (StoreTy->isVectorTy() && StoreTy->getScalarSizeInBits() >
2227 Value->getType()->getScalarSizeInBits())
2228 return Builder.CreateZExt(V: Value, DestTy: StoreTy);
2229
2230 // Expand to the memory bit width.
2231 unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2232 // <N x i1> --> <P x i1>.
2233 Value = emitBoolVecConversion(SrcVec: Value, NumElementsDst: MemNumElems, Name: "insertvec");
2234 // <P x i1> --> iP.
2235 Value = Builder.CreateBitCast(V: Value, DestTy: StoreTy);
2236 }
2237
2238 if (Ty->hasBooleanRepresentation() || Ty->isBitIntType()) {
2239 llvm::Type *StoreTy = convertTypeForLoadStore(ASTTy: Ty, LLVMTy: Value->getType());
2240 bool Signed = Ty->isSignedIntegerOrEnumerationType();
2241 return Builder.CreateIntCast(V: Value, DestTy: StoreTy, isSigned: Signed, Name: "storedv");
2242 }
2243
2244 return Value;
2245}
2246
2247/// Converts a scalar value from its load/store type (as returned
2248/// by convertTypeForLoadStore) to its primary IR type (as returned
2249/// by ConvertType).
2250llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2251 if (auto *AtomicTy = Ty->getAs<AtomicType>())
2252 Ty = AtomicTy->getValueType();
2253
2254 if (Ty->isPackedVectorBoolType(ctx: getContext())) {
2255 const auto *RawIntTy = Value->getType();
2256
2257 // Bitcast iP --> <P x i1>.
2258 auto *PaddedVecTy = llvm::FixedVectorType::get(
2259 ElementType: Builder.getInt1Ty(), NumElts: RawIntTy->getPrimitiveSizeInBits());
2260 auto *V = Builder.CreateBitCast(V: Value, DestTy: PaddedVecTy);
2261 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2262 llvm::Type *ValTy = ConvertType(T: Ty);
2263 unsigned ValNumElems = cast<llvm::FixedVectorType>(Val: ValTy)->getNumElements();
2264 return emitBoolVecConversion(SrcVec: V, NumElementsDst: ValNumElems, Name: "extractvec");
2265 }
2266
2267 llvm::Type *ResTy = ConvertType(T: Ty);
2268 if (Ty->hasBooleanRepresentation() || Ty->isBitIntType() ||
2269 Ty->isExtVectorBoolType())
2270 return Builder.CreateTrunc(V: Value, DestTy: ResTy, Name: "loadedv");
2271
2272 return Value;
2273}
2274
2275// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2276// MatrixType), if it points to an array (the memory type of MatrixType).
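// For example, a 3x3 float matrix is laid out in memory as [9 x float] but
// manipulated as a <9 x float> value, so accesses may need to switch the
// pointer's element type between the two.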
2277static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
2278 CodeGenFunction &CGF,
2279 bool IsVector = true) {
2280 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Val: Addr.getElementType());
2281 if (ArrayTy && IsVector) {
2282 auto *VectorTy = llvm::FixedVectorType::get(ElementType: ArrayTy->getElementType(),
2283 NumElts: ArrayTy->getNumElements());
2284
2285 return Addr.withElementType(ElemTy: VectorTy);
2286 }
2287 auto *VectorTy = dyn_cast<llvm::VectorType>(Val: Addr.getElementType());
2288 if (VectorTy && !IsVector) {
2289 auto *ArrayTy = llvm::ArrayType::get(
2290 ElementType: VectorTy->getElementType(),
2291 NumElements: cast<llvm::FixedVectorType>(Val: VectorTy)->getNumElements());
2292
2293 return Addr.withElementType(ElemTy: ArrayTy);
2294 }
2295
2296 return Addr;
2297}
2298
2299// Emit a store of a matrix LValue. This may require casting the original
2300// pointer from the memory type (ArrayType) to a pointer to the value type
2301// (VectorType).
2302static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2303 bool isInit, CodeGenFunction &CGF) {
2304 Address Addr = MaybeConvertMatrixAddress(Addr: lvalue.getAddress(), CGF,
2305 IsVector: value->getType()->isVectorTy());
2306 CGF.EmitStoreOfScalar(Value: value, Addr, Volatile: lvalue.isVolatile(), Ty: lvalue.getType(),
2307 BaseInfo: lvalue.getBaseInfo(), TBAAInfo: lvalue.getTBAAInfo(), isInit,
2308 isNontemporal: lvalue.isNontemporal());
2309}
2310
2311void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2312 bool Volatile, QualType Ty,
2313 LValueBaseInfo BaseInfo,
2314 TBAAAccessInfo TBAAInfo,
2315 bool isInit, bool isNontemporal) {
2316 if (auto *GV = dyn_cast<llvm::GlobalValue>(Val: Addr.getBasePointer()))
2317 if (GV->isThreadLocal())
2318 Addr = Addr.withPointer(NewPointer: Builder.CreateThreadLocalAddress(Ptr: GV),
2319 IsKnownNonNull: NotKnownNonNull);
2320
2321 // Handles vectors of sizes that are likely to be expanded to a larger size
2322 // to optimize performance.
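  // For example, a <3 x float> value may be widened to <4 x float> before the
  // store, with an undef lane used for the padding element.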
2323 llvm::Type *SrcTy = Value->getType();
2324 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2325 if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy)) {
2326 auto *NewVecTy =
2327 CGM.getABIInfo().getOptimalVectorMemoryType(T: VecTy, Opt: getLangOpts());
2328 if (!ClangVecTy->isPackedVectorBoolType(ctx: getContext()) &&
2329 VecTy != NewVecTy) {
2330 SmallVector<int, 16> Mask(NewVecTy->getNumElements(),
2331 VecTy->getNumElements());
2332 std::iota(first: Mask.begin(), last: Mask.begin() + VecTy->getNumElements(), value: 0);
2333 // Use undef instead of poison for the padding lanes, to make sure no
2334 // padding bits are poisoned, which may break coercion.
2335 Value = Builder.CreateShuffleVector(V1: Value, V2: llvm::UndefValue::get(T: VecTy),
2336 Mask, Name: "extractVec");
2337 SrcTy = NewVecTy;
2338 }
2339 if (Addr.getElementType() != SrcTy)
2340 Addr = Addr.withElementType(ElemTy: SrcTy);
2341 }
2342 }
2343
2344 Value = EmitToMemory(Value, Ty);
2345
2346 LValue AtomicLValue =
2347 LValue::MakeAddr(Addr, type: Ty, Context&: getContext(), BaseInfo, TBAAInfo);
2348 if (Ty->isAtomicType() ||
2349 (!isInit && LValueIsSuitableForInlineAtomic(Src: AtomicLValue))) {
2350 EmitAtomicStore(rvalue: RValue::get(V: Value), lvalue: AtomicLValue, isInit);
2351 return;
2352 }
2353
2354 llvm::StoreInst *Store = Builder.CreateStore(Val: Value, Addr, IsVolatile: Volatile);
2355 addInstToCurrentSourceAtom(KeyInstruction: Store, Backup: Value);
2356
2357 if (isNontemporal) {
2358 llvm::MDNode *Node =
2359 llvm::MDNode::get(Context&: Store->getContext(),
2360 MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 1)));
2361 Store->setMetadata(KindID: llvm::LLVMContext::MD_nontemporal, Node);
2362 }
2363
2364 CGM.DecorateInstructionWithTBAA(Inst: Store, TBAAInfo);
2365}
2366
2367void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2368 bool isInit) {
2369 if (lvalue.getType()->isConstantMatrixType()) {
2370 EmitStoreOfMatrixScalar(value, lvalue, isInit, CGF&: *this);
2371 return;
2372 }
2373
2374 EmitStoreOfScalar(Value: value, Addr: lvalue.getAddress(), Volatile: lvalue.isVolatile(),
2375 Ty: lvalue.getType(), BaseInfo: lvalue.getBaseInfo(),
2376 TBAAInfo: lvalue.getTBAAInfo(), isInit, isNontemporal: lvalue.isNontemporal());
2377}
2378
2379// Emit a load of an LValue of matrix type. This may require casting the
2380// pointer from the memory type (ArrayType) to the value type (VectorType).
2381static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
2382 CodeGenFunction &CGF) {
2383 assert(LV.getType()->isConstantMatrixType());
2384 Address Addr = MaybeConvertMatrixAddress(Addr: LV.getAddress(), CGF);
2385 LV.setAddress(Addr);
2386 return RValue::get(V: CGF.EmitLoadOfScalar(lvalue: LV, Loc));
2387}
2388
2389RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot,
2390 SourceLocation Loc) {
2391 QualType Ty = LV.getType();
2392 switch (getEvaluationKind(T: Ty)) {
2393 case TEK_Scalar:
2394 return EmitLoadOfLValue(V: LV, Loc);
2395 case TEK_Complex:
2396 return RValue::getComplex(C: EmitLoadOfComplex(src: LV, loc: Loc));
2397 case TEK_Aggregate:
2398 EmitAggFinalDestCopy(Type: Ty, Dest: Slot, Src: LV, SrcKind: EVK_NonRValue);
2399 return Slot.asRValue();
2400 }
2401 llvm_unreachable("bad evaluation kind");
2402}
2403
2404/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2405/// method emits the address of the lvalue, then loads the result as an rvalue,
2406/// returning the rvalue.
2407RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
2408 // Load from __ptrauth.
2409 if (PointerAuthQualifier PtrAuth = LV.getQuals().getPointerAuth()) {
2410 LV.getQuals().removePointerAuth();
2411 llvm::Value *Value = EmitLoadOfLValue(LV, Loc).getScalarVal();
2412 return RValue::get(V: EmitPointerAuthUnqualify(Qualifier: PtrAuth, Pointer: Value, PointerType: LV.getType(),
2413 StorageAddress: LV.getAddress(),
2414 /*known nonnull*/ IsKnownNonNull: false));
2415 }
2416
2417 if (LV.isObjCWeak()) {
2418 // load of a __weak object.
2419 Address AddrWeakObj = LV.getAddress();
2420 return RValue::get(V: CGM.getObjCRuntime().EmitObjCWeakRead(CGF&: *this,
2421 AddrWeakObj));
2422 }
2423 if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
2424 // In MRC mode, we do a load+autorelease.
2425 if (!getLangOpts().ObjCAutoRefCount) {
2426 return RValue::get(V: EmitARCLoadWeak(addr: LV.getAddress()));
2427 }
2428
2429 // In ARC mode, we load retained and then consume the value.
2430 llvm::Value *Object = EmitARCLoadWeakRetained(addr: LV.getAddress());
2431 Object = EmitObjCConsumeObject(T: LV.getType(), Ptr: Object);
2432 return RValue::get(V: Object);
2433 }
2434
2435 if (LV.isSimple()) {
2436 assert(!LV.getType()->isFunctionType());
2437
2438 if (LV.getType()->isConstantMatrixType())
2439 return EmitLoadOfMatrixLValue(LV, Loc, CGF&: *this);
2440
2441 // Everything needs a load.
2442 return RValue::get(V: EmitLoadOfScalar(lvalue: LV, Loc));
2443 }
2444
2445 if (LV.isVectorElt()) {
2446 llvm::LoadInst *Load = Builder.CreateLoad(Addr: LV.getVectorAddress(),
2447 IsVolatile: LV.isVolatileQualified());
2448 return RValue::get(V: Builder.CreateExtractElement(Vec: Load, Idx: LV.getVectorIdx(),
2449 Name: "vecext"));
2450 }
2451
2452 // If this is a reference to a subset of the elements of a vector, either
2453 // shuffle the input or extract/insert them as appropriate.
2454 if (LV.isExtVectorElt()) {
2455 return EmitLoadOfExtVectorElementLValue(V: LV);
2456 }
2457
2458  // Global register variables always invoke intrinsics.
2459 if (LV.isGlobalReg())
2460 return EmitLoadOfGlobalRegLValue(LV);
2461
2462 if (LV.isMatrixElt()) {
2463 llvm::Value *Idx = LV.getMatrixIdx();
2464 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2465 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2466 llvm::MatrixBuilder MB(Builder);
2467 MB.CreateIndexAssumption(Idx, NumElements: MatTy->getNumElementsFlattened());
2468 }
2469 llvm::LoadInst *Load =
2470 Builder.CreateLoad(Addr: LV.getMatrixAddress(), IsVolatile: LV.isVolatileQualified());
2471 return RValue::get(V: Builder.CreateExtractElement(Vec: Load, Idx, Name: "matrixext"));
2472 }
2473 if (LV.isMatrixRow()) {
2474 QualType MatTy = LV.getType();
2475 const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();
2476
2477 unsigned NumRows = MT->getNumRows();
2478 unsigned NumCols = MT->getNumColumns();
2479 unsigned NumLanes = NumCols;
2480 llvm::Value *MatrixVec = EmitLoadOfScalar(lvalue: LV, Loc);
2481 llvm::Value *Row = LV.getMatrixRowIdx();
2482 llvm::Type *ElemTy = ConvertType(T: MT->getElementType());
2483 llvm::Constant *ColConstsIndices = nullptr;
2484 llvm::MatrixBuilder MB(Builder);
2485
2486 if (LV.isMatrixRowSwizzle()) {
2487 ColConstsIndices = LV.getMatrixRowElts();
2488 NumLanes = llvm::cast<llvm::FixedVectorType>(Val: ColConstsIndices->getType())
2489 ->getNumElements();
2490 }
2491
2492 llvm::Type *RowTy = llvm::FixedVectorType::get(ElementType: ElemTy, NumElts: NumLanes);
2493 llvm::Value *Result = llvm::PoisonValue::get(T: RowTy); // <NumLanes x T>
2494
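    // Gather the row one element at a time. The flattened index of element
    // (Row, Col) is Row * NumCols + Col in a row-major layout and
    // Col * NumRows + Row in a column-major one; CreateIndex is handed the
    // layout flag to pick between the two.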
2495 for (unsigned Col = 0; Col < NumLanes; ++Col) {
2496 llvm::Value *ColIdx;
2497 if (ColConstsIndices)
2498 ColIdx = ColConstsIndices->getAggregateElement(Elt: Col);
2499 else
2500 ColIdx = llvm::ConstantInt::get(Ty: Row->getType(), V: Col);
2501 bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
2502 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2503 llvm::Value *EltIndex =
2504 MB.CreateIndex(RowIdx: Row, ColumnIdx: ColIdx, NumRows, NumCols, IsMatrixRowMajor);
2505 llvm::Value *Elt = Builder.CreateExtractElement(Vec: MatrixVec, Idx: EltIndex);
2506 llvm::Value *Lane = llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: Col);
2507 Result = Builder.CreateInsertElement(Vec: Result, NewElt: Elt, Idx: Lane);
2508 }
2509
2510 return RValue::get(V: Result);
2511 }
2512
2513 assert(LV.isBitField() && "Unknown LValue type!");
2514 return EmitLoadOfBitfieldLValue(LV, Loc);
2515}
2516
2517RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
2518 SourceLocation Loc) {
2519 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2520
2521 // Get the output type.
2522 llvm::Type *ResLTy = ConvertType(T: LV.getType());
2523
2524 Address Ptr = LV.getBitFieldAddress();
2525 llvm::Value *Val =
2526 Builder.CreateLoad(Addr: Ptr, IsVolatile: LV.isVolatileQualified(), Name: "bf.load");
2527
2528 bool UseVolatile = LV.isVolatileQualified() &&
2529 Info.VolatileStorageSize != 0 && isAAPCS(TargetInfo: CGM.getTarget());
2530 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2531 const unsigned StorageSize =
2532 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
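  // Illustrative example: for a signed bit-field of width 5 at bit offset 3
  // in 32-bit storage, HighBits is 24, so the value is isolated and
  // sign-extended with 'shl 24' followed by 'ashr 27'.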
2533 if (Info.IsSigned) {
2534 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2535 unsigned HighBits = StorageSize - Offset - Info.Size;
2536 if (HighBits)
2537 Val = Builder.CreateShl(LHS: Val, RHS: HighBits, Name: "bf.shl");
2538 if (Offset + HighBits)
2539 Val = Builder.CreateAShr(LHS: Val, RHS: Offset + HighBits, Name: "bf.ashr");
2540 } else {
2541 if (Offset)
2542 Val = Builder.CreateLShr(LHS: Val, RHS: Offset, Name: "bf.lshr");
2543 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2544 Val = Builder.CreateAnd(
2545 LHS: Val, RHS: llvm::APInt::getLowBitsSet(numBits: StorageSize, loBitsSet: Info.Size), Name: "bf.clear");
2546 }
2547 Val = Builder.CreateIntCast(V: Val, DestTy: ResLTy, isSigned: Info.IsSigned, Name: "bf.cast");
2548 EmitScalarRangeCheck(Value: Val, Ty: LV.getType(), Loc);
2549 return RValue::get(V: Val);
2550}
2551
2552// If this is a reference to a subset of the elements of a vector, create an
2553// appropriate shufflevector.
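// For example, given a four-element float ext_vector 'v', loading 'v.xz'
// shuffles the full vector with the mask {0, 2}, while loading the scalar
// 'v.y' is a single extractelement.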
2554RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
2555 llvm::Value *Vec = Builder.CreateLoad(Addr: LV.getExtVectorAddress(),
2556 IsVolatile: LV.isVolatileQualified());
2557
2558 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2559 // IR value to a vector here allows the rest of codegen to behave as normal.
2560 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2561 llvm::Type *DstTy = llvm::FixedVectorType::get(ElementType: Vec->getType(), NumElts: 1);
2562 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGM.Int64Ty);
2563 Vec = Builder.CreateInsertElement(VecTy: DstTy, NewElt: Vec, Idx: Zero, Name: "cast.splat");
2564 }
2565
2566 const llvm::Constant *Elts = LV.getExtVectorElts();
2567
2568 // If the result of the expression is a non-vector type, we must be extracting
2569 // a single element. Just codegen as an extractelement.
2570 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2571 if (!ExprVT) {
2572 unsigned InIdx = getAccessedFieldNo(Idx: 0, Elts);
2573 llvm::Value *Elt = llvm::ConstantInt::get(Ty: SizeTy, V: InIdx);
2574
2575 llvm::Value *Element = Builder.CreateExtractElement(Vec, Idx: Elt);
2576
2577 llvm::Type *LVTy = ConvertType(T: LV.getType());
2578 if (Element->getType()->getPrimitiveSizeInBits() >
2579 LVTy->getPrimitiveSizeInBits())
2580 Element = Builder.CreateTrunc(V: Element, DestTy: LVTy);
2581
2582 return RValue::get(V: Element);
2583 }
2584
2585  // Always use a shufflevector to try to retain the original program structure.
2586 unsigned NumResultElts = ExprVT->getNumElements();
2587
2588 SmallVector<int, 4> Mask;
2589 for (unsigned i = 0; i != NumResultElts; ++i)
2590 Mask.push_back(Elt: getAccessedFieldNo(Idx: i, Elts));
2591
2592 Vec = Builder.CreateShuffleVector(V: Vec, Mask);
2593
2594 if (LV.getType()->isExtVectorBoolType())
2595 Vec = Builder.CreateTrunc(V: Vec, DestTy: ConvertType(T: LV.getType()), Name: "truncv");
2596
2597 return RValue::get(V: Vec);
2598}
2599
2600/// Generates lvalue for partial ext_vector access.
2601Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
2602 Address VectorAddress = LV.getExtVectorAddress();
2603 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2604 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(T: EQT);
2605
2606 Address CastToPointerElement = VectorAddress.withElementType(ElemTy: VectorElementTy);
2607
2608 const llvm::Constant *Elts = LV.getExtVectorElts();
2609 unsigned ix = getAccessedFieldNo(Idx: 0, Elts);
2610
2611 Address VectorBasePtrPlusIx =
2612 Builder.CreateConstInBoundsGEP(Addr: CastToPointerElement, Index: ix,
2613 Name: "vector.elt");
2614
2615 return VectorBasePtrPlusIx;
2616}
2617
2618/// Loads of global named registers are always calls to intrinsics.
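/// For example, a read of 'register unsigned long sp asm("sp");' is lowered
/// to a call to the llvm.read_register intrinsic, with !"sp" as the register
/// name metadata.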
2619RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
2620 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2621 "Bad type for register variable");
2622 llvm::MDNode *RegName = cast<llvm::MDNode>(
2623 Val: cast<llvm::MetadataAsValue>(Val: LV.getGlobalReg())->getMetadata());
2624
2625 // We accept integer and pointer types only
2626 llvm::Type *OrigTy = CGM.getTypes().ConvertType(T: LV.getType());
2627 llvm::Type *Ty = OrigTy;
2628 if (OrigTy->isPointerTy())
2629 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2630 llvm::Type *Types[] = { Ty };
2631
2632 llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::read_register, Tys: Types);
2633 llvm::Value *Call = Builder.CreateCall(
2634 Callee: F, Args: llvm::MetadataAsValue::get(Context&: Ty->getContext(), MD: RegName));
2635 if (OrigTy->isPointerTy())
2636 Call = Builder.CreateIntToPtr(V: Call, DestTy: OrigTy);
2637 return RValue::get(V: Call);
2638}
2639
2640/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2641/// lvalue, where both are guaranteed to have the same type.
2643void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
2644 bool isInit) {
2645 if (!Dst.isSimple()) {
2646 if (Dst.isVectorElt()) {
2647 if (getLangOpts().HLSL) {
2648        // HLSL allows direct access to vector elements, so a store to an
2649        // individual element through VectorElt is emitted as a direct store
2650        // to that element rather than a whole-vector read-modify-write.
2651 Address DstAddr = Dst.getVectorAddress();
2652 llvm::Type *DestAddrTy = DstAddr.getElementType();
2653 llvm::Type *ElemTy = DestAddrTy->getScalarType();
2654 CharUnits ElemAlign = CharUnits::fromQuantity(
2655 Quantity: CGM.getDataLayout().getPrefTypeAlign(Ty: ElemTy));
2656
2657 assert(ElemTy->getScalarSizeInBits() >= 8 &&
2658 "vector element type must be at least byte-sized");
2659
2660 llvm::Value *Val = Src.getScalarVal();
2661 if (Val->getType()->getPrimitiveSizeInBits() <
2662 ElemTy->getScalarSizeInBits())
2663 Val = Builder.CreateZExt(V: Val, DestTy: ElemTy->getScalarType());
2664
2665 llvm::Value *Idx = Dst.getVectorIdx();
2666 llvm::Value *Zero = llvm::ConstantInt::get(Ty: Int32Ty, V: 0);
2667 Address DstElemAddr =
2668 Builder.CreateGEP(Addr: DstAddr, IdxList: {Zero, Idx}, ElementType: DestAddrTy, Align: ElemAlign);
2669 Builder.CreateStore(Val, Addr: DstElemAddr, IsVolatile: Dst.isVolatileQualified());
2670 return;
2671 }
2672
2673 // Read/modify/write the vector, inserting the new element.
2674 llvm::Value *Vec = Builder.CreateLoad(Addr: Dst.getVectorAddress(),
2675 IsVolatile: Dst.isVolatileQualified());
2676 llvm::Type *VecTy = Vec->getType();
2677 llvm::Value *SrcVal = Src.getScalarVal();
2678
2679 if (SrcVal->getType()->getPrimitiveSizeInBits() <
2680 VecTy->getScalarSizeInBits())
2681 SrcVal = Builder.CreateZExt(V: SrcVal, DestTy: VecTy->getScalarType());
2682
2683 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Val: Vec->getType());
2684 if (IRStoreTy) {
2685 auto *IRVecTy = llvm::FixedVectorType::get(
2686 ElementType: Builder.getInt1Ty(), NumElts: IRStoreTy->getPrimitiveSizeInBits());
2687 Vec = Builder.CreateBitCast(V: Vec, DestTy: IRVecTy);
2688 // iN --> <N x i1>.
2689 }
2690
2691 // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar
2692 // types which are mapped to vector LLVM IR types (e.g. for implementing
2693 // an ABI).
2694 if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(Val: SrcVal->getType());
2695 EltTy && EltTy->getNumElements() == 1)
2696 SrcVal = Builder.CreateBitCast(V: SrcVal, DestTy: EltTy->getElementType());
2697
2698 Vec = Builder.CreateInsertElement(Vec, NewElt: SrcVal, Idx: Dst.getVectorIdx(),
2699 Name: "vecins");
2700 if (IRStoreTy) {
2701 // <N x i1> --> <iN>.
2702 Vec = Builder.CreateBitCast(V: Vec, DestTy: IRStoreTy);
2703 }
2704
2705 auto *I = Builder.CreateStore(Val: Vec, Addr: Dst.getVectorAddress(),
2706 IsVolatile: Dst.isVolatileQualified());
2707 addInstToCurrentSourceAtom(KeyInstruction: I, Backup: Vec);
2708 return;
2709 }
2710
2711 // If this is an update of extended vector elements, insert them as
2712 // appropriate.
2713 if (Dst.isExtVectorElt())
2714 return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
2715
2716 if (Dst.isGlobalReg())
2717 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2718
2719 if (Dst.isMatrixElt()) {
2720 if (getLangOpts().HLSL) {
2721        // HLSL allows direct access to matrix elements, so a store to an
2722        // individual element through MatrixElt is emitted as a direct store
2723        // to that element rather than a whole-matrix read-modify-write.
2724 Address DstAddr = Dst.getMatrixAddress();
2725 llvm::Type *DestAddrTy = DstAddr.getElementType();
2726 llvm::Type *ElemTy = DestAddrTy->getScalarType();
2727 CharUnits ElemAlign = CharUnits::fromQuantity(
2728 Quantity: CGM.getDataLayout().getPrefTypeAlign(Ty: ElemTy));
2729
2730 assert(ElemTy->getScalarSizeInBits() >= 8 &&
2731 "matrix element type must be at least byte-sized");
2732
2733 llvm::Value *Val = Src.getScalarVal();
2734 if (Val->getType()->getPrimitiveSizeInBits() <
2735 ElemTy->getScalarSizeInBits())
2736 Val = Builder.CreateZExt(V: Val, DestTy: ElemTy->getScalarType());
2737
2738 llvm::Value *Idx = Dst.getMatrixIdx();
2739 llvm::Value *Zero = llvm::ConstantInt::get(Ty: Int32Ty, V: 0);
2740 Address DstElemAddr =
2741 Builder.CreateGEP(Addr: DstAddr, IdxList: {Zero, Idx}, ElementType: DestAddrTy, Align: ElemAlign);
2742 Builder.CreateStore(Val, Addr: DstElemAddr, IsVolatile: Dst.isVolatileQualified());
2743 return;
2744 }
2745
2746 llvm::Value *Idx = Dst.getMatrixIdx();
2747 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2748 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2749 llvm::MatrixBuilder MB(Builder);
2750 MB.CreateIndexAssumption(Idx, NumElements: MatTy->getNumElementsFlattened());
2751 }
2752 llvm::Instruction *Load = Builder.CreateLoad(Addr: Dst.getMatrixAddress());
2753 llvm::Value *InsertVal = Src.getScalarVal();
2754 llvm::Value *Vec =
2755 Builder.CreateInsertElement(Vec: Load, NewElt: InsertVal, Idx, Name: "matins");
2756 auto *I = Builder.CreateStore(Val: Vec, Addr: Dst.getMatrixAddress(),
2757 IsVolatile: Dst.isVolatileQualified());
2758 addInstToCurrentSourceAtom(KeyInstruction: I, Backup: Vec);
2759 return;
2760 }
2761 if (Dst.isMatrixRow()) {
2762 // NOTE: Since there are no other languages that implement matrix single
2763 // subscripting, the logic here is specific to HLSL which allows
2764 // per-element stores to rows of matrices.
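    // Illustrative HLSL sketch: an assignment such as
    //   M[1] = v;       // whole row
    //   M[1].xz = v2;   // row swizzle
    // is lowered below to one scalar store per selected column of row 1.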
2765 assert(getLangOpts().HLSL &&
2766 "Store through matrix row LValues is only implemented for HLSL!");
2767 QualType MatTy = Dst.getType();
2768 const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();
2769
2770 unsigned NumRows = MT->getNumRows();
2771 unsigned NumCols = MT->getNumColumns();
2772 unsigned NumLanes = NumCols;
2773
2774 Address DstAddr = Dst.getMatrixAddress();
2775 llvm::Type *DestAddrTy = DstAddr.getElementType();
2776 llvm::Type *ElemTy = DestAddrTy->getScalarType();
2777 CharUnits ElemAlign =
2778 CharUnits::fromQuantity(Quantity: CGM.getDataLayout().getPrefTypeAlign(Ty: ElemTy));
2779
2780 assert(ElemTy->getScalarSizeInBits() >= 8 &&
2781 "matrix element type must be at least byte-sized");
2782
2783 llvm::Value *RowVal = Src.getScalarVal();
2784 if (RowVal->getType()->getScalarType()->getPrimitiveSizeInBits() <
2785 ElemTy->getScalarSizeInBits()) {
2786 auto *RowValVecTy = cast<llvm::FixedVectorType>(Val: RowVal->getType());
2787 llvm::Type *StorageElmTy = llvm::FixedVectorType::get(
2788 ElementType: ElemTy->getScalarType(), NumElts: RowValVecTy->getNumElements());
2789 RowVal = Builder.CreateZExt(V: RowVal, DestTy: StorageElmTy);
2790 }
2791
2792 llvm::MatrixBuilder MB(Builder);
2793
2794 llvm::Constant *ColConstsIndices = nullptr;
2795 if (Dst.isMatrixRowSwizzle()) {
2796 ColConstsIndices = Dst.getMatrixRowElts();
2797 NumLanes =
2798 llvm::cast<llvm::FixedVectorType>(Val: ColConstsIndices->getType())
2799 ->getNumElements();
2800 }
2801
2802 llvm::Value *Row = Dst.getMatrixRowIdx();
2803 for (unsigned Col = 0; Col < NumLanes; ++Col) {
2804 llvm::Value *ColIdx;
2805 if (ColConstsIndices)
2806 ColIdx = ColConstsIndices->getAggregateElement(Elt: Col);
2807 else
2808 ColIdx = llvm::ConstantInt::get(Ty: Row->getType(), V: Col);
2809 bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
2810 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2811 llvm::Value *EltIndex =
2812 MB.CreateIndex(RowIdx: Row, ColumnIdx: ColIdx, NumRows, NumCols, IsMatrixRowMajor);
2813 llvm::Value *Lane = llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: Col);
2814 llvm::Value *Zero = llvm::ConstantInt::get(Ty: Int32Ty, V: 0);
2815 llvm::Value *NewElt = Builder.CreateExtractElement(Vec: RowVal, Idx: Lane);
2816 Address DstElemAddr =
2817 Builder.CreateGEP(Addr: DstAddr, IdxList: {Zero, EltIndex}, ElementType: DestAddrTy, Align: ElemAlign);
2818 Builder.CreateStore(Val: NewElt, Addr: DstElemAddr, IsVolatile: Dst.isVolatileQualified());
2819 }
2820
2821 return;
2822 }
2823
2824 assert(Dst.isBitField() && "Unknown LValue type");
2825 return EmitStoreThroughBitfieldLValue(Src, Dst);
2826 }
2827
2828 // Handle __ptrauth qualification by re-signing the value.
2829 if (PointerAuthQualifier PointerAuth = Dst.getQuals().getPointerAuth()) {
2830 Src = RValue::get(V: EmitPointerAuthQualify(Qualifier: PointerAuth, Pointer: Src.getScalarVal(),
2831 ValueType: Dst.getType(), StorageAddress: Dst.getAddress(),
2832 /*known nonnull*/ IsKnownNonNull: false));
2833 }
2834
2835 // There's special magic for assigning into an ARC-qualified l-value.
2836 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2837 switch (Lifetime) {
2838 case Qualifiers::OCL_None:
2839 llvm_unreachable("present but none");
2840
2841 case Qualifiers::OCL_ExplicitNone:
2842 // nothing special
2843 break;
2844
2845 case Qualifiers::OCL_Strong:
2846 if (isInit) {
2847 Src = RValue::get(V: EmitARCRetain(type: Dst.getType(), value: Src.getScalarVal()));
2848 break;
2849 }
2850 EmitARCStoreStrong(lvalue: Dst, value: Src.getScalarVal(), /*ignore*/ resultIgnored: true);
2851 return;
2852
2853 case Qualifiers::OCL_Weak:
2854 if (isInit)
2855 // Initialize and then skip the primitive store.
2856 EmitARCInitWeak(addr: Dst.getAddress(), value: Src.getScalarVal());
2857 else
2858 EmitARCStoreWeak(addr: Dst.getAddress(), value: Src.getScalarVal(),
2859 /*ignore*/ ignored: true);
2860 return;
2861
2862 case Qualifiers::OCL_Autoreleasing:
2863 Src = RValue::get(V: EmitObjCExtendObjectLifetime(T: Dst.getType(),
2864 Ptr: Src.getScalarVal()));
2865 // fall into the normal path
2866 break;
2867 }
2868 }
2869
2870 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2871    // Store into a __weak object.
2872 Address LvalueDst = Dst.getAddress();
2873 llvm::Value *src = Src.getScalarVal();
2874 CGM.getObjCRuntime().EmitObjCWeakAssign(CGF&: *this, src, dest: LvalueDst);
2875 return;
2876 }
2877
2878 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2879    // Store into a __strong object.
2880 Address LvalueDst = Dst.getAddress();
2881 llvm::Value *src = Src.getScalarVal();
2882 if (Dst.isObjCIvar()) {
2883 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2884 llvm::Type *ResultType = IntPtrTy;
2885 Address dst = EmitPointerWithAlignment(E: Dst.getBaseIvarExp());
2886 llvm::Value *RHS = dst.emitRawPointer(CGF&: *this);
2887 RHS = Builder.CreatePtrToInt(V: RHS, DestTy: ResultType, Name: "sub.ptr.rhs.cast");
2888 llvm::Value *LHS = Builder.CreatePtrToInt(V: LvalueDst.emitRawPointer(CGF&: *this),
2889 DestTy: ResultType, Name: "sub.ptr.lhs.cast");
2890 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, Name: "ivar.offset");
2891 CGM.getObjCRuntime().EmitObjCIvarAssign(CGF&: *this, src, dest: dst, ivarOffset: BytesBetween);
2892 } else if (Dst.isGlobalObjCRef()) {
2893 CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF&: *this, src, dest: LvalueDst,
2894 threadlocal: Dst.isThreadLocalRef());
2895 }
2896 else
2897 CGM.getObjCRuntime().EmitObjCStrongCastAssign(CGF&: *this, src, dest: LvalueDst);
2898 return;
2899 }
2900
2901 assert(Src.isScalar() && "Can't emit an agg store with this method");
2902 EmitStoreOfScalar(value: Src.getScalarVal(), lvalue: Dst, isInit);
2903}
2904
2905void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2906 llvm::Value **Result) {
2907 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2908 llvm::Type *ResLTy = convertTypeForLoadStore(ASTTy: Dst.getType());
2909 Address Ptr = Dst.getBitFieldAddress();
2910
2911 // Get the source value, truncated to the width of the bit-field.
2912 llvm::Value *SrcVal = Src.getScalarVal();
2913
2914 // Cast the source to the storage type and shift it into place.
2915 SrcVal = Builder.CreateIntCast(V: SrcVal, DestTy: Ptr.getElementType(),
2916 /*isSigned=*/false);
2917 llvm::Value *MaskedVal = SrcVal;
2918
2919 const bool UseVolatile =
2920 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2921 Info.VolatileStorageSize != 0 && isAAPCS(TargetInfo: CGM.getTarget());
2922 const unsigned StorageSize =
2923 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2924 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2925  // See if there are other bits in the bit-field's storage that we'll need to
2926  // load and mask together with the source before storing.
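  // Worked example (illustrative only): for
  //   struct S { unsigned a : 3; unsigned b : 5; } s;  s.b = v;
  // with 8-bit storage on a typical little-endian layout, Offset is 3 and
  // Info.Size is 5, so the code below computes roughly
  //   bf.value = v & 0x1f
  //   bf.shl   = bf.value << 3
  //   bf.clear = old & 0x07
  //   bf.set   = bf.clear | bf.shl
  // and stores bf.set back into the container.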
2927 if (StorageSize != Info.Size) {
2928 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2929 llvm::Value *Val =
2930 Builder.CreateLoad(Addr: Ptr, IsVolatile: Dst.isVolatileQualified(), Name: "bf.load");
2931
2932 // Mask the source value as needed.
2933 if (!Dst.getType()->hasBooleanRepresentation())
2934 SrcVal = Builder.CreateAnd(
2935 LHS: SrcVal, RHS: llvm::APInt::getLowBitsSet(numBits: StorageSize, loBitsSet: Info.Size),
2936 Name: "bf.value");
2937 MaskedVal = SrcVal;
2938 if (Offset)
2939 SrcVal = Builder.CreateShl(LHS: SrcVal, RHS: Offset, Name: "bf.shl");
2940
2941 // Mask out the original value.
2942 Val = Builder.CreateAnd(
2943 LHS: Val, RHS: ~llvm::APInt::getBitsSet(numBits: StorageSize, loBit: Offset, hiBit: Offset + Info.Size),
2944 Name: "bf.clear");
2945
2946 // Or together the unchanged values and the source value.
2947 SrcVal = Builder.CreateOr(LHS: Val, RHS: SrcVal, Name: "bf.set");
2948 } else {
2949 assert(Offset == 0);
2950    // According to the AAPCS:
2951 // When a volatile bit-field is written, and its container does not overlap
2952 // with any non-bit-field member, its container must be read exactly once
2953 // and written exactly once using the access width appropriate to the type
2954 // of the container. The two accesses are not atomic.
2955 if (Dst.isVolatileQualified() && isAAPCS(TargetInfo: CGM.getTarget()) &&
2956 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2957 Builder.CreateLoad(Addr: Ptr, IsVolatile: true, Name: "bf.load");
2958 }
2959
2960 // Write the new value back out.
2961 auto *I = Builder.CreateStore(Val: SrcVal, Addr: Ptr, IsVolatile: Dst.isVolatileQualified());
2962 addInstToCurrentSourceAtom(KeyInstruction: I, Backup: SrcVal);
2963
2964 // Return the new value of the bit-field, if requested.
2965 if (Result) {
2966 llvm::Value *ResultVal = MaskedVal;
2967
2968 // Sign extend the value if needed.
2969 if (Info.IsSigned) {
2970 assert(Info.Size <= StorageSize);
2971 unsigned HighBits = StorageSize - Info.Size;
2972 if (HighBits) {
2973 ResultVal = Builder.CreateShl(LHS: ResultVal, RHS: HighBits, Name: "bf.result.shl");
2974 ResultVal = Builder.CreateAShr(LHS: ResultVal, RHS: HighBits, Name: "bf.result.ashr");
2975 }
2976 }
2977
2978 ResultVal = Builder.CreateIntCast(V: ResultVal, DestTy: ResLTy, isSigned: Info.IsSigned,
2979 Name: "bf.result.cast");
2980 *Result = EmitFromMemory(Value: ResultVal, Ty: Dst.getType());
2981 }
2982}
2983
2984void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
2985 LValue Dst) {
2986 llvm::Value *SrcVal = Src.getScalarVal();
2987 Address DstAddr = Dst.getExtVectorAddress();
2988 const llvm::Constant *Elts = Dst.getExtVectorElts();
2989 if (DstAddr.getElementType()->getScalarSizeInBits() >
2990 SrcVal->getType()->getScalarSizeInBits())
2991 SrcVal = Builder.CreateZExt(
2992 V: SrcVal, DestTy: convertTypeForLoadStore(ASTTy: Dst.getType(), LLVMTy: SrcVal->getType()));
2993
2994 if (getLangOpts().HLSL) {
2995 llvm::Type *DestAddrTy = DstAddr.getElementType();
2996 // HLSL allows storing to scalar values through ExtVector component LValues.
2997 // To support this we need to handle the case where the destination address
2998 // is a scalar.
2999 if (!DestAddrTy->isVectorTy()) {
3000 assert(!Dst.getType()->isVectorType() &&
3001 "this should only occur for non-vector l-values");
3002 Builder.CreateStore(Val: SrcVal, Addr: DstAddr, IsVolatile: Dst.isVolatileQualified());
3003 return;
3004 }
3005
3006 // HLSL allows direct access to vector elements, so storing to individual
3007 // elements of a vector through ExtVector is handled as separate store
3008 // instructions.
3009 // If we are updating multiple elements, Dst and Src are vectors; for
3010 // a single element update they are scalars.
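    // Illustrative HLSL sketch:
    //   float4 v; ...; v.xz = w;
    // becomes two scalar stores, one into element 0 and one into element 2 of
    // v's storage.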
3011 const VectorType *VTy = Dst.getType()->getAs<VectorType>();
3012 unsigned NumSrcElts = VTy ? VTy->getNumElements() : 1;
3013 CharUnits ElemAlign = CharUnits::fromQuantity(
3014 Quantity: CGM.getDataLayout().getPrefTypeAlign(Ty: DestAddrTy->getScalarType()));
3015 llvm::Value *Zero = llvm::ConstantInt::get(Ty: Int32Ty, V: 0);
3016
3017 for (unsigned I = 0; I != NumSrcElts; ++I) {
3018 llvm::Value *Val = VTy ? Builder.CreateExtractElement(
3019 Vec: SrcVal, Idx: llvm::ConstantInt::get(Ty: Int32Ty, V: I))
3020 : SrcVal;
3021 unsigned FieldNo = getAccessedFieldNo(Idx: I, Elts);
3022 Address DstElemAddr = Address::invalid();
3023 if (FieldNo == 0)
3024 DstElemAddr = DstAddr.withAlignment(NewAlignment: ElemAlign);
3025 else
3026 DstElemAddr = Builder.CreateGEP(
3027 Addr: DstAddr, IdxList: {Zero, llvm::ConstantInt::get(Ty: Int32Ty, V: FieldNo)},
3028 ElementType: DestAddrTy, Align: ElemAlign);
3029 Builder.CreateStore(Val, Addr: DstElemAddr, IsVolatile: Dst.isVolatileQualified());
3030 }
3031 return;
3032 }
3033
3034 // This access turns into a read/modify/write of the vector. Load the input
3035 // value now.
3036 llvm::Value *Vec = Builder.CreateLoad(Addr: DstAddr, IsVolatile: Dst.isVolatileQualified());
3037 llvm::Type *VecTy = Vec->getType();
3038
3039 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
3040 unsigned NumSrcElts = VTy->getNumElements();
3041 unsigned NumDstElts = cast<llvm::FixedVectorType>(Val: VecTy)->getNumElements();
3042 if (NumDstElts == NumSrcElts) {
3043      // When the source and destination have the same number of elements, use a
3044      // shuffle, inverting the access mask so that it maps destination lanes back
3045      // to source elements before the result is stored.
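      // E.g. (illustrative): writing through `.yzwx` of a 4-element vector
      // accesses fields {1,2,3,0}, so the mask built below is {3,0,1,2}:
      // source element i is routed to destination lane getAccessedFieldNo(i).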
3046 SmallVector<int, 4> Mask(NumDstElts);
3047 for (unsigned i = 0; i != NumSrcElts; ++i)
3048 Mask[getAccessedFieldNo(Idx: i, Elts)] = i;
3049
3050 Vec = Builder.CreateShuffleVector(V: SrcVal, Mask);
3051 } else if (NumDstElts > NumSrcElts) {
3052      // Extend the source vector to the destination length and then shuffle it
3053      // into the destination.
3054 // FIXME: since we're shuffling with undef, can we just use the indices
3055 // into that? This could be simpler.
3056 SmallVector<int, 4> ExtMask;
3057 for (unsigned i = 0; i != NumSrcElts; ++i)
3058 ExtMask.push_back(Elt: i);
3059 ExtMask.resize(N: NumDstElts, NV: -1);
3060 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(V: SrcVal, Mask: ExtMask);
3061 // build identity
3062 SmallVector<int, 4> Mask;
3063 for (unsigned i = 0; i != NumDstElts; ++i)
3064 Mask.push_back(Elt: i);
3065
3066 // When the vector size is odd and .odd or .hi is used, the last element
3067 // of the Elts constant array will be one past the size of the vector.
3068 // Ignore the last element here, if it is greater than the mask size.
3069 if (getAccessedFieldNo(Idx: NumSrcElts - 1, Elts) == Mask.size())
3070 NumSrcElts--;
3071
3072      // Redirect the written lanes to take the elements shuffled in from ExtSrcVal.
3073 for (unsigned i = 0; i != NumSrcElts; ++i)
3074 Mask[getAccessedFieldNo(Idx: i, Elts)] = i + NumDstElts;
3075 Vec = Builder.CreateShuffleVector(V1: Vec, V2: ExtSrcVal, Mask);
3076 } else {
3077 // We should never shorten the vector
3078 llvm_unreachable("unexpected shorten vector length");
3079 }
3080 } else {
3081    // If the Src is a scalar (not a vector) and the target is a vector, it must
3082    // be updating one element.
3083 unsigned InIdx = getAccessedFieldNo(Idx: 0, Elts);
3084 llvm::Value *Elt = llvm::ConstantInt::get(Ty: SizeTy, V: InIdx);
3085
3086 Vec = Builder.CreateInsertElement(Vec, NewElt: SrcVal, Idx: Elt);
3087 }
3088
3089 Builder.CreateStore(Val: Vec, Addr: Dst.getExtVectorAddress(),
3090 IsVolatile: Dst.isVolatileQualified());
3091}
3092
3093/// Stores to global named registers are always calls to intrinsics.
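/// Illustrative sketch: for a global declaration like
///   register unsigned long current_sp asm("sp");
/// on a 64-bit target, a store to current_sp is emitted roughly as
///   call void @llvm.write_register.i64(metadata !{!"sp"}, i64 %value)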
3094void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
3095 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
3096 "Bad type for register variable");
3097 llvm::MDNode *RegName = cast<llvm::MDNode>(
3098 Val: cast<llvm::MetadataAsValue>(Val: Dst.getGlobalReg())->getMetadata());
3099 assert(RegName && "Register LValue is not metadata");
3100
3101 // We accept integer and pointer types only
3102 llvm::Type *OrigTy = CGM.getTypes().ConvertType(T: Dst.getType());
3103 llvm::Type *Ty = OrigTy;
3104 if (OrigTy->isPointerTy())
3105 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
3106 llvm::Type *Types[] = { Ty };
3107
3108 llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::write_register, Tys: Types);
3109 llvm::Value *Value = Src.getScalarVal();
3110 if (OrigTy->isPointerTy())
3111 Value = Builder.CreatePtrToInt(V: Value, DestTy: Ty);
3112 Builder.CreateCall(
3113 Callee: F, Args: {llvm::MetadataAsValue::get(Context&: Ty->getContext(), MD: RegName), Value});
3114}
3115
3116// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
3117// generating the write-barrier API. It is currently a global, ivar,
3118// or neither.
3119static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
3120 LValue &LV,
3121 bool IsMemberAccess=false) {
3122 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
3123 return;
3124
3125 if (isa<ObjCIvarRefExpr>(Val: E)) {
3126 QualType ExpTy = E->getType();
3127 if (IsMemberAccess && ExpTy->isPointerType()) {
3128      // If the ivar is a structure pointer, assigning to a field of
3129      // this struct follows gcc's behavior and conservatively makes it a
3130      // non-ivar write-barrier.
3131 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
3132 if (ExpTy->isRecordType()) {
3133 LV.setObjCIvar(false);
3134 return;
3135 }
3136 }
3137 LV.setObjCIvar(true);
3138 auto *Exp = cast<ObjCIvarRefExpr>(Val: const_cast<Expr *>(E));
3139 LV.setBaseIvarExp(Exp->getBase());
3140 LV.setObjCArray(E->getType()->isArrayType());
3141 return;
3142 }
3143
3144 if (const auto *Exp = dyn_cast<DeclRefExpr>(Val: E)) {
3145 if (const auto *VD = dyn_cast<VarDecl>(Val: Exp->getDecl())) {
3146 if (VD->hasGlobalStorage()) {
3147 LV.setGlobalObjCRef(true);
3148 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
3149 }
3150 }
3151 LV.setObjCArray(E->getType()->isArrayType());
3152 return;
3153 }
3154
3155 if (const auto *Exp = dyn_cast<UnaryOperator>(Val: E)) {
3156 setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess);
3157 return;
3158 }
3159
3160 if (const auto *Exp = dyn_cast<ParenExpr>(Val: E)) {
3161 setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess);
3162 if (LV.isObjCIvar()) {
3163      // If the cast is to a structure pointer, follow gcc's behavior and make
3164      // it a non-ivar write-barrier.
3165 QualType ExpTy = E->getType();
3166 if (ExpTy->isPointerType())
3167 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
3168 if (ExpTy->isRecordType())
3169 LV.setObjCIvar(false);
3170 }
3171 return;
3172 }
3173
3174 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(Val: E)) {
3175 setObjCGCLValueClass(Ctx, E: Exp->getResultExpr(), LV);
3176 return;
3177 }
3178
3179 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(Val: E)) {
3180 setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess);
3181 return;
3182 }
3183
3184 if (const auto *Exp = dyn_cast<CStyleCastExpr>(Val: E)) {
3185 setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess);
3186 return;
3187 }
3188
3189 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(Val: E)) {
3190 setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess);
3191 return;
3192 }
3193
3194 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(Val: E)) {
3195 setObjCGCLValueClass(Ctx, E: Exp->getBase(), LV);
3196 if (LV.isObjCIvar() && !LV.isObjCArray())
3197      // Using array syntax to assign to what an ivar points to is not the
3198      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
3199 LV.setObjCIvar(false);
3200 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
3201      // Using array syntax to assign to what a global points to is not the
3202      // same as assigning to the global itself. {id *G;} G[i] = 0;
3203 LV.setGlobalObjCRef(false);
3204 return;
3205 }
3206
3207 if (const auto *Exp = dyn_cast<MemberExpr>(Val: E)) {
3208 setObjCGCLValueClass(Ctx, E: Exp->getBase(), LV, IsMemberAccess: true);
3209    // We don't know if the member is an 'ivar', but this flag is looked at
3210 // only in the context of LV.isObjCIvar().
3211 LV.setObjCArray(E->getType()->isArrayType());
3212 return;
3213 }
3214}
3215
3216static LValue EmitThreadPrivateVarDeclLValue(
3217 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
3218 llvm::Type *RealVarTy, SourceLocation Loc) {
3219 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
3220 Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
3221 CGF, VD, VDAddr: Addr, Loc);
3222 else
3223 Addr =
3224 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, VDAddr: Addr, Loc);
3225
3226 Addr = Addr.withElementType(ElemTy: RealVarTy);
3227 return CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl);
3228}
3229
3230static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
3231 const VarDecl *VD, QualType T) {
3232 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3233 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
3234 // Return an invalid address if variable is MT_To (or MT_Enter starting with
3235 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
3236 // and MT_To (or MT_Enter) with unified memory, return a valid address.
3237 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3238 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3239 !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
3240 return Address::invalid();
3241 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
3242 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3243 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3244 CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
3245 "Expected link clause OR to clause with unified memory enabled.");
3246 QualType PtrTy = CGF.getContext().getPointerType(T: VD->getType());
3247 Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
3248 return CGF.EmitLoadOfPointer(Ptr: Addr, PtrTy: PtrTy->castAs<PointerType>());
3249}
3250
3251Address
3252CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
3253 LValueBaseInfo *PointeeBaseInfo,
3254 TBAAAccessInfo *PointeeTBAAInfo) {
3255 llvm::LoadInst *Load =
3256 Builder.CreateLoad(Addr: RefLVal.getAddress(), IsVolatile: RefLVal.isVolatile());
3257 CGM.DecorateInstructionWithTBAA(Inst: Load, TBAAInfo: RefLVal.getTBAAInfo());
3258 QualType PTy = RefLVal.getType()->getPointeeType();
3259 CharUnits Align = CGM.getNaturalTypeAlignment(
3260 T: PTy, BaseInfo: PointeeBaseInfo, TBAAInfo: PointeeTBAAInfo, /*ForPointeeType=*/forPointeeType: true);
3261 if (!PTy->isIncompleteType()) {
3262 llvm::LLVMContext &Ctx = getLLVMContext();
3263 llvm::MDBuilder MDB(Ctx);
3264 // Emit !nonnull metadata
3265 if (CGM.getTypes().getTargetAddressSpace(T: PTy) == 0 &&
3266 !CGM.getCodeGenOpts().NullPointerIsValid)
3267 Load->setMetadata(KindID: llvm::LLVMContext::MD_nonnull,
3268 Node: llvm::MDNode::get(Context&: Ctx, MDs: {}));
3269 // Emit !align metadata
3270 if (PTy->isObjectType()) {
3271 auto AlignVal = Align.getQuantity();
3272 if (AlignVal > 1) {
3273 Load->setMetadata(
3274 KindID: llvm::LLVMContext::MD_align,
3275 Node: llvm::MDNode::get(Context&: Ctx, MDs: MDB.createConstant(C: llvm::ConstantInt::get(
3276 Ty: Builder.getInt64Ty(), V: AlignVal))));
3277 }
3278 }
3279 }
3280 return makeNaturalAddressForPointer(Ptr: Load, T: PTy, Alignment: Align,
3281 /*ForPointeeType=*/true, BaseInfo: PointeeBaseInfo,
3282 TBAAInfo: PointeeTBAAInfo);
3283}
3284
3285LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
3286 LValueBaseInfo PointeeBaseInfo;
3287 TBAAAccessInfo PointeeTBAAInfo;
3288 Address PointeeAddr = EmitLoadOfReference(RefLVal, PointeeBaseInfo: &PointeeBaseInfo,
3289 PointeeTBAAInfo: &PointeeTBAAInfo);
3290 return MakeAddrLValue(Addr: PointeeAddr, T: RefLVal.getType()->getPointeeType(),
3291 BaseInfo: PointeeBaseInfo, TBAAInfo: PointeeTBAAInfo);
3292}
3293
3294Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
3295 const PointerType *PtrTy,
3296 LValueBaseInfo *BaseInfo,
3297 TBAAAccessInfo *TBAAInfo) {
3298 llvm::Value *Addr = Builder.CreateLoad(Addr: Ptr);
3299 return makeNaturalAddressForPointer(Ptr: Addr, T: PtrTy->getPointeeType(),
3300 Alignment: CharUnits(), /*ForPointeeType=*/true,
3301 BaseInfo, TBAAInfo);
3302}
3303
3304LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
3305 const PointerType *PtrTy) {
3306 LValueBaseInfo BaseInfo;
3307 TBAAAccessInfo TBAAInfo;
3308 Address Addr = EmitLoadOfPointer(Ptr: PtrAddr, PtrTy, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo);
3309 return MakeAddrLValue(Addr, T: PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
3310}
3311
3312static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
3313 const Expr *E, const VarDecl *VD) {
3314 QualType T = E->getType();
3315
3316 // If it's thread_local, emit a call to its wrapper function instead.
3317 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
3318 CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
3319 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, LValType: T);
3320 // Check if the variable is marked as declare target with link clause in
3321 // device codegen.
3322 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
3323 Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
3324 if (Addr.isValid())
3325 return CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl);
3326 }
3327
3328 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(D: VD);
3329
3330 if (VD->getTLSKind() != VarDecl::TLS_None)
3331 V = CGF.Builder.CreateThreadLocalAddress(Ptr: V);
3332
3333 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(T: VD->getType());
3334 CharUnits Alignment = CGF.getContext().getDeclAlign(D: VD);
3335 Address Addr(V, RealVarTy, Alignment);
3336 // Emit reference to the private copy of the variable if it is an OpenMP
3337 // threadprivate variable.
3338 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
3339 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3340 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
3341 Loc: E->getExprLoc());
3342 }
3343 LValue LV = VD->getType()->isReferenceType() ?
3344 CGF.EmitLoadOfReferenceLValue(RefAddr: Addr, RefTy: VD->getType(),
3345 Source: AlignmentSource::Decl) :
3346 CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl);
3347 setObjCGCLValueClass(Ctx: CGF.getContext(), E, LV);
3348 return LV;
3349}
3350
3351llvm::Constant *CodeGenModule::getRawFunctionPointer(GlobalDecl GD,
3352 llvm::Type *Ty) {
3353 const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl());
3354 if (FD->hasAttr<WeakRefAttr>()) {
3355 ConstantAddress aliasee = GetWeakRefReference(VD: FD);
3356 return aliasee.getPointer();
3357 }
3358
3359 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
3360 return V;
3361}
3362
3363static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
3364 GlobalDecl GD) {
3365 const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl());
3366 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
3367 QualType ETy = E->getType();
3368 if (ETy->isCFIUncheckedCalleeFunctionType()) {
3369 if (auto *GV = dyn_cast<llvm::GlobalValue>(Val: V))
3370 V = llvm::NoCFIValue::get(GV);
3371 }
3372 CharUnits Alignment = CGF.getContext().getDeclAlign(D: FD);
3373 return CGF.MakeAddrLValue(V, T: ETy, Alignment, Source: AlignmentSource::Decl);
3374}
3375
3376static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
3377 llvm::Value *ThisValue) {
3378
3379 return CGF.EmitLValueForLambdaField(Field: FD, ThisValue);
3380}
3381
3382/// Named Registers are named metadata pointing to the register name
3383/// which will be read from/written to as an argument to the intrinsic
3384/// @llvm.read/write_register.
3385/// So far, only the name is being passed down, but other options such as
3386/// register type, allocation type or even optimization options could be
3387/// passed down via the metadata node.
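/// Illustrative sketch: `register long current_sp asm("sp");` produces a named
/// metadata node along the lines of
///   !llvm.named.register.sp = !{!0}
///   !0 = !{!"sp"}
/// whose operand is then passed to the read/write_register intrinsics.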
3388static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
3389 SmallString<64> Name("llvm.named.register.");
3390 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
3391 assert(Asm->getLabel().size() < 64-Name.size() &&
3392 "Register name too big");
3393 Name.append(RHS: Asm->getLabel());
3394 llvm::NamedMDNode *M =
3395 CGM.getModule().getOrInsertNamedMetadata(Name);
3396 if (M->getNumOperands() == 0) {
3397 llvm::MDString *Str = llvm::MDString::get(Context&: CGM.getLLVMContext(),
3398 Str: Asm->getLabel());
3399 llvm::Metadata *Ops[] = {Str};
3400 M->addOperand(M: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: Ops));
3401 }
3402
3403 CharUnits Alignment = CGM.getContext().getDeclAlign(D: VD);
3404
3405 llvm::Value *Ptr =
3406 llvm::MetadataAsValue::get(Context&: CGM.getLLVMContext(), MD: M->getOperand(i: 0));
3407 return LValue::MakeGlobalReg(V: Ptr, alignment: Alignment, type: VD->getType());
3408}
3409
3410/// Determine whether we can emit a reference to \p VD from the current
3411/// context, despite not necessarily having seen an odr-use of the variable in
3412/// this context.
3413static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
3414 const DeclRefExpr *E,
3415 const VarDecl *VD) {
3416 // For a variable declared in an enclosing scope, do not emit a spurious
3417 // reference even if we have a capture, as that will emit an unwarranted
3418 // reference to our capture state, and will likely generate worse code than
3419 // emitting a local copy.
3420 if (E->refersToEnclosingVariableOrCapture())
3421 return false;
3422
3423 // For a local declaration declared in this function, we can always reference
3424 // it even if we don't have an odr-use.
3425 if (VD->hasLocalStorage()) {
3426 return VD->getDeclContext() ==
3427 dyn_cast_or_null<DeclContext>(Val: CGF.CurCodeDecl);
3428 }
3429
3430 // For a global declaration, we can emit a reference to it if we know
3431 // for sure that we are able to emit a definition of it.
3432 VD = VD->getDefinition(C&: CGF.getContext());
3433 if (!VD)
3434 return false;
3435
3436 // Don't emit a spurious reference if it might be to a variable that only
3437 // exists on a different device / target.
3438 // FIXME: This is unnecessarily broad. Check whether this would actually be a
3439 // cross-target reference.
3440 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
3441 CGF.getLangOpts().OpenCL) {
3442 return false;
3443 }
3444
3445 // We can emit a spurious reference only if the linkage implies that we'll
3446 // be emitting a non-interposable symbol that will be retained until link
3447 // time.
3448 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3449 case llvm::GlobalValue::ExternalLinkage:
3450 case llvm::GlobalValue::LinkOnceODRLinkage:
3451 case llvm::GlobalValue::WeakODRLinkage:
3452 case llvm::GlobalValue::InternalLinkage:
3453 case llvm::GlobalValue::PrivateLinkage:
3454 return true;
3455 default:
3456 return false;
3457 }
3458}
3459
3460LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
3461 const NamedDecl *ND = E->getDecl();
3462 QualType T = E->getType();
3463
3464 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3465 "should not emit an unevaluated operand");
3466
3467 if (const auto *VD = dyn_cast<VarDecl>(Val: ND)) {
3468    // Global named registers are accessed via intrinsics only.
3469 if (VD->getStorageClass() == SC_Register &&
3470 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3471 return EmitGlobalNamedRegister(VD, CGM);
3472
3473 // If this DeclRefExpr does not constitute an odr-use of the variable,
3474 // we're not permitted to emit a reference to it in general, and it might
3475 // not be captured if capture would be necessary for a use. Emit the
3476 // constant value directly instead.
3477 if (E->isNonOdrUse() == NOUR_Constant &&
3478 (VD->getType()->isReferenceType() ||
3479 !canEmitSpuriousReferenceToVariable(CGF&: *this, E, VD))) {
3480 VD->getAnyInitializer(D&: VD);
3481 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3482 loc: E->getLocation(), value: *VD->evaluateValue(), T: VD->getType());
3483 assert(Val && "failed to emit constant expression");
3484
3485 Address Addr = Address::invalid();
3486 if (!VD->getType()->isReferenceType()) {
3487 // Spill the constant value to a global.
3488 Addr = CGM.createUnnamedGlobalFrom(D: *VD, Constant: Val,
3489 Align: getContext().getDeclAlign(D: VD));
3490 llvm::Type *VarTy = getTypes().ConvertTypeForMem(T: VD->getType());
3491 auto *PTy = llvm::PointerType::get(
3492 C&: getLLVMContext(), AddressSpace: getTypes().getTargetAddressSpace(T: VD->getType()));
3493 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty: PTy, ElementTy: VarTy);
3494 } else {
3495 // Should we be using the alignment of the constant pointer we emitted?
3496 CharUnits Alignment =
3497 CGM.getNaturalTypeAlignment(T: E->getType(),
3498 /* BaseInfo= */ nullptr,
3499 /* TBAAInfo= */ nullptr,
3500 /* forPointeeType= */ true);
3501 Addr = makeNaturalAddressForPointer(Ptr: Val, T, Alignment);
3502 }
3503 return MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl);
3504 }
3505
3506 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3507
3508 // Check for captured variables.
3509 if (E->refersToEnclosingVariableOrCapture()) {
3510 VD = VD->getCanonicalDecl();
3511 if (auto *FD = LambdaCaptureFields.lookup(Val: VD))
3512 return EmitCapturedFieldLValue(CGF&: *this, FD, ThisValue: CXXABIThisValue);
3513 if (CapturedStmtInfo) {
3514 auto I = LocalDeclMap.find(Val: VD);
3515 if (I != LocalDeclMap.end()) {
3516 LValue CapLVal;
3517 if (VD->getType()->isReferenceType())
3518 CapLVal = EmitLoadOfReferenceLValue(RefAddr: I->second, RefTy: VD->getType(),
3519 Source: AlignmentSource::Decl);
3520 else
3521 CapLVal = MakeAddrLValue(Addr: I->second, T);
3522 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3523 // in simd context.
3524 if (getLangOpts().OpenMP &&
3525 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3526 CapLVal.setNontemporal(/*Value=*/true);
3527 return CapLVal;
3528 }
3529 LValue CapLVal =
3530 EmitCapturedFieldLValue(CGF&: *this, FD: CapturedStmtInfo->lookup(VD),
3531 ThisValue: CapturedStmtInfo->getContextValue());
3532 Address LValueAddress = CapLVal.getAddress();
3533 CapLVal = MakeAddrLValue(Addr: Address(LValueAddress.emitRawPointer(CGF&: *this),
3534 LValueAddress.getElementType(),
3535 getContext().getDeclAlign(D: VD)),
3536 T: CapLVal.getType(),
3537 BaseInfo: LValueBaseInfo(AlignmentSource::Decl),
3538 TBAAInfo: CapLVal.getTBAAInfo());
3539 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3540 // in simd context.
3541 if (getLangOpts().OpenMP &&
3542 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3543 CapLVal.setNontemporal(/*Value=*/true);
3544 return CapLVal;
3545 }
3546
3547 assert(isa<BlockDecl>(CurCodeDecl));
3548 Address addr = GetAddrOfBlockDecl(var: VD);
3549 return MakeAddrLValue(Addr: addr, T, Source: AlignmentSource::Decl);
3550 }
3551 }
3552
3553 // FIXME: We should be able to assert this for FunctionDecls as well!
3554 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3555 // those with a valid source location.
3556 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3557 !E->getLocation().isValid()) &&
3558 "Should not use decl without marking it used!");
3559
3560 if (ND->hasAttr<WeakRefAttr>()) {
3561 const auto *VD = cast<ValueDecl>(Val: ND);
3562 ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
3563 return MakeAddrLValue(Addr: Aliasee, T, Source: AlignmentSource::Decl);
3564 }
3565
3566 if (const auto *VD = dyn_cast<VarDecl>(Val: ND)) {
3567 // Check if this is a global variable.
3568 if (VD->hasLinkage() || VD->isStaticDataMember())
3569 return EmitGlobalVarDeclLValue(CGF&: *this, E, VD);
3570
3571 Address addr = Address::invalid();
3572
3573 // The variable should generally be present in the local decl map.
3574 auto iter = LocalDeclMap.find(Val: VD);
3575 if (iter != LocalDeclMap.end()) {
3576 addr = iter->second;
3577
3578      // Otherwise, it might be a static local we haven't emitted yet for
3579      // some reason; most likely because it's in an outer function.
3580 } else if (VD->isStaticLocal()) {
3581 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3582 D: *VD, Linkage: CGM.getLLVMLinkageVarDefinition(VD));
3583 addr = Address(
3584 var, ConvertTypeForMem(T: VD->getType()), getContext().getDeclAlign(D: VD));
3585
3586 // No other cases for now.
3587 } else {
3588 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3589 }
3590
3591 // Handle threadlocal function locals.
3592 if (VD->getTLSKind() != VarDecl::TLS_None)
3593 addr = addr.withPointer(
3594 NewPointer: Builder.CreateThreadLocalAddress(Ptr: addr.getBasePointer()),
3595 IsKnownNonNull: NotKnownNonNull);
3596
3597 // Check for OpenMP threadprivate variables.
3598 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3599 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3600 return EmitThreadPrivateVarDeclLValue(
3601 CGF&: *this, VD, T, Addr: addr, RealVarTy: getTypes().ConvertTypeForMem(T: VD->getType()),
3602 Loc: E->getExprLoc());
3603 }
3604
3605 // Drill into block byref variables.
3606 bool isBlockByref = VD->isEscapingByref();
3607 if (isBlockByref) {
3608 addr = emitBlockByrefAddress(baseAddr: addr, V: VD);
3609 }
3610
3611 // Drill into reference types.
3612 LValue LV = VD->getType()->isReferenceType() ?
3613 EmitLoadOfReferenceLValue(RefAddr: addr, RefTy: VD->getType(), Source: AlignmentSource::Decl) :
3614 MakeAddrLValue(Addr: addr, T, Source: AlignmentSource::Decl);
3615
3616 bool isLocalStorage = VD->hasLocalStorage();
3617
3618 bool NonGCable = isLocalStorage &&
3619 !VD->getType()->isReferenceType() &&
3620 !isBlockByref;
3621 if (NonGCable) {
3622 LV.getQuals().removeObjCGCAttr();
3623 LV.setNonGC(true);
3624 }
3625
3626 bool isImpreciseLifetime =
3627 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3628 if (isImpreciseLifetime)
3629 LV.setARCPreciseLifetime(ARCImpreciseLifetime);
3630 setObjCGCLValueClass(Ctx: getContext(), E, LV);
3631 return LV;
3632 }
3633
3634 if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND))
3635 return EmitFunctionDeclLValue(CGF&: *this, E, GD: FD);
3636
3637 // FIXME: While we're emitting a binding from an enclosing scope, all other
3638 // DeclRefExprs we see should be implicitly treated as if they also refer to
3639 // an enclosing scope.
3640 if (const auto *BD = dyn_cast<BindingDecl>(Val: ND)) {
3641 if (E->refersToEnclosingVariableOrCapture()) {
3642 auto *FD = LambdaCaptureFields.lookup(Val: BD);
3643 return EmitCapturedFieldLValue(CGF&: *this, FD, ThisValue: CXXABIThisValue);
3644 }
3645    // Suppress debug location updates when visiting the binding, since the
3646    // binding may emit instructions that would otherwise be associated with the
3647    // binding itself, rather than with the expression referencing the binding.
3648    // (This leads to jumpy debug-stepping behavior, where the debugger jumps
3649    // back to the binding declaration and then back to the expression
3650    // referencing the binding.)
3651 DisableDebugLocationUpdates D(*this);
3652 return EmitLValue(E: BD->getBinding(), IsKnownNonNull: NotKnownNonNull);
3653 }
3654
3655 // We can form DeclRefExprs naming GUID declarations when reconstituting
3656 // non-type template parameters into expressions.
3657 if (const auto *GD = dyn_cast<MSGuidDecl>(Val: ND))
3658 return MakeAddrLValue(Addr: CGM.GetAddrOfMSGuidDecl(GD), T,
3659 Source: AlignmentSource::Decl);
3660
3661 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(Val: ND)) {
3662 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3663 auto AS = getLangASFromTargetAS(TargetAS: ATPO.getAddressSpace());
3664
3665 if (AS != T.getAddressSpace()) {
3666 auto TargetAS = getContext().getTargetAddressSpace(AS: T.getAddressSpace());
3667 auto PtrTy = llvm::PointerType::get(C&: CGM.getLLVMContext(), AddressSpace: TargetAS);
3668 auto ASC = getTargetHooks().performAddrSpaceCast(CGM, V: ATPO.getPointer(),
3669 SrcAddr: AS, DestTy: PtrTy);
3670 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3671 }
3672
3673 return MakeAddrLValue(Addr: ATPO, T, Source: AlignmentSource::Decl);
3674 }
3675
3676 llvm_unreachable("Unhandled DeclRefExpr");
3677}
3678
3679LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
3680 // __extension__ doesn't affect lvalue-ness.
3681 if (E->getOpcode() == UO_Extension)
3682 return EmitLValue(E: E->getSubExpr());
3683
3684 QualType ExprTy = getContext().getCanonicalType(T: E->getSubExpr()->getType());
3685 switch (E->getOpcode()) {
3686 default: llvm_unreachable("Unknown unary operator lvalue!");
3687 case UO_Deref: {
3688 QualType T = E->getSubExpr()->getType()->getPointeeType();
3689 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3690
3691 LValueBaseInfo BaseInfo;
3692 TBAAAccessInfo TBAAInfo;
3693 Address Addr = EmitPointerWithAlignment(E: E->getSubExpr(), BaseInfo: &BaseInfo,
3694 TBAAInfo: &TBAAInfo);
3695 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3696 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
3697
3698 // We should not generate __weak write barrier on indirect reference
3699 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3700 // But, we continue to generate __strong write barrier on indirect write
3701 // into a pointer to object.
3702 if (getLangOpts().ObjC &&
3703 getLangOpts().getGC() != LangOptions::NonGC &&
3704 LV.isObjCWeak())
3705 LV.setNonGC(!E->isOBJCGCCandidate(Ctx&: getContext()));
3706 return LV;
3707 }
3708 case UO_Real:
3709 case UO_Imag: {
3710 LValue LV = EmitLValue(E: E->getSubExpr());
3711 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3712
3713 // __real is valid on scalars. This is a faster way of testing that.
3714 // __imag can only produce an rvalue on scalars.
3715 if (E->getOpcode() == UO_Real &&
3716 !LV.getAddress().getElementType()->isStructTy()) {
3717 assert(E->getSubExpr()->getType()->isArithmeticType());
3718 return LV;
3719 }
3720
3721 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3722
3723 Address Component =
3724 (E->getOpcode() == UO_Real
3725 ? emitAddrOfRealComponent(complex: LV.getAddress(), complexType: LV.getType())
3726 : emitAddrOfImagComponent(complex: LV.getAddress(), complexType: LV.getType()));
3727 LValue ElemLV = MakeAddrLValue(Addr: Component, T, BaseInfo: LV.getBaseInfo(),
3728 TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: T));
3729 ElemLV.getQuals().addQualifiers(Q: LV.getQuals());
3730 return ElemLV;
3731 }
3732 case UO_PreInc:
3733 case UO_PreDec: {
3734 LValue LV = EmitLValue(E: E->getSubExpr());
3735 bool isInc = E->getOpcode() == UO_PreInc;
3736
3737 if (E->getType()->isAnyComplexType())
3738 EmitComplexPrePostIncDec(E, LV, isInc, isPre: true/*isPre*/);
3739 else
3740 EmitScalarPrePostIncDec(E, LV, isInc, isPre: true/*isPre*/);
3741 return LV;
3742 }
3743 }
3744}
3745
3746LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
3747 return MakeAddrLValue(Addr: CGM.GetAddrOfConstantStringFromLiteral(S: E),
3748 T: E->getType(), Source: AlignmentSource::Decl);
3749}
3750
3751LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
3752 return MakeAddrLValue(Addr: CGM.GetAddrOfConstantStringFromObjCEncode(E),
3753 T: E->getType(), Source: AlignmentSource::Decl);
3754}
3755
3756LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
3757 auto SL = E->getFunctionName();
3758 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3759 StringRef FnName = CurFn->getName();
3760 FnName.consume_front(Prefix: "\01");
3761 StringRef NameItems[] = {
3762 PredefinedExpr::getIdentKindName(IK: E->getIdentKind()), FnName};
3763 std::string GVName = llvm::join(Begin: NameItems, End: NameItems + 2, Separator: ".");
3764 if (auto *BD = dyn_cast_or_null<BlockDecl>(Val: CurCodeDecl)) {
3765 std::string Name = std::string(SL->getString());
3766 if (!Name.empty()) {
3767 unsigned Discriminator =
3768 CGM.getCXXABI().getMangleContext().getBlockId(BD, Local: true);
3769 if (Discriminator)
3770 Name += "_" + Twine(Discriminator + 1).str();
3771 auto C = CGM.GetAddrOfConstantCString(Str: Name, GlobalName: GVName);
3772 return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl);
3773 } else {
3774 auto C = CGM.GetAddrOfConstantCString(Str: std::string(FnName), GlobalName: GVName);
3775 return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl);
3776 }
3777 }
3778 auto C = CGM.GetAddrOfConstantStringFromLiteral(S: SL, Name: GVName);
3779 return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl);
3780}
3781
3782/// Emit a type description suitable for use by a runtime sanitizer library. The
3783/// format of a type descriptor is
3784///
3785/// \code
3786/// { i16 TypeKind, i16 TypeInfo }
3787/// \endcode
3788///
3789/// followed by an array of i8 containing the type name with extra information
3790/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3791/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3792/// anything else.
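/// For example (illustrative), a 32-bit signed 'int' yields a descriptor
/// roughly of the form
///   { i16 0, i16 11, [6 x i8] c"'int'\00" }
/// i.e. TK_Integer, TypeInfo = (log2(32) << 1) | 1, and the quoted type name.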
3793llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
3794 // Only emit each type's descriptor once.
3795 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(Ty: T))
3796 return C;
3797
3798 uint16_t TypeKind = TK_Unknown;
3799 uint16_t TypeInfo = 0;
3800 bool IsBitInt = false;
3801
3802 if (T->isIntegerType()) {
3803 TypeKind = TK_Integer;
3804 TypeInfo = (llvm::Log2_32(Value: getContext().getTypeSize(T)) << 1) |
3805 (T->isSignedIntegerType() ? 1 : 0);
3806    // Follow the suggestion from the discussion of issue 64100, so that we can
3807    // write the exact number of bits in TypeName after '\0', making it
3808    // <diagnostic-like type name>.'\0'.<32-bit width>.
3809 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3810      // Do sanity checks, as we are using a 32-bit type to store the bit length.
3811 assert(getContext().getTypeSize(T) > 0 &&
3812 " non positive amount of bits in __BitInt type");
3813 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3814 " too many bits in __BitInt type");
3815
3816 // Redefine TypeKind with the actual __BitInt type if we have signed
3817 // BitInt.
3818 TypeKind = TK_BitInt;
3819 IsBitInt = true;
3820 }
3821 } else if (T->isFloatingType()) {
3822 TypeKind = TK_Float;
3823 TypeInfo = getContext().getTypeSize(T);
3824 }
3825
3826 // Format the type name as if for a diagnostic, including quotes and
3827 // optionally an 'aka'.
3828 SmallString<32> Buffer;
3829 CGM.getDiags().ConvertArgToString(Kind: DiagnosticsEngine::ak_qualtype,
3830 Val: (intptr_t)T.getAsOpaquePtr(), Modifier: StringRef(),
3831 Argument: StringRef(), PrevArgs: {}, Output&: Buffer, QualTypeVals: {});
3832
3833 if (IsBitInt) {
3834    // The structure is: a 0 to end the string, a 32-bit unsigned integer in
3835    // target endianness, then a zero.
3836 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3837 const auto *EIT = T->castAs<BitIntType>();
3838 uint32_t Bits = EIT->getNumBits();
3839 llvm::support::endian::write32(P: S + 1, V: Bits,
3840 E: getTarget().isBigEndian()
3841 ? llvm::endianness::big
3842 : llvm::endianness::little);
3843 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3844 Buffer.append(RHS: Str);
3845 }
3846
3847 llvm::Constant *Components[] = {
3848 Builder.getInt16(C: TypeKind), Builder.getInt16(C: TypeInfo),
3849 llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: Buffer)
3850 };
3851 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(V: Components);
3852
3853 auto *GV = new llvm::GlobalVariable(
3854 CGM.getModule(), Descriptor->getType(),
3855 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3856 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3857 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3858
3859 // Remember the descriptor for this type.
3860 CGM.setTypeDescriptorInMap(Ty: T, C: GV);
3861
3862 return GV;
3863}
3864
3865llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3866 llvm::Type *TargetTy = IntPtrTy;
3867
3868 if (V->getType() == TargetTy)
3869 return V;
3870
3871 // Floating-point types which fit into intptr_t are bitcast to integers
3872 // and then passed directly (after zero-extension, if necessary).
3873 if (V->getType()->isFloatingPointTy()) {
3874 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3875 if (Bits <= TargetTy->getIntegerBitWidth())
3876 V = Builder.CreateBitCast(V, DestTy: llvm::Type::getIntNTy(C&: getLLVMContext(),
3877 N: Bits));
3878 }
3879
3880 // Integers which fit in intptr_t are zero-extended and passed directly.
3881 if (V->getType()->isIntegerTy() &&
3882 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3883 return Builder.CreateZExt(V, DestTy: TargetTy);
3884
3885 // Pointers are passed directly, everything else is passed by address.
3886 if (!V->getType()->isPointerTy()) {
3887 RawAddress Ptr = CreateDefaultAlignTempAlloca(Ty: V->getType());
3888 Builder.CreateStore(Val: V, Addr: Ptr);
3889 V = Ptr.getPointer();
3890 }
3891 return Builder.CreatePtrToInt(V, DestTy: TargetTy);
3892}
3893
3894/// Emit a representation of a SourceLocation for passing to a handler
3895/// in a sanitizer runtime library. The format for this data is:
3896/// \code
3897/// struct SourceLocation {
3898/// const char *Filename;
3899/// int32_t Line, Column;
3900/// };
3901/// \endcode
3902/// For an invalid SourceLocation, the Filename pointer is null.
3903llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3904 llvm::Constant *Filename;
3905 int Line, Column;
3906
3907 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3908 if (PLoc.isValid()) {
3909 StringRef FilenameString = PLoc.getFilename();
3910
3911 int PathComponentsToStrip =
3912 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
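    // Illustrative example of the stripping below: for "a/b/c/file.cpp", a
    // strip count of 1 keeps "b/c/file.cpp", while a count of -2 keeps only
    // the last two components, "c/file.cpp"; if everything would be stripped,
    // just the filename remains.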
3913 if (PathComponentsToStrip < 0) {
3914 assert(PathComponentsToStrip != INT_MIN);
3915 int PathComponentsToKeep = -PathComponentsToStrip;
3916 auto I = llvm::sys::path::rbegin(path: FilenameString);
3917 auto E = llvm::sys::path::rend(path: FilenameString);
3918 while (I != E && --PathComponentsToKeep)
3919 ++I;
3920
3921 FilenameString = FilenameString.substr(Start: I - E);
3922 } else if (PathComponentsToStrip > 0) {
3923 auto I = llvm::sys::path::begin(path: FilenameString);
3924 auto E = llvm::sys::path::end(path: FilenameString);
3925 while (I != E && PathComponentsToStrip--)
3926 ++I;
3927
3928 if (I != E)
3929 FilenameString =
3930 FilenameString.substr(Start: I - llvm::sys::path::begin(path: FilenameString));
3931 else
3932 FilenameString = llvm::sys::path::filename(path: FilenameString);
3933 }
3934
3935 auto FilenameGV =
3936 CGM.GetAddrOfConstantCString(Str: std::string(FilenameString), GlobalName: ".src");
3937 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3938 GV: cast<llvm::GlobalVariable>(
3939 Val: FilenameGV.getPointer()->stripPointerCasts()));
3940 Filename = FilenameGV.getPointer();
3941 Line = PLoc.getLine();
3942 Column = PLoc.getColumn();
3943 } else {
3944 Filename = llvm::Constant::getNullValue(Ty: Int8PtrTy);
3945 Line = Column = 0;
3946 }
3947
3948 llvm::Constant *Data[] = {Filename, Builder.getInt32(C: Line),
3949 Builder.getInt32(C: Column)};
3950
3951 return llvm::ConstantStruct::getAnon(V: Data);
3952}
3953
3954namespace {
3955/// Specify under what conditions this check can be recovered
3956enum class CheckRecoverableKind {
3957 /// Always terminate program execution if this check fails.
3958 Unrecoverable,
3959 /// Check supports recovering, runtime has both fatal (noreturn) and
3960 /// non-fatal handlers for this check.
3961 Recoverable,
3962  /// The runtime conditionally aborts; we always need to support recovery.
3963 AlwaysRecoverable
3964};
3965}
3966
3967static CheckRecoverableKind
3968getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal) {
3969 if (Ordinal == SanitizerKind::SO_Vptr)
3970 return CheckRecoverableKind::AlwaysRecoverable;
3971 else if (Ordinal == SanitizerKind::SO_Return ||
3972 Ordinal == SanitizerKind::SO_Unreachable)
3973 return CheckRecoverableKind::Unrecoverable;
3974 else
3975 return CheckRecoverableKind::Recoverable;
3976}
3977
3978namespace {
3979struct SanitizerHandlerInfo {
3980 char const *const Name;
3981 unsigned Version;
3982};
3983}
3984
3985const SanitizerHandlerInfo SanitizerHandlers[] = {
3986#define SANITIZER_CHECK(Enum, Name, Version, Msg) {#Name, Version},
3987 LIST_SANITIZER_CHECKS
3988#undef SANITIZER_CHECK
3989};
3990
3991static void emitCheckHandlerCall(CodeGenFunction &CGF,
3992 llvm::FunctionType *FnType,
3993 ArrayRef<llvm::Value *> FnArgs,
3994 SanitizerHandler CheckHandler,
3995 CheckRecoverableKind RecoverKind, bool IsFatal,
3996 llvm::BasicBlock *ContBB, bool NoMerge) {
3997 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3998 std::optional<ApplyDebugLocation> DL;
3999 if (!CGF.Builder.getCurrentDebugLocation()) {
4000 // Ensure that the call has at least an artificial debug location.
4001 DL.emplace(args&: CGF, args: SourceLocation());
4002 }
4003 bool NeedsAbortSuffix =
4004 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
4005 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
4006 bool HandlerPreserveAllRegs =
4007 CGF.CGM.getCodeGenOpts().SanitizeHandlerPreserveAllRegs;
4008 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
4009 const StringRef CheckName = CheckInfo.Name;
4010 std::string FnName = "__ubsan_handle_" + CheckName.str();
4011 if (CheckInfo.Version && !MinimalRuntime)
4012 FnName += "_v" + llvm::utostr(X: CheckInfo.Version);
4013 if (MinimalRuntime)
4014 FnName += "_minimal";
4015 if (NeedsAbortSuffix)
4016 FnName += "_abort";
4017 if (HandlerPreserveAllRegs && !NeedsAbortSuffix)
4018 FnName += "_preserve";
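  // For illustration (names assembled from the pieces above): a versioned,
  // recoverable check might call __ubsan_handle_<name>_v1, its fatal variant
  // __ubsan_handle_<name>_v1_abort, and the minimal runtime variant
  // __ubsan_handle_<name>_minimal.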
4019 bool MayReturn =
4020 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
4021
4022 llvm::AttrBuilder B(CGF.getLLVMContext());
4023 if (!MayReturn) {
4024 B.addAttribute(Val: llvm::Attribute::NoReturn)
4025 .addAttribute(Val: llvm::Attribute::NoUnwind);
4026 }
4027 B.addUWTableAttr(Kind: llvm::UWTableKind::Default);
4028
4029 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
4030 Ty: FnType, Name: FnName,
4031 ExtraAttrs: llvm::AttributeList::get(C&: CGF.getLLVMContext(),
4032 Index: llvm::AttributeList::FunctionIndex, B),
4033 /*Local=*/true);
4034 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(callee: Fn, args: FnArgs);
4035 NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel ||
4036 (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
4037 if (NoMerge)
4038 HandlerCall->addFnAttr(Kind: llvm::Attribute::NoMerge);
4039 if (HandlerPreserveAllRegs && !NeedsAbortSuffix) {
4040 // N.B. there is also a clang::CallingConv which is not what we want here.
4041 HandlerCall->setCallingConv(llvm::CallingConv::PreserveAll);
4042 }
4043 if (!MayReturn) {
4044 HandlerCall->setDoesNotReturn();
4045 CGF.Builder.CreateUnreachable();
4046 } else {
4047 CGF.Builder.CreateBr(Dest: ContBB);
4048 }
4049}
4050
4051void CodeGenFunction::EmitCheck(
4052 ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
4053 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
4054 ArrayRef<llvm::Value *> DynamicArgs, const TrapReason *TR) {
4055 assert(IsSanitizerScope);
4056 assert(Checked.size() > 0);
4057 assert(CheckHandler >= 0 &&
4058 size_t(CheckHandler) < std::size(SanitizerHandlers));
4059 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
4060
4061 llvm::Value *FatalCond = nullptr;
4062 llvm::Value *RecoverableCond = nullptr;
4063 llvm::Value *TrapCond = nullptr;
4064 bool NoMerge = false;
4065 // Expand checks into:
4066 // (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ...
4067 // We need separate allow_ubsan_check intrinsics because they have separately
4068 // specified cutoffs.
4069 // This expression looks expensive but will be simplified after
4070 // LowerAllowCheckPass.
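  // Illustrative sketch of the guarded form for a single check value %ok:
  //   %allow   = call i1 @llvm.allow.ubsan.check(i8 <ordinal>)
  //   %not     = xor i1 %allow, true
  //   %guarded = or i1 %ok, %not
  // so the handler is only reachable when the check fails and the check has
  // not been skipped.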
4071 for (auto &[Check, Ord] : Checked) {
4072 llvm::Value *GuardedCheck = Check;
4073 if (ClSanitizeGuardChecks ||
4074 (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] > 0)) {
4075 llvm::Value *Allow = Builder.CreateCall(
4076 Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::allow_ubsan_check),
4077 Args: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: Ord));
4078 GuardedCheck = Builder.CreateOr(LHS: Check, RHS: Builder.CreateNot(V: Allow));
4079 }
4080
4081 // -fsanitize-trap= overrides -fsanitize-recover=.
4082 llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(O: Ord) ? TrapCond
4083 : CGM.getCodeGenOpts().SanitizeRecover.has(O: Ord)
4084 ? RecoverableCond
4085 : FatalCond;
4086 Cond = Cond ? Builder.CreateAnd(LHS: Cond, RHS: GuardedCheck) : GuardedCheck;
4087
4088 if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(O: Ord))
4089 NoMerge = true;
4090 }
4091
4092 if (TrapCond)
4093 EmitTrapCheck(Checked: TrapCond, CheckHandlerID: CheckHandler, NoMerge, TR);
4094 if (!FatalCond && !RecoverableCond)
4095 return;
4096
4097 llvm::Value *JointCond;
4098 if (FatalCond && RecoverableCond)
4099 JointCond = Builder.CreateAnd(LHS: FatalCond, RHS: RecoverableCond);
4100 else
4101 JointCond = FatalCond ? FatalCond : RecoverableCond;
4102 assert(JointCond);
4103
4104 CheckRecoverableKind RecoverKind = getRecoverableKind(Ordinal: Checked[0].second);
4105 assert(SanOpts.has(Checked[0].second));
4106#ifndef NDEBUG
4107 for (int i = 1, n = Checked.size(); i < n; ++i) {
4108 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
4109 "All recoverable kinds in a single check must be same!");
4110 assert(SanOpts.has(Checked[i].second));
4111 }
4112#endif
4113
4114 llvm::BasicBlock *Cont = createBasicBlock(name: "cont");
4115 llvm::BasicBlock *Handlers = createBasicBlock(name: "handler." + CheckName);
4116 llvm::Instruction *Branch = Builder.CreateCondBr(Cond: JointCond, True: Cont, False: Handlers);
4117 // Give a hint that we very much do not expect to execute the handler.
4118 llvm::MDBuilder MDHelper(getLLVMContext());
4119 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
4120 Branch->setMetadata(KindID: llvm::LLVMContext::MD_prof, Node);
4121 EmitBlock(BB: Handlers);
4122
4123 // Clear arguments for the MinimalRuntime handler.
4124 if (CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
4125 StaticArgs = {};
4126 DynamicArgs = {};
4127 }
4128
4129 // Handler functions take an i8* pointing to the (handler-specific) static
4130 // information block, followed by a sequence of intptr_t arguments
4131 // representing operand values.
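  // As a sketch (handler names and the abort/minimal-runtime variants are
  // chosen in emitCheckHandlerCall), a handler call ends up shaped roughly as:
  //   call void @__ubsan_handle_<check>[_abort](ptr @static_info, i64 %op0, ...)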
4132 SmallVector<llvm::Value *, 4> Args;
4133 SmallVector<llvm::Type *, 4> ArgTypes;
4134
4135 Args.reserve(N: DynamicArgs.size() + 1);
4136 ArgTypes.reserve(N: DynamicArgs.size() + 1);
4137
4138 // Emit handler arguments and create handler function type.
4139 if (!StaticArgs.empty()) {
4140 llvm::Constant *Info = llvm::ConstantStruct::getAnon(V: StaticArgs);
4141 auto *InfoPtr = new llvm::GlobalVariable(
4142 CGM.getModule(), Info->getType(),
4143 // Non-constant global is used in a handler to deduplicate reports.
4144 // TODO: change deduplication logic and make it constant.
4145 /*isConstant=*/false, llvm::GlobalVariable::PrivateLinkage, Info, "",
4146 nullptr, llvm::GlobalVariable::NotThreadLocal,
4147 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
4148 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4149 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV: InfoPtr);
4150 Args.push_back(Elt: InfoPtr);
4151 ArgTypes.push_back(Elt: Args.back()->getType());
4152 }
4153
4154 for (llvm::Value *DynamicArg : DynamicArgs) {
4155 Args.push_back(Elt: EmitCheckValue(V: DynamicArg));
4156 ArgTypes.push_back(Elt: IntPtrTy);
4157 }
4158
4159 llvm::FunctionType *FnType =
4160 llvm::FunctionType::get(Result: CGM.VoidTy, Params: ArgTypes, isVarArg: false);
4161
4162 if (!FatalCond || !RecoverableCond) {
4163 // Simple case: we need to generate a single handler call, either
4164 // fatal, or non-fatal.
4165 emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind,
4166 IsFatal: (FatalCond != nullptr), ContBB: Cont, NoMerge);
4167 } else {
4168 // Emit two handler calls: the first for the set of unrecoverable checks,
4169 // the second for the recoverable ones.
4170 llvm::BasicBlock *NonFatalHandlerBB =
4171 createBasicBlock(name: "non_fatal." + CheckName);
4172 llvm::BasicBlock *FatalHandlerBB = createBasicBlock(name: "fatal." + CheckName);
4173 Builder.CreateCondBr(Cond: FatalCond, True: NonFatalHandlerBB, False: FatalHandlerBB);
4174 EmitBlock(BB: FatalHandlerBB);
4175 emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind, IsFatal: true,
4176 ContBB: NonFatalHandlerBB, NoMerge);
4177 EmitBlock(BB: NonFatalHandlerBB);
4178 emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind, IsFatal: false,
4179 ContBB: Cont, NoMerge);
4180 }
4181
4182 EmitBlock(BB: Cont);
4183}
4184
4185void CodeGenFunction::EmitCfiSlowPathCheck(
4186 SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
4187 llvm::ConstantInt *TypeId, llvm::Value *Ptr,
4188 ArrayRef<llvm::Constant *> StaticArgs) {
4189 llvm::BasicBlock *Cont = createBasicBlock(name: "cfi.cont");
4190
4191 llvm::BasicBlock *CheckBB = createBasicBlock(name: "cfi.slowpath");
4192 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, True: Cont, False: CheckBB);
4193
4194 llvm::MDBuilder MDHelper(getLLVMContext());
4195 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
4196 BI->setMetadata(KindID: llvm::LLVMContext::MD_prof, Node);
4197
4198 EmitBlock(BB: CheckBB);
4199
4200 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(O: Ordinal);
4201
4202 llvm::CallInst *CheckCall;
4203 llvm::FunctionCallee SlowPathFn;
4204 if (WithDiag) {
4205 llvm::Constant *Info = llvm::ConstantStruct::getAnon(V: StaticArgs);
4206 auto *InfoPtr =
4207 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
4208 llvm::GlobalVariable::PrivateLinkage, Info);
4209 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4210 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV: InfoPtr);
4211
4212 SlowPathFn = CGM.getModule().getOrInsertFunction(
4213 Name: "__cfi_slowpath_diag",
4214 T: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, Int8PtrTy, Int8PtrTy},
4215 isVarArg: false));
4216 CheckCall = Builder.CreateCall(Callee: SlowPathFn, Args: {TypeId, Ptr, InfoPtr});
4217 } else {
4218 SlowPathFn = CGM.getModule().getOrInsertFunction(
4219 Name: "__cfi_slowpath",
4220 T: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, Int8PtrTy}, isVarArg: false));
4221 CheckCall = Builder.CreateCall(Callee: SlowPathFn, Args: {TypeId, Ptr});
4222 }
4223
4224 CGM.setDSOLocal(
4225 cast<llvm::GlobalValue>(Val: SlowPathFn.getCallee()->stripPointerCasts()));
4226 CheckCall->setDoesNotThrow();
4227
4228 EmitBlock(BB: Cont);
4229}
4230
4231// Emit a stub for __cfi_check function so that the linker knows about this
4232// symbol in LTO mode.
4233void CodeGenFunction::EmitCfiCheckStub() {
4234 llvm::Module *M = &CGM.getModule();
4235 ASTContext &C = getContext();
4236 QualType QInt64Ty = C.getIntTypeForBitwidth(DestWidth: 64, Signed: false);
4237
4238 FunctionArgList FnArgs;
4239 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
4240 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
4241 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
4242 ImplicitParamKind::Other);
4243 FnArgs.push_back(Elt: &ArgCallsiteTypeId);
4244 FnArgs.push_back(Elt: &ArgAddr);
4245 FnArgs.push_back(Elt: &ArgCFICheckFailData);
4246 const CGFunctionInfo &FI =
4247 CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: C.VoidTy, args: FnArgs);
4248
4249 llvm::Function *F = llvm::Function::Create(
4250 Ty: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, VoidPtrTy, VoidPtrTy}, isVarArg: false),
4251 Linkage: llvm::GlobalValue::WeakAnyLinkage, N: "__cfi_check", M);
4252 CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F, /*IsThunk=*/false);
4253 CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F);
4254 F->setAlignment(llvm::Align(4096));
4255 CGM.setDSOLocal(F);
4256
4257 llvm::LLVMContext &Ctx = M->getContext();
4258 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Context&: Ctx, Name: "entry", Parent: F);
4259 // CrossDSOCFI pass is not executed if there is no executable code.
4260 SmallVector<llvm::Value*> Args{F->getArg(i: 2), F->getArg(i: 1)};
4261 llvm::CallInst::Create(Func: M->getFunction(Name: "__cfi_check_fail"), Args, NameStr: "", InsertBefore: BB);
4262 llvm::ReturnInst::Create(C&: Ctx, retVal: nullptr, InsertBefore: BB);
4263}
4264
4265// This function is basically a switch over the CFI failure kind, which is
4266// extracted from CFICheckFailData (the first function argument). Each case is
4267// either llvm.trap or a call to one of the two runtime handlers, based on the
4268// -fsanitize-trap and -fsanitize-recover settings. The default case (invalid
4269// failure kind) traps, but this should really never happen. CFICheckFailData
4270// can be nullptr if the calling module has -fsanitize-trap behavior for this
4271// check kind; in this case __cfi_check_fail traps as well.
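// For illustration only, the emitted definition behaves roughly like:
//   void __cfi_check_fail(void *Data, void *Addr) {
//     if (!Data) trap();                      // trap-only calling module
//     switch (CheckKind loaded from Data) {   // CFITCK_VCall, CFITCK_ICall, ...
//       // each kind: diagnostic handler or trap, per -fsanitize-trap/-recover
//     }
//   }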
4272void CodeGenFunction::EmitCfiCheckFail() {
4273 auto CheckHandler = SanitizerHandler::CFICheckFail;
4274 // TODO: the SanitizerKind is not yet determined for this check (and might
4275 // not even be available, if Data == nullptr). However, we still want to
4276 // annotate the instrumentation. We approximate this by using all the CFI
4277 // kinds.
4278 SanitizerDebugLocation SanScope(
4279 this,
4280 {SanitizerKind::SO_CFIVCall, SanitizerKind::SO_CFINVCall,
4281 SanitizerKind::SO_CFIDerivedCast, SanitizerKind::SO_CFIUnrelatedCast,
4282 SanitizerKind::SO_CFIICall},
4283 CheckHandler);
4284 FunctionArgList Args;
4285 ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
4286 ImplicitParamKind::Other);
4287 ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
4288 ImplicitParamKind::Other);
4289 Args.push_back(Elt: &ArgData);
4290 Args.push_back(Elt: &ArgAddr);
4291
4292 const CGFunctionInfo &FI =
4293 CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: getContext().VoidTy, args: Args);
4294
4295 llvm::Function *F = llvm::Function::Create(
4296 Ty: llvm::FunctionType::get(Result: VoidTy, Params: {VoidPtrTy, VoidPtrTy}, isVarArg: false),
4297 Linkage: llvm::GlobalValue::WeakODRLinkage, N: "__cfi_check_fail", M: &CGM.getModule());
4298
4299 CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F, /*IsThunk=*/false);
4300 CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F);
4301 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
4302
4303 StartFunction(GD: GlobalDecl(), RetTy: CGM.getContext().VoidTy, Fn: F, FnInfo: FI, Args,
4304 Loc: SourceLocation());
4305
4306 ApplyDebugLocation ADL = ApplyDebugLocation::CreateArtificial(CGF&: *this);
4307
4308 // This function is not affected by NoSanitizeList. This function does
4309 // not have a source location, but "src:*" would still apply. Revert any
4310 // changes to SanOpts made in StartFunction.
4311 SanOpts = CGM.getLangOpts().Sanitize;
4312
4313 llvm::Value *Data =
4314 EmitLoadOfScalar(Addr: GetAddrOfLocalVar(VD: &ArgData), /*Volatile=*/false,
4315 Ty: CGM.getContext().VoidPtrTy, Loc: ArgData.getLocation());
4316 llvm::Value *Addr =
4317 EmitLoadOfScalar(Addr: GetAddrOfLocalVar(VD: &ArgAddr), /*Volatile=*/false,
4318 Ty: CGM.getContext().VoidPtrTy, Loc: ArgAddr.getLocation());
4319
4320 // Data == nullptr means the calling module has trap behaviour for this check.
4321 llvm::Value *DataIsNotNullPtr =
4322 Builder.CreateICmpNE(LHS: Data, RHS: llvm::ConstantPointerNull::get(T: Int8PtrTy));
4323 // TODO: since there is no data, we don't know the CheckKind, and therefore
4324 // cannot inspect CGM.getCodeGenOpts().SanitizeMergeHandlers. We default to
4325 // NoMerge = false. Users can disable merging by disabling optimization.
4326 EmitTrapCheck(Checked: DataIsNotNullPtr, CheckHandlerID: SanitizerHandler::CFICheckFail,
4327 /*NoMerge=*/false);
4328
4329 llvm::StructType *SourceLocationTy =
4330 llvm::StructType::get(elt1: VoidPtrTy, elts: Int32Ty, elts: Int32Ty);
4331 llvm::StructType *CfiCheckFailDataTy =
4332 llvm::StructType::get(elt1: Int8Ty, elts: SourceLocationTy, elts: VoidPtrTy);
4333
4334 llvm::Value *V = Builder.CreateConstGEP2_32(
4335 Ty: CfiCheckFailDataTy, Ptr: Builder.CreatePointerCast(V: Data, DestTy: DefaultPtrTy), Idx0: 0, Idx1: 0);
4336
4337 Address CheckKindAddr(V, Int8Ty, getIntAlign());
4338 llvm::Value *CheckKind = Builder.CreateLoad(Addr: CheckKindAddr);
4339
4340 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
4341 Context&: CGM.getLLVMContext(),
4342 MD: llvm::MDString::get(Context&: CGM.getLLVMContext(), Str: "all-vtables"));
4343 llvm::Value *ValidVtable = Builder.CreateZExt(
4344 V: Builder.CreateCall(Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_test),
4345 Args: {Addr, AllVtables}),
4346 DestTy: IntPtrTy);
4347
4348 const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
4349 {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
4350 {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
4351 {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
4352 {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
4353 {CFITCK_ICall, SanitizerKind::SO_CFIICall}};
4354
4355 for (auto CheckKindOrdinalPair : CheckKinds) {
4356 int Kind = CheckKindOrdinalPair.first;
4357 SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
4358
4359 // TODO: we could apply SanitizerAnnotateDebugInfo(Ordinal) instead of
4360 // relying on the SanitizerScope with all CFI ordinals
4361
4362 llvm::Value *Cond =
4363 Builder.CreateICmpNE(LHS: CheckKind, RHS: llvm::ConstantInt::get(Ty: Int8Ty, V: Kind));
4364 if (CGM.getLangOpts().Sanitize.has(O: Ordinal))
4365 EmitCheck(Checked: std::make_pair(x&: Cond, y&: Ordinal), CheckHandler: SanitizerHandler::CFICheckFail,
4366 StaticArgs: {}, DynamicArgs: {Data, Addr, ValidVtable});
4367 else
4368 // TODO: we can't rely on CGM.getCodeGenOpts().SanitizeMergeHandlers.
4369 // Although the compiler allows SanitizeMergeHandlers to be set
4370 // independently of CGM.getLangOpts().Sanitize, Driver/SanitizerArgs.cpp
4371 // requires that SanitizeMergeHandlers is a subset of Sanitize.
4372 EmitTrapCheck(Checked: Cond, CheckHandlerID: CheckHandler, /*NoMerge=*/false);
4373 }
4374
4375 FinishFunction();
4376 // The only reference to this function will be created during LTO link.
4377 // Make sure it survives until then.
4378 CGM.addUsedGlobal(GV: F);
4379}
4380
4381void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
4382 if (SanOpts.has(K: SanitizerKind::Unreachable)) {
4383 auto CheckOrdinal = SanitizerKind::SO_Unreachable;
4384 auto CheckHandler = SanitizerHandler::BuiltinUnreachable;
4385 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4386 EmitCheck(Checked: std::make_pair(x: static_cast<llvm::Value *>(Builder.getFalse()),
4387 y&: CheckOrdinal),
4388 CheckHandler, StaticArgs: EmitCheckSourceLocation(Loc), DynamicArgs: {});
4389 }
4390 Builder.CreateUnreachable();
4391}
4392
4393void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
4394 SanitizerHandler CheckHandlerID,
4395 bool NoMerge, const TrapReason *TR) {
4396 llvm::BasicBlock *Cont = createBasicBlock(name: "cont");
4397
4398 // If we're optimizing, collapse all calls to trap down to just one per
4399 // check-type per function to save on code size.
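  // E.g. two unrelated checks of the same kind in one function can share a
  // single "trap" block containing one @llvm.ubsantrap(i8 <handler id>) call,
  // unless merging is disabled (NoMerge) below.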
4400 if ((int)TrapBBs.size() <= CheckHandlerID)
4401 TrapBBs.resize(N: CheckHandlerID + 1);
4402
4403 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
4404
4405 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
4406 llvm::StringRef TrapMessage;
4407 llvm::StringRef TrapCategory;
4408 auto DebugTrapReasonKind = CGM.getCodeGenOpts().getSanitizeDebugTrapReasons();
4409 if (TR && !TR->isEmpty() &&
4410 DebugTrapReasonKind ==
4411 CodeGenOptions::SanitizeDebugTrapReasonKind::Detailed) {
4412 TrapMessage = TR->getMessage();
4413 TrapCategory = TR->getCategory();
4414 } else {
4415 TrapMessage = GetUBSanTrapForHandler(ID: CheckHandlerID);
4416 TrapCategory = "Undefined Behavior Sanitizer";
4417 }
4418
4419 if (getDebugInfo() && !TrapMessage.empty() &&
4420 DebugTrapReasonKind !=
4421 CodeGenOptions::SanitizeDebugTrapReasonKind::None &&
4422 TrapLocation) {
4423 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
4424 TrapLocation, Category: TrapCategory, FailureMsg: TrapMessage);
4425 }
4426
4427 NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
4428 (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
4429
4430 llvm::MDBuilder MDHelper(getLLVMContext());
4431 if (TrapBB && !NoMerge) {
4432 auto Call = TrapBB->begin();
4433 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
4434
4435 Call->applyMergedLocation(LocA: Call->getDebugLoc(), LocB: TrapLocation);
4436
4437 Builder.CreateCondBr(Cond: Checked, True: Cont, False: TrapBB,
4438 BranchWeights: MDHelper.createLikelyBranchWeights());
4439 } else {
4440 TrapBB = createBasicBlock(name: "trap");
4441 Builder.CreateCondBr(Cond: Checked, True: Cont, False: TrapBB,
4442 BranchWeights: MDHelper.createLikelyBranchWeights());
4443 EmitBlock(BB: TrapBB);
4444
4445 ApplyDebugLocation applyTrapDI(*this, TrapLocation);
4446
4447 llvm::CallInst *TrapCall =
4448 Builder.CreateCall(Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::ubsantrap),
4449 Args: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: CheckHandlerID));
4450
4451 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4452 auto A = llvm::Attribute::get(Context&: getLLVMContext(), Kind: "trap-func-name",
4453 Val: CGM.getCodeGenOpts().TrapFuncName);
4454 TrapCall->addFnAttr(Attr: A);
4455 }
4456 if (NoMerge)
4457 TrapCall->addFnAttr(Kind: llvm::Attribute::NoMerge);
4458 TrapCall->setDoesNotReturn();
4459 TrapCall->setDoesNotThrow();
4460 Builder.CreateUnreachable();
4461 }
4462
4463 EmitBlock(BB: Cont);
4464}
4465
4466llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
4467 llvm::CallInst *TrapCall =
4468 Builder.CreateCall(Callee: CGM.getIntrinsic(IID: IntrID));
4469
4470 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4471 auto A = llvm::Attribute::get(Context&: getLLVMContext(), Kind: "trap-func-name",
4472 Val: CGM.getCodeGenOpts().TrapFuncName);
4473 TrapCall->addFnAttr(Attr: A);
4474 }
4475
4476 if (InNoMergeAttributedStmt)
4477 TrapCall->addFnAttr(Kind: llvm::Attribute::NoMerge);
4478 return TrapCall;
4479}
4480
4481Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
4482 LValueBaseInfo *BaseInfo,
4483 TBAAAccessInfo *TBAAInfo) {
4484 assert(E->getType()->isArrayType() &&
4485 "Array to pointer decay must have array source type!");
4486
4487 // Expressions of array type can't be bitfields or vector elements.
4488 LValue LV = EmitLValue(E);
4489 Address Addr = LV.getAddress();
4490
4491 // If the array type was an incomplete type, we need to make sure
4492 // the decay ends up being the right type.
4493 llvm::Type *NewTy = ConvertType(T: E->getType());
4494 Addr = Addr.withElementType(ElemTy: NewTy);
4495
4496 // Note that VLA pointers are always decayed, so we don't need to do
4497 // anything here.
4498 if (!E->getType()->isVariableArrayType()) {
4499 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4500 "Expected pointer to array");
4501 Addr = Builder.CreateConstArrayGEP(Addr, Index: 0, Name: "arraydecay");
4502 }
4503
4504 // The result of this decay conversion points to an array element within the
4505 // base lvalue. However, since TBAA currently does not support representing
4506 // accesses to elements of member arrays, we conservatively represent accesses
4507 // to the pointee object as if it had no base lvalue specified.
4508 // TODO: Support TBAA for member arrays.
4509 QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
4510 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
4511 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(AccessType: EltType);
4512
4513 return Addr.withElementType(ElemTy: ConvertTypeForMem(T: EltType));
4514}
4515
4516/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4517/// array to pointer, return the array subexpression.
4518static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4519 // If this isn't just an array->pointer decay, bail out.
4520 const auto *CE = dyn_cast<CastExpr>(Val: E);
4521 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4522 return nullptr;
4523
4524 // If this is a decay from variable width array, bail out.
4525 const Expr *SubExpr = CE->getSubExpr();
4526 if (SubExpr->getType()->isVariableArrayType())
4527 return nullptr;
4528
4529 return SubExpr;
4530}
4531
4532static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
4533 llvm::Type *elemType,
4534 llvm::Value *ptr,
4535 ArrayRef<llvm::Value*> indices,
4536 bool inbounds,
4537 bool signedIndices,
4538 SourceLocation loc,
4539 const llvm::Twine &name = "arrayidx") {
4540 if (inbounds) {
4541 return CGF.EmitCheckedInBoundsGEP(ElemTy: elemType, Ptr: ptr, IdxList: indices, SignedIndices: signedIndices,
4542 IsSubtraction: CodeGenFunction::NotSubtraction, Loc: loc,
4543 Name: name);
4544 } else {
4545 return CGF.Builder.CreateGEP(Ty: elemType, Ptr: ptr, IdxList: indices, Name: name);
4546 }
4547}
4548
4549static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4550 ArrayRef<llvm::Value *> indices,
4551 llvm::Type *elementType, bool inbounds,
4552 bool signedIndices, SourceLocation loc,
4553 CharUnits align,
4554 const llvm::Twine &name = "arrayidx") {
4555 if (inbounds) {
4556 return CGF.EmitCheckedInBoundsGEP(Addr: addr, IdxList: indices, elementType, SignedIndices: signedIndices,
4557 IsSubtraction: CodeGenFunction::NotSubtraction, Loc: loc,
4558 Align: align, Name: name);
4559 } else {
4560 return CGF.Builder.CreateGEP(Addr: addr, IdxList: indices, ElementType: elementType, Align: align, Name: name);
4561 }
4562}
4563
4564static QualType getFixedSizeElementType(const ASTContext &ctx,
4565 const VariableArrayType *vla) {
4566 QualType eltType;
4567 do {
4568 eltType = vla->getElementType();
4569 } while ((vla = ctx.getAsVariableArrayType(T: eltType)));
4570 return eltType;
4571}
4572
4573static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
4574 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4575}
4576
4577static bool hasBPFPreserveStaticOffset(const Expr *E) {
4578 if (!E)
4579 return false;
4580 QualType PointeeType = E->getType()->getPointeeType();
4581 if (PointeeType.isNull())
4582 return false;
4583 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4584 return hasBPFPreserveStaticOffset(D: BaseDecl);
4585 return false;
4586}
4587
4588// Wraps Addr with a call to the llvm.preserve.static.offset intrinsic.
4589static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
4590 Address &Addr) {
4591 if (!CGF.getTarget().getTriple().isBPF())
4592 return Addr;
4593
4594 llvm::Function *Fn =
4595 CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::preserve_static_offset);
4596 llvm::CallInst *Call = CGF.Builder.CreateCall(Callee: Fn, Args: {Addr.emitRawPointer(CGF)});
4597 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4598}
4599
4600/// Given an array base, check whether its member access belongs to a record
4601/// with the preserve_access_index attribute.
4602static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4603 if (!ArrayBase || !CGF.getDebugInfo())
4604 return false;
4605
4606 // Only support base as either a MemberExpr or DeclRefExpr.
4607 // DeclRefExpr to cover cases like:
4608 // struct s { int a; int b[10]; };
4609 // struct s *p;
4610 // p[1].a
4611 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4612 // p->b[5] is a MemberExpr example.
4613 const Expr *E = ArrayBase->IgnoreImpCasts();
4614 if (const auto *ME = dyn_cast<MemberExpr>(Val: E))
4615 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4616
4617 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E)) {
4618 const auto *VarDef = dyn_cast<VarDecl>(Val: DRE->getDecl());
4619 if (!VarDef)
4620 return false;
4621
4622 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4623 if (!PtrT)
4624 return false;
4625
4626 const auto *PointeeT = PtrT->getPointeeType()
4627 ->getUnqualifiedDesugaredType();
4628 if (const auto *RecT = dyn_cast<RecordType>(Val: PointeeT))
4629 return RecT->getDecl()
4630 ->getMostRecentDecl()
4631 ->hasAttr<BPFPreserveAccessIndexAttr>();
4632 return false;
4633 }
4634
4635 return false;
4636}
4637
4638static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4639 ArrayRef<llvm::Value *> indices,
4640 QualType eltType, bool inbounds,
4641 bool signedIndices, SourceLocation loc,
4642 QualType *arrayType = nullptr,
4643 const Expr *Base = nullptr,
4644 const llvm::Twine &name = "arrayidx") {
4645 // All the indices except the last must be zero.
4646#ifndef NDEBUG
4647 for (auto *idx : indices.drop_back())
4648 assert(isa<llvm::ConstantInt>(idx) &&
4649 cast<llvm::ConstantInt>(idx)->isZero());
4650#endif
4651
4652 // Determine the element size of the statically-sized base. This is
4653 // the thing that the indices are expressed in terms of.
4654 if (auto vla = CGF.getContext().getAsVariableArrayType(T: eltType)) {
4655 eltType = getFixedSizeElementType(ctx: CGF.getContext(), vla);
4656 }
4657
4658 // We can use that to compute the best alignment of the element.
4659 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: eltType);
4660 CharUnits eltAlign =
4661 getArrayElementAlign(arrayAlign: addr.getAlignment(), idx: indices.back(), eltSize);
4662
4663 if (hasBPFPreserveStaticOffset(E: Base))
4664 addr = wrapWithBPFPreserveStaticOffset(CGF, Addr&: addr);
4665
4666 llvm::Value *eltPtr;
4667 auto LastIndex = dyn_cast<llvm::ConstantInt>(Val: indices.back());
4668 if (!LastIndex ||
4669 (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, ArrayBase: Base))) {
4670 addr = emitArraySubscriptGEP(CGF, addr, indices,
4671 elementType: CGF.ConvertTypeForMem(T: eltType), inbounds,
4672 signedIndices, loc, align: eltAlign, name);
4673 return addr;
4674 } else {
4675 // Remember the original array subscript for the BPF target.
4676 unsigned idx = LastIndex->getZExtValue();
4677 llvm::DIType *DbgInfo = nullptr;
4678 if (arrayType)
4679 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(Ty: *arrayType, Loc: loc);
4680 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4681 ElTy: addr.getElementType(), Base: addr.emitRawPointer(CGF), Dimension: indices.size() - 1,
4682 LastIndex: idx, DbgInfo);
4683 }
4684
4685 return Address(eltPtr, CGF.ConvertTypeForMem(T: eltType), eltAlign);
4686}
4687
4688namespace {
4689
4690/// StructFieldAccess is a simple visitor class to grab the first l-value to
4691/// r-value cast Expr.
4692struct StructFieldAccess
4693 : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
4694 const Expr *VisitCastExpr(const CastExpr *E) {
4695 if (E->getCastKind() == CK_LValueToRValue)
4696 return E;
4697 return Visit(S: E->getSubExpr());
4698 }
4699 const Expr *VisitParenExpr(const ParenExpr *E) {
4700 return Visit(S: E->getSubExpr());
4701 }
4702};
4703
4704} // end anonymous namespace
4705
4706/// Compute the bit offset of \p Field within \p RD (recursing into nested records); returns true if the field was found.
4707static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
4708 const FieldDecl *Field, int64_t &Offset) {
4709 ASTContext &Ctx = CGF.getContext();
4710 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(D: RD);
4711 unsigned FieldNo = 0;
4712
4713 for (const FieldDecl *FD : RD->fields()) {
4714 if (FD == Field) {
4715 Offset += Layout.getFieldOffset(FieldNo);
4716 return true;
4717 }
4718
4719 QualType Ty = FD->getType();
4720 if (Ty->isRecordType())
4721 if (getFieldOffsetInBits(CGF, RD: Ty->getAsRecordDecl(), Field, Offset)) {
4722 Offset += Layout.getFieldOffset(FieldNo);
4723 return true;
4724 }
4725
4726 if (!RD->isUnion())
4727 ++FieldNo;
4728 }
4729
4730 return false;
4731}
4732
4733/// Returns the relative offset difference between \p FD1 and \p FD2.
4734/// \code
4735/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4736/// \endcode
4737/// Both fields must be within the same struct.
4738static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4739 const FieldDecl *FD1,
4740 const FieldDecl *FD2) {
4741 const RecordDecl *FD1OuterRec =
4742 FD1->getParent()->getOuterLexicalRecordContext();
4743 const RecordDecl *FD2OuterRec =
4744 FD2->getParent()->getOuterLexicalRecordContext();
4745
4746 if (FD1OuterRec != FD2OuterRec)
4747 // Fields must be within the same RecordDecl.
4748 return std::optional<int64_t>();
4749
4750 int64_t FD1Offset = 0;
4751 if (!getFieldOffsetInBits(CGF, RD: FD1OuterRec, Field: FD1, Offset&: FD1Offset))
4752 return std::optional<int64_t>();
4753
4754 int64_t FD2Offset = 0;
4755 if (!getFieldOffsetInBits(CGF, RD: FD2OuterRec, Field: FD2, Offset&: FD2Offset))
4756 return std::optional<int64_t>();
4757
4758 return std::make_optional<int64_t>(t: FD1Offset - FD2Offset);
4759}
4760
4761/// EmitCountedByBoundsChecking - If the array being accessed has a "counted_by"
4762/// attribute, generate bounds checking code. The "count" field is at the top
4763/// level of the struct or in an anonymous struct that is also at the top level.
4764/// Future expansions may allow the "count" to reside anywhere in the struct,
4765/// but the value of "counted_by" will be a "simple" path to the count, e.g.
4766/// "a.b.count", so we shouldn't need the full force of EmitLValue or similar
4767/// to emit the correct GEP.
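///
/// For example (illustrative):
/// \code
///   struct S {
///     int count;
///     int fam[] __attribute__((counted_by(count)));
///   };
/// \endcode
/// An access such as p->fam[idx] is then bounds-checked against the value
/// loaded from p->count.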
4768void CodeGenFunction::EmitCountedByBoundsChecking(
4769 const Expr *ArrayExpr, QualType ArrayType, Address ArrayInst,
4770 QualType IndexType, llvm::Value *IndexVal, bool Accessed,
4771 bool FlexibleArray) {
4772 const auto *ME = dyn_cast<MemberExpr>(Val: ArrayExpr->IgnoreImpCasts());
4773 if (!ME || !ME->getMemberDecl()->getType()->isCountAttributedType())
4774 return;
4775
4776 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4777 getLangOpts().getStrictFlexArraysLevel();
4778 if (FlexibleArray &&
4779 !ME->isFlexibleArrayMemberLike(Context: getContext(), StrictFlexArraysLevel))
4780 return;
4781
4782 const FieldDecl *FD = cast<FieldDecl>(Val: ME->getMemberDecl());
4783 const FieldDecl *CountFD = FD->findCountedByField();
4784 if (!CountFD)
4785 return;
4786
4787 if (std::optional<int64_t> Diff =
4788 getOffsetDifferenceInBits(CGF&: *this, FD1: CountFD, FD2: FD)) {
4789 if (!ArrayInst.isValid()) {
4790 // An invalid Address indicates we're checking a pointer array access.
4791 // Emit the checked L-Value here.
4792 LValue LV = EmitCheckedLValue(E: ArrayExpr, TCK: TCK_MemberAccess);
4793 ArrayInst = LV.getAddress();
4794 }
4795
4796 // FIXME: The 'static_cast' is necessary, otherwise the result turns into a
4797 // uint64_t, which messes things up if we have a negative offset difference.
4798 Diff = *Diff / static_cast<int64_t>(CGM.getContext().getCharWidth());
4799
4800 // Create a GEP with the byte offset between the counted object and the
4801 // count and use that to load the count value.
4802 ArrayInst = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr: ArrayInst,
4803 Ty: Int8PtrTy, ElementTy: Int8Ty);
4804
4805 llvm::Type *BoundsType = ConvertType(T: CountFD->getType());
4806 llvm::Value *BoundsVal =
4807 Builder.CreateInBoundsGEP(Ty: Int8Ty, Ptr: ArrayInst.emitRawPointer(CGF&: *this),
4808 IdxList: Builder.getInt32(C: *Diff), Name: ".counted_by.gep");
4809 BoundsVal = Builder.CreateAlignedLoad(Ty: BoundsType, Addr: BoundsVal, Align: getIntAlign(),
4810 Name: ".counted_by.load");
4811
4812 // Now emit the bounds checking.
4813 EmitBoundsCheckImpl(ArrayExpr, ArrayBaseType: ArrayType, IndexVal, IndexType, BoundsVal,
4814 BoundsType: CountFD->getType(), Accessed);
4815 }
4816}
4817
4818LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
4819 bool Accessed) {
4820 // The index must always be an integer, which is not an aggregate. Emit it
4821 // in lexical order (this complexity is, sadly, required by C++17).
4822 llvm::Value *IdxPre =
4823 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E: E->getIdx()) : nullptr;
4824 bool SignedIndices = false;
4825 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4826 auto *Idx = IdxPre;
4827 if (E->getLHS() != E->getIdx()) {
4828 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4829 Idx = EmitScalarExpr(E: E->getIdx());
4830 }
4831
4832 QualType IdxTy = E->getIdx()->getType();
4833 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4834 SignedIndices |= IdxSigned;
4835
4836 if (SanOpts.has(K: SanitizerKind::ArrayBounds))
4837 EmitBoundsCheck(ArrayExpr: E, ArrayExprBase: E->getBase(), IndexVal: Idx, IndexType: IdxTy, Accessed);
4838
4839 // Extend or truncate the index type to 32 or 64 bits.
4840 if (Promote && Idx->getType() != IntPtrTy)
4841 Idx = Builder.CreateIntCast(V: Idx, DestTy: IntPtrTy, isSigned: IdxSigned, Name: "idxprom");
4842
4843 return Idx;
4844 };
4845 IdxPre = nullptr;
4846
4847 // If the base is a vector type, then we are forming a vector element lvalue
4848 // with this subscript.
4849 if (E->getBase()->getType()->isSubscriptableVectorType() &&
4850 !isa<ExtVectorElementExpr>(Val: E->getBase())) {
4851 // Emit the vector as an lvalue to get its address.
4852 LValue LHS = EmitLValue(E: E->getBase());
4853 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4854 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4855 return LValue::MakeVectorElt(vecAddress: LHS.getAddress(), Idx, type: E->getBase()->getType(),
4856 BaseInfo: LHS.getBaseInfo(), TBAAInfo: TBAAAccessInfo());
4857 }
4858
4859 // The HLSL runtime handles subscript expressions on global resource arrays
4860 // and objects with HLSL buffer layouts.
4861 if (getLangOpts().HLSL) {
4862 std::optional<LValue> LV;
4863 if (E->getType()->isHLSLResourceRecord() ||
4864 E->getType()->isHLSLResourceRecordArray()) {
4865 LV = CGM.getHLSLRuntime().emitResourceArraySubscriptExpr(E, CGF&: *this);
4866 } else if (E->getType().getAddressSpace() == LangAS::hlsl_constant) {
4867 LV = CGM.getHLSLRuntime().emitBufferArraySubscriptExpr(E, CGF&: *this,
4868 EmitIdxAfterBase);
4869 }
4870 if (LV.has_value())
4871 return *LV;
4872 }
4873
4874 // All the other cases basically behave like simple offsetting.
4875
4876 // Handle the extvector case we ignored above.
4877 if (isa<ExtVectorElementExpr>(Val: E->getBase())) {
4878 LValue LV = EmitLValue(E: E->getBase());
4879 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4880 Address Addr = EmitExtVectorElementLValue(LV);
4881
4882 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4883 Addr = emitArraySubscriptGEP(CGF&: *this, addr: Addr, indices: Idx, eltType: EltType, /*inbounds*/ true,
4884 signedIndices: SignedIndices, loc: E->getExprLoc());
4885 return MakeAddrLValue(Addr, T: EltType, BaseInfo: LV.getBaseInfo(),
4886 TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: EltType));
4887 }
4888
4889 LValueBaseInfo EltBaseInfo;
4890 TBAAAccessInfo EltTBAAInfo;
4891 Address Addr = Address::invalid();
4892 if (const VariableArrayType *vla =
4893 getContext().getAsVariableArrayType(T: E->getType())) {
4894 // The base must be a pointer, which is not an aggregate. Emit
4895 // it. It needs to be emitted first in case it's what captures
4896 // the VLA bounds.
4897 Addr = EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &EltBaseInfo, TBAAInfo: &EltTBAAInfo);
4898 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4899
4900 // The element count here is the total number of non-VLA elements.
4901 llvm::Value *numElements = getVLASize(vla).NumElts;
4902
4903 // Effectively, the multiply by the VLA size is part of the GEP.
4904 // GEP indexes are signed, and scaling an index isn't permitted to
4905 // signed-overflow, so we use the same semantics for our explicit
4906 // multiply. We suppress this if overflow is not undefined behavior.
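    // For illustration: given 'int a[n][m];', the expression 'a[i]' has the
    // VLA type 'int[m]', so the index is scaled by m and the GEP below is
    // performed in units of the fixed-size element type (int).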
4907 if (getLangOpts().PointerOverflowDefined) {
4908 Idx = Builder.CreateMul(LHS: Idx, RHS: numElements);
4909 } else {
4910 Idx = Builder.CreateNSWMul(LHS: Idx, RHS: numElements);
4911 }
4912
4913 Addr = emitArraySubscriptGEP(CGF&: *this, addr: Addr, indices: Idx, eltType: vla->getElementType(),
4914 inbounds: !getLangOpts().PointerOverflowDefined,
4915 signedIndices: SignedIndices, loc: E->getExprLoc());
4916
4917 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4918 // Indexing over an interface, as in "NSString *P; P[4];"
4919
4920 // Emit the base pointer.
4921 Addr = EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &EltBaseInfo, TBAAInfo: &EltTBAAInfo);
4922 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4923
4924 CharUnits InterfaceSize = getContext().getTypeSizeInChars(T: OIT);
4925 llvm::Value *InterfaceSizeVal =
4926 llvm::ConstantInt::get(Ty: Idx->getType(), V: InterfaceSize.getQuantity());
4927
4928 llvm::Value *ScaledIdx = Builder.CreateMul(LHS: Idx, RHS: InterfaceSizeVal);
4929
4930 // We don't necessarily build correct LLVM struct types for ObjC
4931 // interfaces, so we can't rely on GEP to do this scaling
4932 // correctly, so we need to cast to i8*. FIXME: is this actually
4933 // true? A lot of other things in the fragile ABI would break...
4934 llvm::Type *OrigBaseElemTy = Addr.getElementType();
4935
4936 // Do the GEP.
4937 CharUnits EltAlign =
4938 getArrayElementAlign(arrayAlign: Addr.getAlignment(), idx: Idx, eltSize: InterfaceSize);
4939 llvm::Value *EltPtr =
4940 emitArraySubscriptGEP(CGF&: *this, elemType: Int8Ty, ptr: Addr.emitRawPointer(CGF&: *this),
4941 indices: ScaledIdx, inbounds: false, signedIndices: SignedIndices, loc: E->getExprLoc());
4942 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4943 } else if (const Expr *Array = isSimpleArrayDecayOperand(E: E->getBase())) {
4944 // If this is A[i] where A is an array, the frontend will have decayed the
4945 // base to be an ArrayToPointerDecay implicit cast. While correct, it is
4946 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4947 // "gep x, i" here. Emit one "gep A, 0, i".
4948 assert(Array->getType()->isArrayType() &&
4949 "Array to pointer decay must have array source type!");
4950 LValue ArrayLV;
4951 // For simple multidimensional array indexing, set the 'accessed' flag for
4952 // better bounds-checking of the base expression.
4953 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: Array))
4954 ArrayLV = EmitArraySubscriptExpr(E: ASE, /*Accessed*/ true);
4955 else
4956 ArrayLV = EmitLValue(E: Array);
4957 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4958
4959 if (SanOpts.has(K: SanitizerKind::ArrayBounds))
4960 EmitCountedByBoundsChecking(ArrayExpr: Array, ArrayType: Array->getType(), ArrayInst: ArrayLV.getAddress(),
4961 IndexType: E->getIdx()->getType(), IndexVal: Idx, Accessed,
4962 /*FlexibleArray=*/true);
4963
4964 // Propagate the alignment from the array itself to the result.
4965 QualType arrayType = Array->getType();
4966 Addr = emitArraySubscriptGEP(
4967 CGF&: *this, addr: ArrayLV.getAddress(), indices: {CGM.getSize(numChars: CharUnits::Zero()), Idx},
4968 eltType: E->getType(), inbounds: !getLangOpts().PointerOverflowDefined, signedIndices: SignedIndices,
4969 loc: E->getExprLoc(), arrayType: &arrayType, Base: E->getBase());
4970 EltBaseInfo = ArrayLV.getBaseInfo();
4971 if (!CGM.getCodeGenOpts().NewStructPathTBAA) {
4972 // Since CodeGenTBAA::getTypeInfoHelper only handles array types for
4973 // new struct path TBAA, we must use a plain access.
4974 EltTBAAInfo = CGM.getTBAAInfoForSubobject(Base: ArrayLV, AccessType: E->getType());
4975 } else if (ArrayLV.getTBAAInfo().isMayAlias()) {
4976 EltTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4977 } else if (ArrayLV.getTBAAInfo().isIncomplete()) {
4978 // The array element is complete, even if the array is not.
4979 EltTBAAInfo = CGM.getTBAAAccessInfo(AccessType: E->getType());
4980 } else {
4981 // The TBAA access info from the array (base) lvalue is ordinary. We will
4982 // adapt it to create access info for the element.
4983 EltTBAAInfo = ArrayLV.getTBAAInfo();
4984
4985 // We retain the TBAA struct path (BaseType and Offset members) from the
4986 // array. In the TBAA representation, we map any array access to the
4987 // element at index 0, as the index is generally a runtime value. This
4988 // element has the same offset in the base type as the array itself.
4989 // If the array lvalue had no base type, there is no point trying to
4990 // generate one, since an array itself is not a valid base type.
4991
4992 // We also retain the access type from the base lvalue, but the access
4993 // size must be updated to the size of an individual element.
4994 EltTBAAInfo.Size =
4995 getContext().getTypeSizeInChars(T: E->getType()).getQuantity();
4996 }
4997 } else {
4998 // The base must be a pointer; emit it with an estimate of its alignment.
4999 Address BaseAddr =
5000 EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &EltBaseInfo, TBAAInfo: &EltTBAAInfo);
5001 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
5002 QualType ptrType = E->getBase()->getType();
5003 Addr = emitArraySubscriptGEP(CGF&: *this, addr: BaseAddr, indices: Idx, eltType: E->getType(),
5004 inbounds: !getLangOpts().PointerOverflowDefined,
5005 signedIndices: SignedIndices, loc: E->getExprLoc(), arrayType: &ptrType,
5006 Base: E->getBase());
5007
5008 if (SanOpts.has(K: SanitizerKind::ArrayBounds)) {
5009 StructFieldAccess Visitor;
5010 const Expr *Base = Visitor.Visit(S: E->getBase());
5011
5012 if (const auto *CE = dyn_cast_if_present<CastExpr>(Val: Base);
5013 CE && CE->getCastKind() == CK_LValueToRValue)
5014 EmitCountedByBoundsChecking(ArrayExpr: CE, ArrayType: ptrType, ArrayInst: Address::invalid(),
5015 IndexType: E->getIdx()->getType(), IndexVal: Idx, Accessed,
5016 /*FlexibleArray=*/false);
5017 }
5018 }
5019
5020 LValue LV = MakeAddrLValue(Addr, T: E->getType(), BaseInfo: EltBaseInfo, TBAAInfo: EltTBAAInfo);
5021
5022 if (getLangOpts().ObjC &&
5023 getLangOpts().getGC() != LangOptions::NonGC) {
5024 LV.setNonGC(!E->isOBJCGCCandidate(Ctx&: getContext()));
5025 setObjCGCLValueClass(Ctx: getContext(), E, LV);
5026 }
5027 return LV;
5028}
5029
5030llvm::Value *CodeGenFunction::EmitMatrixIndexExpr(const Expr *E) {
5031 llvm::Value *Idx = EmitScalarExpr(E);
5032 if (Idx->getType() == IntPtrTy)
5033 return Idx;
5034 bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
5035 return Builder.CreateIntCast(V: Idx, DestTy: IntPtrTy, isSigned: IsSigned);
5036}
5037
5038LValue CodeGenFunction::EmitMatrixSingleSubscriptExpr(
5039 const MatrixSingleSubscriptExpr *E) {
5040 LValue Base = EmitLValue(E: E->getBase());
5041 llvm::Value *RowIdx = EmitMatrixIndexExpr(E: E->getRowIdx());
5042 return LValue::MakeMatrixRow(
5043 Addr: MaybeConvertMatrixAddress(Addr: Base.getAddress(), CGF&: *this), RowIdx,
5044 MatrixTy: E->getBase()->getType(), BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo());
5045}
5046
5047LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
5048 assert(
5049 !E->isIncomplete() &&
5050 "incomplete matrix subscript expressions should be rejected during Sema");
5051 LValue Base = EmitLValue(E: E->getBase());
5052
5053 // Extend or truncate the index type to 32 or 64 bits if needed.
5054 llvm::Value *RowIdx = EmitMatrixIndexExpr(E: E->getRowIdx());
5055 llvm::Value *ColIdx = EmitMatrixIndexExpr(E: E->getColumnIdx());
5056 llvm::MatrixBuilder MB(Builder);
5057 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
5058 unsigned NumCols = MatrixTy->getNumColumns();
5059 unsigned NumRows = MatrixTy->getNumRows();
5060 bool IsMatrixRowMajor = getLangOpts().getDefaultMatrixMemoryLayout() ==
5061 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
5062 llvm::Value *FinalIdx =
5063 MB.CreateIndex(RowIdx, ColumnIdx: ColIdx, NumRows, NumCols, IsMatrixRowMajor);
5064
5065 return LValue::MakeMatrixElt(
5066 matAddress: MaybeConvertMatrixAddress(Addr: Base.getAddress(), CGF&: *this), Idx: FinalIdx,
5067 type: E->getBase()->getType(), BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo());
5068}
5069
5070static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
5071 LValueBaseInfo &BaseInfo,
5072 TBAAAccessInfo &TBAAInfo,
5073 QualType BaseTy, QualType ElTy,
5074 bool IsLowerBound) {
5075 LValue BaseLVal;
5076 if (auto *ASE = dyn_cast<ArraySectionExpr>(Val: Base->IgnoreParenImpCasts())) {
5077 BaseLVal = CGF.EmitArraySectionExpr(E: ASE, IsLowerBound);
5078 if (BaseTy->isArrayType()) {
5079 Address Addr = BaseLVal.getAddress();
5080 BaseInfo = BaseLVal.getBaseInfo();
5081
5082 // If the array type was an incomplete type, we need to make sure
5083 // the decay ends up being the right type.
5084 llvm::Type *NewTy = CGF.ConvertType(T: BaseTy);
5085 Addr = Addr.withElementType(ElemTy: NewTy);
5086
5087 // Note that VLA pointers are always decayed, so we don't need to do
5088 // anything here.
5089 if (!BaseTy->isVariableArrayType()) {
5090 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
5091 "Expected pointer to array");
5092 Addr = CGF.Builder.CreateConstArrayGEP(Addr, Index: 0, Name: "arraydecay");
5093 }
5094
5095 return Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: ElTy));
5096 }
5097 LValueBaseInfo TypeBaseInfo;
5098 TBAAAccessInfo TypeTBAAInfo;
5099 CharUnits Align =
5100 CGF.CGM.getNaturalTypeAlignment(T: ElTy, BaseInfo: &TypeBaseInfo, TBAAInfo: &TypeTBAAInfo);
5101 BaseInfo.mergeForCast(Info: TypeBaseInfo);
5102 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(SourceInfo: TBAAInfo, TargetInfo: TypeTBAAInfo);
5103 return Address(CGF.Builder.CreateLoad(Addr: BaseLVal.getAddress()),
5104 CGF.ConvertTypeForMem(T: ElTy), Align);
5105 }
5106 return CGF.EmitPointerWithAlignment(E: Base, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo);
5107}
5108
5109LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
5110 bool IsLowerBound) {
5111
5112 assert(!E->isOpenACCArraySection() &&
5113 "OpenACC Array section codegen not implemented");
5114
5115 QualType BaseTy = ArraySectionExpr::getBaseOriginalType(Base: E->getBase());
5116 QualType ResultExprTy;
5117 if (auto *AT = getContext().getAsArrayType(T: BaseTy))
5118 ResultExprTy = AT->getElementType();
5119 else
5120 ResultExprTy = BaseTy->getPointeeType();
5121 llvm::Value *Idx = nullptr;
5122 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
5123 // Requesting the lower bound, or the upper bound when no length and no ':'
5124 // (which would imply the default length) were provided -> length = 1.
5125 // Idx = LowerBound ?: 0;
5126 if (auto *LowerBound = E->getLowerBound()) {
5127 Idx = Builder.CreateIntCast(
5128 V: EmitScalarExpr(E: LowerBound), DestTy: IntPtrTy,
5129 isSigned: LowerBound->getType()->hasSignedIntegerRepresentation());
5130 } else
5131 Idx = llvm::ConstantInt::getNullValue(Ty: IntPtrTy);
5132 } else {
5133 // Try to emit length or lower bound as constant. If this is possible, 1
5134 // is subtracted from constant length or lower bound. Otherwise, emit LLVM
5135 // IR (LB + Len) - 1.
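      // For example (illustrative): for the section base[lb:len], the upper
      // bound index is lb + len - 1; any constant parts are folded here and
      // the rest is emitted as IR below.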
5136 auto &C = CGM.getContext();
5137 auto *Length = E->getLength();
5138 llvm::APSInt ConstLength;
5139 if (Length) {
5140 // Idx = LowerBound + Length - 1;
5141 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(Ctx: C)) {
5142 ConstLength = CL->zextOrTrunc(width: PointerWidthInBits);
5143 Length = nullptr;
5144 }
5145 auto *LowerBound = E->getLowerBound();
5146 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
5147 if (LowerBound) {
5148 if (std::optional<llvm::APSInt> LB =
5149 LowerBound->getIntegerConstantExpr(Ctx: C)) {
5150 ConstLowerBound = LB->zextOrTrunc(width: PointerWidthInBits);
5151 LowerBound = nullptr;
5152 }
5153 }
5154 if (!Length)
5155 --ConstLength;
5156 else if (!LowerBound)
5157 --ConstLowerBound;
5158
5159 if (Length || LowerBound) {
5160 auto *LowerBoundVal =
5161 LowerBound
5162 ? Builder.CreateIntCast(
5163 V: EmitScalarExpr(E: LowerBound), DestTy: IntPtrTy,
5164 isSigned: LowerBound->getType()->hasSignedIntegerRepresentation())
5165 : llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLowerBound);
5166 auto *LengthVal =
5167 Length
5168 ? Builder.CreateIntCast(
5169 V: EmitScalarExpr(E: Length), DestTy: IntPtrTy,
5170 isSigned: Length->getType()->hasSignedIntegerRepresentation())
5171 : llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLength);
5172 Idx = Builder.CreateAdd(LHS: LowerBoundVal, RHS: LengthVal, Name: "lb_add_len",
5173 /*HasNUW=*/false,
5174 HasNSW: !getLangOpts().PointerOverflowDefined);
5175 if (Length && LowerBound) {
5176 Idx = Builder.CreateSub(
5177 LHS: Idx, RHS: llvm::ConstantInt::get(Ty: IntPtrTy, /*V=*/1), Name: "idx_sub_1",
5178 /*HasNUW=*/false, HasNSW: !getLangOpts().PointerOverflowDefined);
5179 }
5180 } else
5181 Idx = llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLength + ConstLowerBound);
5182 } else {
5183 // Idx = ArraySize - 1;
5184 QualType ArrayTy = BaseTy->isPointerType()
5185 ? E->getBase()->IgnoreParenImpCasts()->getType()
5186 : BaseTy;
5187 if (auto *VAT = C.getAsVariableArrayType(T: ArrayTy)) {
5188 Length = VAT->getSizeExpr();
5189 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(Ctx: C)) {
5190 ConstLength = *L;
5191 Length = nullptr;
5192 }
5193 } else {
5194 auto *CAT = C.getAsConstantArrayType(T: ArrayTy);
5195 assert(CAT && "unexpected type for array initializer");
5196 ConstLength = CAT->getSize();
5197 }
5198 if (Length) {
5199 auto *LengthVal = Builder.CreateIntCast(
5200 V: EmitScalarExpr(E: Length), DestTy: IntPtrTy,
5201 isSigned: Length->getType()->hasSignedIntegerRepresentation());
5202 Idx = Builder.CreateSub(
5203 LHS: LengthVal, RHS: llvm::ConstantInt::get(Ty: IntPtrTy, /*V=*/1), Name: "len_sub_1",
5204 /*HasNUW=*/false, HasNSW: !getLangOpts().PointerOverflowDefined);
5205 } else {
5206 ConstLength = ConstLength.zextOrTrunc(width: PointerWidthInBits);
5207 --ConstLength;
5208 Idx = llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLength);
5209 }
5210 }
5211 }
5212 assert(Idx);
5213
5214 Address EltPtr = Address::invalid();
5215 LValueBaseInfo BaseInfo;
5216 TBAAAccessInfo TBAAInfo;
5217 if (auto *VLA = getContext().getAsVariableArrayType(T: ResultExprTy)) {
5218 // The base must be a pointer, which is not an aggregate. Emit
5219 // it. It needs to be emitted first in case it's what captures
5220 // the VLA bounds.
5221 Address Base =
5222 emitOMPArraySectionBase(CGF&: *this, Base: E->getBase(), BaseInfo, TBAAInfo,
5223 BaseTy, ElTy: VLA->getElementType(), IsLowerBound);
5224 // The element count here is the total number of non-VLA elements.
5225 llvm::Value *NumElements = getVLASize(vla: VLA).NumElts;
5226
5227 // Effectively, the multiply by the VLA size is part of the GEP.
5228 // GEP indexes are signed, and scaling an index isn't permitted to
5229 // signed-overflow, so we use the same semantics for our explicit
5230 // multiply. We suppress this if overflow is not undefined behavior.
5231 if (getLangOpts().PointerOverflowDefined)
5232 Idx = Builder.CreateMul(LHS: Idx, RHS: NumElements);
5233 else
5234 Idx = Builder.CreateNSWMul(LHS: Idx, RHS: NumElements);
5235 EltPtr = emitArraySubscriptGEP(CGF&: *this, addr: Base, indices: Idx, eltType: VLA->getElementType(),
5236 inbounds: !getLangOpts().PointerOverflowDefined,
5237 /*signedIndices=*/false, loc: E->getExprLoc());
5238 } else if (const Expr *Array = isSimpleArrayDecayOperand(E: E->getBase())) {
5239 // If this is A[i] where A is an array, the frontend will have decayed the
5240 // base to be an ArrayToPointerDecay implicit cast. While correct, it is
5241 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
5242 // "gep x, i" here. Emit one "gep A, 0, i".
5243 assert(Array->getType()->isArrayType() &&
5244 "Array to pointer decay must have array source type!");
5245 LValue ArrayLV;
5246 // For simple multidimensional array indexing, set the 'accessed' flag for
5247 // better bounds-checking of the base expression.
5248 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: Array))
5249 ArrayLV = EmitArraySubscriptExpr(E: ASE, /*Accessed*/ true);
5250 else
5251 ArrayLV = EmitLValue(E: Array);
5252
5253 // Propagate the alignment from the array itself to the result.
5254 EltPtr = emitArraySubscriptGEP(
5255 CGF&: *this, addr: ArrayLV.getAddress(), indices: {CGM.getSize(numChars: CharUnits::Zero()), Idx},
5256 eltType: ResultExprTy, inbounds: !getLangOpts().PointerOverflowDefined,
5257 /*signedIndices=*/false, loc: E->getExprLoc());
5258 BaseInfo = ArrayLV.getBaseInfo();
5259 TBAAInfo = CGM.getTBAAInfoForSubobject(Base: ArrayLV, AccessType: ResultExprTy);
5260 } else {
5261 Address Base =
5262 emitOMPArraySectionBase(CGF&: *this, Base: E->getBase(), BaseInfo, TBAAInfo, BaseTy,
5263 ElTy: ResultExprTy, IsLowerBound);
5264 EltPtr = emitArraySubscriptGEP(CGF&: *this, addr: Base, indices: Idx, eltType: ResultExprTy,
5265 inbounds: !getLangOpts().PointerOverflowDefined,
5266 /*signedIndices=*/false, loc: E->getExprLoc());
5267 }
5268
5269 return MakeAddrLValue(Addr: EltPtr, T: ResultExprTy, BaseInfo, TBAAInfo);
5270}
5271
5272LValue CodeGenFunction::
5273EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
5274 // Emit the base vector as an l-value.
5275 LValue Base;
5276
5277 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
5278 if (E->isArrow()) {
5279 // If it is a pointer to a vector, emit the address and form an lvalue with
5280 // it.
5281 LValueBaseInfo BaseInfo;
5282 TBAAAccessInfo TBAAInfo;
5283 Address Ptr = EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo);
5284 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
5285 Base = MakeAddrLValue(Addr: Ptr, T: PT->getPointeeType(), BaseInfo, TBAAInfo);
5286 Base.getQuals().removeObjCGCAttr();
5287 } else if (E->getBase()->isGLValue()) {
5288 // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
5289 // emit the base as an lvalue.
5290 assert(E->getBase()->getType()->isVectorType());
5291 Base = EmitLValue(E: E->getBase());
5292 } else {
5293 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
5294 assert(E->getBase()->getType()->isVectorType() &&
5295 "Result must be a vector");
5296 llvm::Value *Vec = EmitScalarExpr(E: E->getBase());
5297
5298 // Store the vector to memory (because LValue wants an address).
5299 Address VecMem = CreateMemTemp(Ty: E->getBase()->getType());
5300 // We need to zero-extend an HLSL boolean vector to store it back to memory.
5301 QualType Ty = E->getBase()->getType();
5302 llvm::Type *LTy = convertTypeForLoadStore(ASTTy: Ty, LLVMTy: Vec->getType());
5303 if (LTy->getScalarSizeInBits() > Vec->getType()->getScalarSizeInBits())
5304 Vec = Builder.CreateZExt(V: Vec, DestTy: LTy);
5305 Builder.CreateStore(Val: Vec, Addr: VecMem);
5306 Base = MakeAddrLValue(Addr: VecMem, T: Ty, Source: AlignmentSource::Decl);
5307 }
5308
5309 QualType type =
5310 E->getType().withCVRQualifiers(CVR: Base.getQuals().getCVRQualifiers());
5311
5312 // Encode the element access list into a vector of unsigned indices.
5313 SmallVector<uint32_t, 4> Indices;
5314 E->getEncodedElementAccess(Elts&: Indices);
5315
5316 if (Base.isSimple()) {
5317 llvm::Constant *CV =
5318 llvm::ConstantDataVector::get(Context&: getLLVMContext(), Elts: Indices);
5319 return LValue::MakeExtVectorElt(Addr: Base.getAddress(), Elts: CV, type,
5320 BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo());
5321 }
5322 if (Base.isMatrixRow()) {
5323 if (auto *RowIdx =
5324 llvm::dyn_cast<llvm::ConstantInt>(Val: Base.getMatrixRowIdx())) {
5325 llvm::SmallVector<llvm::Constant *> MatIndices;
5326 QualType MatTy = Base.getType();
5327 const ConstantMatrixType *MT = MatTy->castAs<ConstantMatrixType>();
5328 unsigned NumCols = MT->getNumColumns();
5329 unsigned NumRows = MT->getNumRows();
5330 MatIndices.reserve(N: NumCols);
5331
5332 unsigned Row = RowIdx->getZExtValue();
5333 for (unsigned C = 0; C < NumCols; ++C) {
5334 unsigned Col = Indices[C];
5335 unsigned Linear = Col * NumRows + Row;
5336 MatIndices.push_back(Elt: llvm::ConstantInt::get(Ty: Int32Ty, V: Linear));
5337 }
5338
5339 llvm::Constant *ConstIdxs = llvm::ConstantVector::get(V: MatIndices);
5340 return LValue::MakeExtVectorElt(Addr: Base.getMatrixAddress(), Elts: ConstIdxs,
5341 type: E->getBase()->getType(),
5342 BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo());
5343 }
5344 llvm::Constant *Cols =
5345 llvm::ConstantDataVector::get(Context&: getLLVMContext(), Elts: Indices);
5346 // Note: intentionally not using E->getType() so we can reuse isMatrixRow()
5347 // implementations in EmitLoadOfLValue & EmitStoreThroughLValue and don't
5348 // need the LValue to have its own number of rows and columns when the
5349 // type is a vector.
5350 return LValue::MakeMatrixRowSwizzle(
5351 MatAddr: Base.getMatrixAddress(), RowIdx: Base.getMatrixRowIdx(), Cols, MatrixTy: Base.getType(),
5352 BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo());
5353 }
5354
5355 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
5356
5357 llvm::Constant *BaseElts = Base.getExtVectorElts();
5358 SmallVector<llvm::Constant *, 4> CElts;
5359
5360 for (unsigned Index : Indices)
5361 CElts.push_back(Elt: BaseElts->getAggregateElement(Elt: Index));
5362 llvm::Constant *CV = llvm::ConstantVector::get(V: CElts);
5363 return LValue::MakeExtVectorElt(Addr: Base.getExtVectorAddress(), Elts: CV, type,
5364 BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo());
5365}
5366
5367bool CodeGenFunction::isUnderlyingBasePointerConstantNull(const Expr *E) {
5368 const Expr *UnderlyingBaseExpr = E->IgnoreParens();
5369 while (auto *BaseMemberExpr = dyn_cast<MemberExpr>(Val: UnderlyingBaseExpr))
5370 UnderlyingBaseExpr = BaseMemberExpr->getBase()->IgnoreParens();
5371 return getContext().isSentinelNullExpr(E: UnderlyingBaseExpr);
5372}
5373
5374LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
5375 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(CGF&: *this, ME: E)) {
5376 EmitIgnoredExpr(E: E->getBase());
5377 return EmitDeclRefLValue(E: DRE);
5378 }
5379 if (getLangOpts().HLSL &&
5380 E->getType().getAddressSpace() == LangAS::hlsl_constant) {
5381 // We have an HLSL buffer - emit using HLSL's layout rules.
5382 return CGM.getHLSLRuntime().emitBufferMemberExpr(CGF&: *this, E);
5383 }
5384
5385 Expr *BaseExpr = E->getBase();
5386  // Check whether the underlying base pointer is a constant null.
5387  // If so, we do not set the inbounds flag on the GEP, to avoid breaking
5388  // some old-style offsetof idioms.
5389 bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
5390 !isUnderlyingBasePointerConstantNull(E: BaseExpr);
5391 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
5392 LValue BaseLV;
5393 if (E->isArrow()) {
5394 LValueBaseInfo BaseInfo;
5395 TBAAAccessInfo TBAAInfo;
5396 Address Addr = EmitPointerWithAlignment(E: BaseExpr, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo);
5397 QualType PtrTy = BaseExpr->getType()->getPointeeType();
5398 SanitizerSet SkippedChecks;
5399 bool IsBaseCXXThis = IsWrappedCXXThis(Obj: BaseExpr);
5400 if (IsBaseCXXThis)
5401 SkippedChecks.set(K: SanitizerKind::Alignment, Value: true);
5402 if (IsBaseCXXThis || isa<DeclRefExpr>(Val: BaseExpr))
5403 SkippedChecks.set(K: SanitizerKind::Null, Value: true);
5404 EmitTypeCheck(TCK: TCK_MemberAccess, Loc: E->getExprLoc(), Addr, Type: PtrTy,
5405 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
5406 BaseLV = MakeAddrLValue(Addr, T: PtrTy, BaseInfo, TBAAInfo);
5407 } else
5408 BaseLV = EmitCheckedLValue(E: BaseExpr, TCK: TCK_MemberAccess);
5409
5410 NamedDecl *ND = E->getMemberDecl();
5411 if (auto *Field = dyn_cast<FieldDecl>(Val: ND)) {
5412 LValue LV = EmitLValueForField(Base: BaseLV, Field, IsInBounds);
5413 setObjCGCLValueClass(Ctx: getContext(), E, LV);
5414 if (getLangOpts().OpenMP) {
5415 // If the member was explicitly marked as nontemporal, mark it as
5416 // nontemporal. If the base lvalue is marked as nontemporal, mark access
5417 // to children as nontemporal too.
5418 if ((IsWrappedCXXThis(Obj: BaseExpr) &&
5419 CGM.getOpenMPRuntime().isNontemporalDecl(VD: Field)) ||
5420 BaseLV.isNontemporal())
5421 LV.setNontemporal(/*Value=*/true);
5422 }
5423 return LV;
5424 }
5425
5426 if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND))
5427 return EmitFunctionDeclLValue(CGF&: *this, E, GD: FD);
5428
5429 llvm_unreachable("Unhandled member declaration!");
5430}
5431
5432/// Given that we are currently emitting a lambda, emit an l-value for
5433/// one of its members.
5434///
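/// For example (C++23), given
///   auto L = [i](this auto &&Self) { return i; };
/// the capture 'i' is accessed through the explicit object parameter 'Self'
/// rather than through an implicit 'this' pointer.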
5435LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
5436 llvm::Value *ThisValue) {
5437 bool HasExplicitObjectParameter = false;
5438 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(Val: CurCodeDecl);
5439 if (MD) {
5440 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
5441 assert(MD->getParent()->isLambda());
5442 assert(MD->getParent() == Field->getParent());
5443 }
5444 LValue LambdaLV;
5445 if (HasExplicitObjectParameter) {
5446 const VarDecl *D = cast<CXXMethodDecl>(Val: CurCodeDecl)->getParamDecl(i: 0);
5447 auto It = LocalDeclMap.find(Val: D);
5448 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
5449 Address AddrOfExplicitObject = It->getSecond();
5450 if (D->getType()->isReferenceType())
5451 LambdaLV = EmitLoadOfReferenceLValue(RefAddr: AddrOfExplicitObject, RefTy: D->getType(),
5452 Source: AlignmentSource::Decl);
5453 else
5454 LambdaLV = MakeAddrLValue(Addr: AddrOfExplicitObject,
5455 T: D->getType().getNonReferenceType());
5456
5457 // Make sure we have an lvalue to the lambda itself and not a derived class.
5458 auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
5459 auto *LambdaTy = cast<CXXRecordDecl>(Val: Field->getParent());
5460 if (ThisTy != LambdaTy) {
5461 const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(Val: MD);
5462 Address Base = GetAddressOfBaseClass(
5463 Value: LambdaLV.getAddress(), Derived: ThisTy, PathBegin: BasePathArray.begin(),
5464 PathEnd: BasePathArray.end(), /*NullCheckValue=*/false, Loc: SourceLocation());
5465 CanQualType T = getContext().getCanonicalTagType(TD: LambdaTy);
5466 LambdaLV = MakeAddrLValue(Addr: Base, T);
5467 }
5468 } else {
5469 CanQualType LambdaTagType =
5470 getContext().getCanonicalTagType(TD: Field->getParent());
5471 LambdaLV = MakeNaturalAlignAddrLValue(V: ThisValue, T: LambdaTagType);
5472 }
5473 return EmitLValueForField(Base: LambdaLV, Field);
5474}
5475
5476LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
5477 return EmitLValueForLambdaField(Field, ThisValue: CXXABIThisValue);
5478}
5479
5480/// Get the field index in the debug info. The debug info for a structure or
5481/// union ignores unnamed bit-fields.
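/// For example, given 'struct S { int a; int : 3; int b; };', the AST field
/// index of 'b' is 2, but its debug info index is 1 because the unnamed
/// bit-field is skipped.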
5482unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
5483 unsigned FieldIndex) {
5484 unsigned I = 0, Skipped = 0;
5485
5486 for (auto *F : Rec->getDefinition()->fields()) {
5487 if (I == FieldIndex)
5488 break;
5489 if (F->isUnnamedBitField())
5490 Skipped++;
5491 I++;
5492 }
5493
5494 return FieldIndex - Skipped;
5495}
5496
5497/// Get the address of a zero-sized field within a record. The resulting
5498/// address doesn't necessarily have the right type.
5499static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
5500 const FieldDecl *Field,
5501 bool IsInBounds) {
5502 CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
5503 BitSize: CGF.getContext().getFieldOffset(FD: Field));
5504 if (Offset.isZero())
5505 return Base;
5506 Base = Base.withElementType(ElemTy: CGF.Int8Ty);
5507 if (!IsInBounds)
5508 return CGF.Builder.CreateConstByteGEP(Addr: Base, Offset);
5509 return CGF.Builder.CreateConstInBoundsByteGEP(Addr: Base, Offset);
5510}
5511
5512/// Drill down to the storage of a field without walking into
5513/// reference types.
5514///
5515/// The resulting address doesn't necessarily have the right type.
5516static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
5517 const FieldDecl *field, bool IsInBounds) {
5518 if (isEmptyFieldForLayout(Context: CGF.getContext(), FD: field))
5519 return emitAddrOfZeroSizeField(CGF, Base: base, Field: field, IsInBounds);
5520
5521 const RecordDecl *rec = field->getParent();
5522
5523 unsigned idx =
5524 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(FD: field);
5525
5526 if (!IsInBounds)
5527 return CGF.Builder.CreateConstGEP2_32(Addr: base, Idx0: 0, Idx1: idx, Name: field->getName());
5528
5529 return CGF.Builder.CreateStructGEP(Addr: base, Index: idx, Name: field->getName());
5530}
5531
5532static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
5533 Address addr, const FieldDecl *field) {
5534 const RecordDecl *rec = field->getParent();
5535 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
5536 Ty: base.getType(), Loc: rec->getLocation());
5537
5538 unsigned idx =
5539 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(FD: field);
5540
5541 return CGF.Builder.CreatePreserveStructAccessIndex(
5542 Addr: addr, Index: idx, FieldIndex: CGF.getDebugInfoFIndex(Rec: rec, FieldIndex: field->getFieldIndex()), DbgInfo);
5543}
5544
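/// Return true if \p Type is, or transitively contains (through a base class
/// or a field), a dynamic class and therefore holds a vptr somewhere. For
/// example, given 'struct Poly { virtual void f(); }; struct U { Poly P; };',
/// hasAnyVptr(U) is true.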
5545static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
5546 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
5547 if (!RD)
5548 return false;
5549
5550 if (RD->isDynamicClass())
5551 return true;
5552
5553 for (const auto &Base : RD->bases())
5554 if (hasAnyVptr(Type: Base.getType(), Context))
5555 return true;
5556
5557 for (const FieldDecl *Field : RD->fields())
5558 if (hasAnyVptr(Type: Field->getType(), Context))
5559 return true;
5560
5561 return false;
5562}
5563
5564LValue CodeGenFunction::EmitLValueForField(LValue base, const FieldDecl *field,
5565 bool IsInBounds) {
5566 LValueBaseInfo BaseInfo = base.getBaseInfo();
5567
5568 if (field->isBitField()) {
5569 const CGRecordLayout &RL =
5570 CGM.getTypes().getCGRecordLayout(field->getParent());
5571 const CGBitFieldInfo &Info = RL.getBitFieldInfo(FD: field);
5572 const bool UseVolatile = isAAPCS(TargetInfo: CGM.getTarget()) &&
5573 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
5574 Info.VolatileStorageSize != 0 &&
5575 field->getType()
5576 .withCVRQualifiers(CVR: base.getVRQualifiers())
5577 .isVolatileQualified();
5578 Address Addr = base.getAddress();
5579 unsigned Idx = RL.getLLVMFieldNo(FD: field);
5580 const RecordDecl *rec = field->getParent();
5581 if (hasBPFPreserveStaticOffset(D: rec))
5582 Addr = wrapWithBPFPreserveStaticOffset(CGF&: *this, Addr);
5583 if (!UseVolatile) {
5584 if (!IsInPreservedAIRegion &&
5585 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5586 if (Idx != 0) {
5587 // For structs, we GEP to the field that the record layout suggests.
5588 if (!IsInBounds)
5589 Addr = Builder.CreateConstGEP2_32(Addr, Idx0: 0, Idx1: Idx, Name: field->getName());
5590 else
5591 Addr = Builder.CreateStructGEP(Addr, Index: Idx, Name: field->getName());
5592 }
5593 } else {
5594 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
5595 Ty: getContext().getCanonicalTagType(TD: rec), L: rec->getLocation());
5596 Addr = Builder.CreatePreserveStructAccessIndex(
5597 Addr, Index: Idx, FieldIndex: getDebugInfoFIndex(Rec: rec, FieldIndex: field->getFieldIndex()),
5598 DbgInfo);
5599 }
5600 }
5601 const unsigned SS =
5602 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
5603 // Get the access type.
5604 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(C&: getLLVMContext(), N: SS);
5605 Addr = Addr.withElementType(ElemTy: FieldIntTy);
5606 if (UseVolatile) {
5607 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
5608 if (VolatileOffset)
5609 Addr = Builder.CreateConstInBoundsGEP(Addr, Index: VolatileOffset);
5610 }
5611
5612 QualType fieldType =
5613 field->getType().withCVRQualifiers(CVR: base.getVRQualifiers());
5614 // TODO: Support TBAA for bit fields.
5615 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
5616 return LValue::MakeBitfield(Addr, Info, type: fieldType, BaseInfo: FieldBaseInfo,
5617 TBAAInfo: TBAAAccessInfo());
5618 }
5619
5620 // Fields of may-alias structures are may-alias themselves.
5621 // FIXME: this should get propagated down through anonymous structs
5622 // and unions.
5623 QualType FieldType = field->getType();
5624 const RecordDecl *rec = field->getParent();
5625 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
5626 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(Source: BaseAlignSource));
5627 TBAAAccessInfo FieldTBAAInfo;
5628 if (base.getTBAAInfo().isMayAlias() ||
5629 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
5630 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5631 } else if (rec->isUnion()) {
5632 // TODO: Support TBAA for unions.
5633 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5634 } else {
5635    // If no base type has been assigned for the base access, then try to
5636    // generate one for this base lvalue.
5637 FieldTBAAInfo = base.getTBAAInfo();
5638 if (!FieldTBAAInfo.BaseType) {
5639 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(QTy: base.getType());
5640 assert(!FieldTBAAInfo.Offset &&
5641 "Nonzero offset for an access with no base type!");
5642 }
5643
5644 // Adjust offset to be relative to the base type.
5645 const ASTRecordLayout &Layout =
5646 getContext().getASTRecordLayout(D: field->getParent());
5647 unsigned CharWidth = getContext().getCharWidth();
5648 if (FieldTBAAInfo.BaseType)
5649 FieldTBAAInfo.Offset +=
5650 Layout.getFieldOffset(FieldNo: field->getFieldIndex()) / CharWidth;
5651
5652 // Update the final access type and size.
5653 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(QTy: FieldType);
5654 FieldTBAAInfo.Size =
5655 getContext().getTypeSizeInChars(T: FieldType).getQuantity();
5656 }
5657
5658 Address addr = base.getAddress();
5659 if (hasBPFPreserveStaticOffset(D: rec))
5660 addr = wrapWithBPFPreserveStaticOffset(CGF&: *this, Addr&: addr);
5661 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(Val: rec)) {
5662 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5663 ClassDef->isDynamicClass()) {
5664      // Getting to any field of a dynamic object requires stripping the
5665      // dynamic information provided by invariant.group. This is because
5666      // accessing fields may leak the real address of the dynamic object,
5667      // which could result in miscompilation when the leaked pointer is compared.
5668 auto *stripped =
5669 Builder.CreateStripInvariantGroup(Ptr: addr.emitRawPointer(CGF&: *this));
5670 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
5671 }
5672 }
5673
5674 unsigned RecordCVR = base.getVRQualifiers();
5675 if (rec->isUnion()) {
5676 // For unions, there is no pointer adjustment.
5677 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5678 hasAnyVptr(Type: FieldType, Context: getContext()))
5679      // Because unions can easily skip invariant.barriers, we need to add
5680      // a barrier every time a CXXRecord field with a vptr is referenced.
5681 addr = Builder.CreateLaunderInvariantGroup(Addr: addr);
5682
5683 if (IsInPreservedAIRegion ||
5684 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5685 // Remember the original union field index
5686      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
5687          Ty: base.getType(), Loc: rec->getLocation());
5688 addr =
5689 Address(Builder.CreatePreserveUnionAccessIndex(
5690 Base: addr.emitRawPointer(CGF&: *this),
5691 FieldIndex: getDebugInfoFIndex(Rec: rec, FieldIndex: field->getFieldIndex()), DbgInfo),
5692 addr.getElementType(), addr.getAlignment());
5693 }
5694
5695 if (FieldType->isReferenceType())
5696 addr = addr.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T: FieldType));
5697 } else {
5698 if (!IsInPreservedAIRegion &&
5699 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
5700 // For structs, we GEP to the field that the record layout suggests.
5701 addr = emitAddrOfFieldStorage(CGF&: *this, base: addr, field, IsInBounds);
5702 else
5703 // Remember the original struct field index
5704 addr = emitPreserveStructAccess(CGF&: *this, base, addr, field);
5705 }
5706
5707 // If this is a reference field, load the reference right now.
5708 if (FieldType->isReferenceType()) {
5709 LValue RefLVal =
5710 MakeAddrLValue(Addr: addr, T: FieldType, BaseInfo: FieldBaseInfo, TBAAInfo: FieldTBAAInfo);
5711 if (RecordCVR & Qualifiers::Volatile)
5712 RefLVal.getQuals().addVolatile();
5713 addr = EmitLoadOfReference(RefLVal, PointeeBaseInfo: &FieldBaseInfo, PointeeTBAAInfo: &FieldTBAAInfo);
5714
5715 // Qualifiers on the struct don't apply to the referencee.
5716 RecordCVR = 0;
5717 FieldType = FieldType->getPointeeType();
5718 }
5719
5720 // Make sure that the address is pointing to the right type. This is critical
5721 // for both unions and structs.
5722 addr = addr.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T: FieldType));
5723
5724 if (field->hasAttr<AnnotateAttr>())
5725 addr = EmitFieldAnnotations(D: field, V: addr);
5726
5727 LValue LV = MakeAddrLValue(Addr: addr, T: FieldType, BaseInfo: FieldBaseInfo, TBAAInfo: FieldTBAAInfo);
5728 LV.getQuals().addCVRQualifiers(mask: RecordCVR);
5729
5730 // __weak attribute on a field is ignored.
5731 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
5732 LV.getQuals().removeObjCGCAttr();
5733
5734 return LV;
5735}
5736
5737LValue
5738CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
5739 const FieldDecl *Field) {
5740 QualType FieldType = Field->getType();
5741
5742 if (!FieldType->isReferenceType())
5743 return EmitLValueForField(base: Base, field: Field);
5744
5745 Address V = emitAddrOfFieldStorage(
5746 CGF&: *this, base: Base.getAddress(), field: Field,
5747 /*IsInBounds=*/!getLangOpts().PointerOverflowDefined);
5748
5749 // Make sure that the address is pointing to the right type.
5750 llvm::Type *llvmType = ConvertTypeForMem(T: FieldType);
5751 V = V.withElementType(ElemTy: llvmType);
5752
5753 // TODO: Generate TBAA information that describes this access as a structure
5754 // member access and not just an access to an object of the field's type. This
5755 // should be similar to what we do in EmitLValueForField().
5756 LValueBaseInfo BaseInfo = Base.getBaseInfo();
5757 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
5758 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(Source: FieldAlignSource));
5759 return MakeAddrLValue(Addr: V, T: FieldType, BaseInfo: FieldBaseInfo,
5760 TBAAInfo: CGM.getTBAAInfoForSubobject(Base, AccessType: FieldType));
5761}
5762
5763LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
5764 if (E->isFileScope()) {
5765 ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
5766 return MakeAddrLValue(Addr: GlobalPtr, T: E->getType(), Source: AlignmentSource::Decl);
5767 }
5768 if (E->getType()->isVariablyModifiedType())
5769    // Make sure to emit the VLA size.
5770 EmitVariablyModifiedType(Ty: E->getType());
5771
5772 Address DeclPtr = CreateMemTemp(Ty: E->getType(), Name: ".compoundliteral");
5773 const Expr *InitExpr = E->getInitializer();
5774 LValue Result = MakeAddrLValue(Addr: DeclPtr, T: E->getType(), Source: AlignmentSource::Decl);
5775
5776 EmitAnyExprToMem(E: InitExpr, Location: DeclPtr, Quals: E->getType().getQualifiers(),
5777 /*Init*/ IsInit: true);
5778
5779 // Block-scope compound literals are destroyed at the end of the enclosing
5780 // scope in C.
5781 if (!getLangOpts().CPlusPlus)
5782 if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
5783 pushLifetimeExtendedDestroy(kind: getCleanupKind(kind: DtorKind), addr: DeclPtr,
5784 type: E->getType(), destroyer: getDestroyer(destructionKind: DtorKind),
5785 useEHCleanupForArray: DtorKind & EHCleanup);
5786
5787 return Result;
5788}
5789
5790LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
5791 if (!E->isGLValue())
5792 // Initializing an aggregate temporary in C++11: T{...}.
5793 return EmitAggExprToLValue(E);
5794
5795 // An lvalue initializer list must be initializing a reference.
5796 assert(E->isTransparent() && "non-transparent glvalue init list");
5797 return EmitLValue(E: E->getInit(Init: 0));
5798}
5799
5800/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5801/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5802/// LValue is returned and the current block has been terminated.
5803static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5804 const Expr *Operand) {
5805 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Val: Operand->IgnoreParens())) {
5806 CGF.EmitCXXThrowExpr(E: ThrowExpr, /*KeepInsertionPoint*/false);
5807 return std::nullopt;
5808 }
5809
5810 return CGF.EmitLValue(E: Operand);
5811}
5812
5813namespace {
5814// Handle the case where the condition constant-folds to a simple integer,
5815// which means we don't have to separately handle the true/false blocks.
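// For example, in 'int &R = sizeof(int) == 4 ? A : B;' the condition folds to
// a constant, so only the live arm is emitted and no branch is needed.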
5816std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5817 CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
5818 const Expr *condExpr = E->getCond();
5819 bool CondExprBool;
5820 if (CGF.ConstantFoldsToSimpleInteger(Cond: condExpr, Result&: CondExprBool)) {
5821 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5822 if (!CondExprBool)
5823 std::swap(a&: Live, b&: Dead);
5824
5825 if (!CGF.ContainsLabel(S: Dead)) {
5826 // If the true case is live, we need to track its region.
5827 CGF.incrementProfileCounter(ExecSkip: CondExprBool ? CGF.UseExecPath
5828 : CGF.UseSkipPath,
5829 S: E, /*UseBoth=*/true);
5830 CGF.markStmtMaybeUsed(S: Dead);
5831      // If the live operand is a throw expression, emit it and return an
5832      // undefined lvalue because it can't be used.
5833 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Val: Live->IgnoreParens())) {
5834 CGF.EmitCXXThrowExpr(E: ThrowExpr);
5835 llvm::Type *ElemTy = CGF.ConvertType(T: Dead->getType());
5836 llvm::Type *Ty = CGF.DefaultPtrTy;
5837 return CGF.MakeAddrLValue(
5838 Addr: Address(llvm::UndefValue::get(T: Ty), ElemTy, CharUnits::One()),
5839 T: Dead->getType());
5840 }
5841 return CGF.EmitLValue(E: Live);
5842 }
5843 }
5844 return std::nullopt;
5845}
5846struct ConditionalInfo {
5847 llvm::BasicBlock *lhsBlock, *rhsBlock;
5848 std::optional<LValue> LHS, RHS;
5849};
5850
5851// Create and generate the 3 blocks for a conditional operator.
5852// Leaves the 'current block' in the continuation basic block.
5853template<typename FuncTy>
5854ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
5855 const AbstractConditionalOperator *E,
5856 const FuncTy &BranchGenFunc) {
5857 ConditionalInfo Info{.lhsBlock: CGF.createBasicBlock(name: "cond.true"),
5858 .rhsBlock: CGF.createBasicBlock(name: "cond.false"), .LHS: std::nullopt,
5859 .RHS: std::nullopt};
5860 llvm::BasicBlock *endBlock = CGF.createBasicBlock(name: "cond.end");
5861
5862 CodeGenFunction::ConditionalEvaluation eval(CGF);
5863 CGF.EmitBranchOnBoolExpr(Cond: E->getCond(), TrueBlock: Info.lhsBlock, FalseBlock: Info.rhsBlock,
5864 TrueCount: CGF.getProfileCount(S: E));
5865
5866 // Any temporaries created here are conditional.
5867 CGF.EmitBlock(BB: Info.lhsBlock);
5868 CGF.incrementProfileCounter(ExecSkip: CGF.UseExecPath, S: E);
5869 eval.begin(CGF);
5870 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
5871 eval.end(CGF);
5872 Info.lhsBlock = CGF.Builder.GetInsertBlock();
5873
5874 if (Info.LHS)
5875 CGF.Builder.CreateBr(Dest: endBlock);
5876
5877 // Any temporaries created here are conditional.
5878 CGF.EmitBlock(BB: Info.rhsBlock);
5879 CGF.incrementProfileCounter(ExecSkip: CGF.UseSkipPath, S: E);
5880 eval.begin(CGF);
5881 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
5882 eval.end(CGF);
5883 Info.rhsBlock = CGF.Builder.GetInsertBlock();
5884 CGF.EmitBlock(BB: endBlock);
5885
5886 return Info;
5887}
5888} // namespace
5889
5890void CodeGenFunction::EmitIgnoredConditionalOperator(
5891 const AbstractConditionalOperator *E) {
5892 if (!E->isGLValue()) {
5893 // ?: here should be an aggregate.
5894 assert(hasAggregateEvaluationKind(E->getType()) &&
5895 "Unexpected conditional operator!");
5896 return (void)EmitAggExprToLValue(E);
5897 }
5898
5899 OpaqueValueMapping binding(*this, E);
5900 if (HandleConditionalOperatorLValueSimpleCase(CGF&: *this, E))
5901 return;
5902
5903 EmitConditionalBlocks(CGF&: *this, E, BranchGenFunc: [](CodeGenFunction &CGF, const Expr *E) {
5904 CGF.EmitIgnoredExpr(E);
5905 return LValue{};
5906 });
5907}
5908LValue CodeGenFunction::EmitConditionalOperatorLValue(
5909 const AbstractConditionalOperator *expr) {
5910 if (!expr->isGLValue()) {
5911 // ?: here should be an aggregate.
5912 assert(hasAggregateEvaluationKind(expr->getType()) &&
5913 "Unexpected conditional operator!");
5914 return EmitAggExprToLValue(E: expr);
5915 }
5916
5917 OpaqueValueMapping binding(*this, expr);
5918 if (std::optional<LValue> Res =
5919 HandleConditionalOperatorLValueSimpleCase(CGF&: *this, E: expr))
5920 return *Res;
5921
5922 ConditionalInfo Info = EmitConditionalBlocks(
5923 CGF&: *this, E: expr, BranchGenFunc: [](CodeGenFunction &CGF, const Expr *E) {
5924 return EmitLValueOrThrowExpression(CGF, Operand: E);
5925 });
5926
5927 if ((Info.LHS && !Info.LHS->isSimple()) ||
5928 (Info.RHS && !Info.RHS->isSimple()))
5929 return EmitUnsupportedLValue(E: expr, Name: "conditional operator");
5930
5931 if (Info.LHS && Info.RHS) {
5932 Address lhsAddr = Info.LHS->getAddress();
5933 Address rhsAddr = Info.RHS->getAddress();
5934 Address result = mergeAddressesInConditionalExpr(
5935 LHS: lhsAddr, RHS: rhsAddr, LHSBlock: Info.lhsBlock, RHSBlock: Info.rhsBlock,
5936 MergeBlock: Builder.GetInsertBlock(), MergedType: expr->getType());
5937 AlignmentSource alignSource =
5938 std::max(a: Info.LHS->getBaseInfo().getAlignmentSource(),
5939 b: Info.RHS->getBaseInfo().getAlignmentSource());
5940 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
5941 InfoA: Info.LHS->getTBAAInfo(), InfoB: Info.RHS->getTBAAInfo());
5942 return MakeAddrLValue(Addr: result, T: expr->getType(), BaseInfo: LValueBaseInfo(alignSource),
5943 TBAAInfo);
5944 } else {
5945 assert((Info.LHS || Info.RHS) &&
5946 "both operands of glvalue conditional are throw-expressions?");
5947 return Info.LHS ? *Info.LHS : *Info.RHS;
5948 }
5949}
5950
5951/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
5952/// type. If the cast is to a reference, we can have the usual lvalue result.
5953/// Otherwise, if a cast is needed by the code generator in an lvalue context,
5954/// it must mean that we need the address of an aggregate in order to access
5955/// one of its members. This can happen for all the reasons that casts are
5956/// permitted with an aggregate result, including no-op aggregate casts and
5957/// casts from scalar to union.
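/// For example, in 'static_cast<Base &>(Derived).Member' the cast itself
/// yields an lvalue whose address is needed in order to reach 'Member'.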
5958LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
5959 llvm::scope_exit RestoreCurCast([this, Prev = CurCast] { CurCast = Prev; });
5960 CurCast = E;
5961 switch (E->getCastKind()) {
5962 case CK_ToVoid:
5963 case CK_BitCast:
5964 case CK_LValueToRValueBitCast:
5965 case CK_ArrayToPointerDecay:
5966 case CK_FunctionToPointerDecay:
5967 case CK_NullToMemberPointer:
5968 case CK_NullToPointer:
5969 case CK_IntegralToPointer:
5970 case CK_PointerToIntegral:
5971 case CK_PointerToBoolean:
5972 case CK_IntegralCast:
5973 case CK_BooleanToSignedIntegral:
5974 case CK_IntegralToBoolean:
5975 case CK_IntegralToFloating:
5976 case CK_FloatingToIntegral:
5977 case CK_FloatingToBoolean:
5978 case CK_FloatingCast:
5979 case CK_FloatingRealToComplex:
5980 case CK_FloatingComplexToReal:
5981 case CK_FloatingComplexToBoolean:
5982 case CK_FloatingComplexCast:
5983 case CK_FloatingComplexToIntegralComplex:
5984 case CK_IntegralRealToComplex:
5985 case CK_IntegralComplexToReal:
5986 case CK_IntegralComplexToBoolean:
5987 case CK_IntegralComplexCast:
5988 case CK_IntegralComplexToFloatingComplex:
5989 case CK_DerivedToBaseMemberPointer:
5990 case CK_BaseToDerivedMemberPointer:
5991 case CK_MemberPointerToBoolean:
5992 case CK_ReinterpretMemberPointer:
5993 case CK_AnyPointerToBlockPointerCast:
5994 case CK_ARCProduceObject:
5995 case CK_ARCConsumeObject:
5996 case CK_ARCReclaimReturnedObject:
5997 case CK_ARCExtendBlockObject:
5998 case CK_CopyAndAutoreleaseBlockObject:
5999 case CK_IntToOCLSampler:
6000 case CK_FloatingToFixedPoint:
6001 case CK_FixedPointToFloating:
6002 case CK_FixedPointCast:
6003 case CK_FixedPointToBoolean:
6004 case CK_FixedPointToIntegral:
6005 case CK_IntegralToFixedPoint:
6006 case CK_MatrixCast:
6007 case CK_HLSLVectorTruncation:
6008 case CK_HLSLMatrixTruncation:
6009 case CK_HLSLArrayRValue:
6010 case CK_HLSLElementwiseCast:
6011 case CK_HLSLAggregateSplatCast:
6012 return EmitUnsupportedLValue(E, Name: "unexpected cast lvalue");
6013
6014 case CK_Dependent:
6015 llvm_unreachable("dependent cast kind in IR gen!");
6016
6017 case CK_BuiltinFnToFnPtr:
6018 llvm_unreachable("builtin functions are handled elsewhere");
6019
6020 // These are never l-values; just use the aggregate emission code.
6021 case CK_NonAtomicToAtomic:
6022 case CK_AtomicToNonAtomic:
6023 return EmitAggExprToLValue(E);
6024
6025 case CK_Dynamic: {
6026 LValue LV = EmitLValue(E: E->getSubExpr());
6027 Address V = LV.getAddress();
6028 const auto *DCE = cast<CXXDynamicCastExpr>(Val: E);
6029 return MakeNaturalAlignRawAddrLValue(V: EmitDynamicCast(V, DCE), T: E->getType());
6030 }
6031
6032 case CK_ConstructorConversion:
6033 case CK_UserDefinedConversion:
6034 case CK_CPointerToObjCPointerCast:
6035 case CK_BlockPointerToObjCPointerCast:
6036 case CK_LValueToRValue:
6037 return EmitLValue(E: E->getSubExpr());
6038
6039 case CK_NoOp: {
6040 // CK_NoOp can model a qualification conversion, which can remove an array
6041 // bound and change the IR type.
6042 // FIXME: Once pointee types are removed from IR, remove this.
6043 LValue LV = EmitLValue(E: E->getSubExpr());
6044    // Propagate the volatile qualifier to the LValue, if it exists in E.
6045 if (E->changesVolatileQualification())
6046 LV.getQuals() = E->getType().getQualifiers();
6047 if (LV.isSimple()) {
6048 Address V = LV.getAddress();
6049 if (V.isValid()) {
6050 llvm::Type *T = ConvertTypeForMem(T: E->getType());
6051 if (V.getElementType() != T)
6052 LV.setAddress(V.withElementType(ElemTy: T));
6053 }
6054 }
6055 return LV;
6056 }
6057
6058 case CK_UncheckedDerivedToBase:
6059 case CK_DerivedToBase: {
6060 auto *DerivedClassDecl = E->getSubExpr()->getType()->castAsCXXRecordDecl();
6061 LValue LV = EmitLValue(E: E->getSubExpr());
6062 Address This = LV.getAddress();
6063
6064 // Perform the derived-to-base conversion
6065 Address Base = GetAddressOfBaseClass(
6066 Value: This, Derived: DerivedClassDecl, PathBegin: E->path_begin(), PathEnd: E->path_end(),
6067 /*NullCheckValue=*/false, Loc: E->getExprLoc());
6068
6069 // TODO: Support accesses to members of base classes in TBAA. For now, we
6070 // conservatively pretend that the complete object is of the base class
6071 // type.
6072 return MakeAddrLValue(Addr: Base, T: E->getType(), BaseInfo: LV.getBaseInfo(),
6073 TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType()));
6074 }
6075 case CK_ToUnion:
6076 return EmitAggExprToLValue(E);
6077 case CK_BaseToDerived: {
6078 auto *DerivedClassDecl = E->getType()->castAsCXXRecordDecl();
6079 LValue LV = EmitLValue(E: E->getSubExpr());
6080
6081 // Perform the base-to-derived conversion
6082 Address Derived = GetAddressOfDerivedClass(
6083 Value: LV.getAddress(), Derived: DerivedClassDecl, PathBegin: E->path_begin(), PathEnd: E->path_end(),
6084 /*NullCheckValue=*/false);
6085
6086 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
6087 // performed and the object is not of the derived type.
6088 if (sanitizePerformTypeCheck())
6089 EmitTypeCheck(TCK: TCK_DowncastReference, Loc: E->getExprLoc(), Addr: Derived,
6090 Type: E->getType());
6091
6092 if (SanOpts.has(K: SanitizerKind::CFIDerivedCast))
6093 EmitVTablePtrCheckForCast(T: E->getType(), Derived,
6094 /*MayBeNull=*/false, TCK: CFITCK_DerivedCast,
6095 Loc: E->getBeginLoc());
6096
6097 return MakeAddrLValue(Addr: Derived, T: E->getType(), BaseInfo: LV.getBaseInfo(),
6098 TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType()));
6099 }
6100 case CK_LValueBitCast: {
6101 // This must be a reinterpret_cast (or c-style equivalent).
6102 const auto *CE = cast<ExplicitCastExpr>(Val: E);
6103
6104 CGM.EmitExplicitCastExprType(E: CE, CGF: this);
6105 LValue LV = EmitLValue(E: E->getSubExpr());
6106 Address V = LV.getAddress().withElementType(
6107 ElemTy: ConvertTypeForMem(T: CE->getTypeAsWritten()->getPointeeType()));
6108
6109 if (SanOpts.has(K: SanitizerKind::CFIUnrelatedCast))
6110 EmitVTablePtrCheckForCast(T: E->getType(), Derived: V,
6111 /*MayBeNull=*/false, TCK: CFITCK_UnrelatedCast,
6112 Loc: E->getBeginLoc());
6113
6114 return MakeAddrLValue(Addr: V, T: E->getType(), BaseInfo: LV.getBaseInfo(),
6115 TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType()));
6116 }
6117 case CK_AddressSpaceConversion: {
6118 LValue LV = EmitLValue(E: E->getSubExpr());
6119 QualType DestTy = getContext().getPointerType(T: E->getType());
6120 llvm::Value *V = getTargetHooks().performAddrSpaceCast(
6121 CGF&: *this, V: LV.getPointer(CGF&: *this),
6122 SrcAddr: E->getSubExpr()->getType().getAddressSpace(), DestTy: ConvertType(T: DestTy));
6123 return MakeAddrLValue(Addr: Address(V, ConvertTypeForMem(T: E->getType()),
6124 LV.getAddress().getAlignment()),
6125 T: E->getType(), BaseInfo: LV.getBaseInfo(), TBAAInfo: LV.getTBAAInfo());
6126 }
6127 case CK_ObjCObjectLValueCast: {
6128 LValue LV = EmitLValue(E: E->getSubExpr());
6129 Address V = LV.getAddress().withElementType(ElemTy: ConvertType(T: E->getType()));
6130 return MakeAddrLValue(Addr: V, T: E->getType(), BaseInfo: LV.getBaseInfo(),
6131 TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType()));
6132 }
6133 case CK_ZeroToOCLOpaqueType:
6134 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
6135
6136 case CK_VectorSplat: {
6137 // LValue results of vector splats are only supported in HLSL.
6138 if (!getLangOpts().HLSL)
6139 return EmitUnsupportedLValue(E, Name: "unexpected cast lvalue");
6140 return EmitLValue(E: E->getSubExpr());
6141 }
6142 }
6143
6144 llvm_unreachable("Unhandled lvalue cast kind?");
6145}
6146
6147LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
6148 assert(OpaqueValueMappingData::shouldBindAsLValue(e));
6149 return getOrCreateOpaqueLValueMapping(e);
6150}
6151
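/// Emit the pair of lvalues needed for an HLSL out/inout argument: the
/// original argument lvalue and the casted temporary that is actually passed
/// to the callee. For example, for a parameter declared 'inout float X', the
/// argument is copied into a temporary before the call (only 'inout'
/// initializes it) and written back to the original lvalue afterwards.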
6152std::pair<LValue, LValue>
6153CodeGenFunction::EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty) {
6154 // Emitting the casted temporary through an opaque value.
6155 LValue BaseLV = EmitLValue(E: E->getArgLValue());
6156 OpaqueValueMappingData::bind(CGF&: *this, ov: E->getOpaqueArgLValue(), lv: BaseLV);
6157
6158 QualType ExprTy = E->getType();
6159 Address OutTemp = CreateIRTemp(Ty: ExprTy);
6160 LValue TempLV = MakeAddrLValue(Addr: OutTemp, T: ExprTy);
6161
6162 if (E->isInOut())
6163 EmitInitializationToLValue(E: E->getCastedTemporary()->getSourceExpr(),
6164 LV: TempLV);
6165
6166 OpaqueValueMappingData::bind(CGF&: *this, ov: E->getCastedTemporary(), lv: TempLV);
6167 return std::make_pair(x&: BaseLV, y&: TempLV);
6168}
6169
6170LValue CodeGenFunction::EmitHLSLOutArgExpr(const HLSLOutArgExpr *E,
6171 CallArgList &Args, QualType Ty) {
6172
6173 auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
6174
6175 llvm::Value *Addr = TempLV.getAddress().getBasePointer();
6176 llvm::Type *ElTy = ConvertTypeForMem(T: TempLV.getType());
6177
6178 EmitLifetimeStart(Addr);
6179
6180 Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
6181 Args.addWriteback(srcLV: BaseLV, temporary: TmpAddr, toUse: nullptr, writebackExpr: E->getWritebackCast());
6182 Args.add(rvalue: RValue::get(Addr: TmpAddr, CGF&: *this), type: Ty);
6183 return TempLV;
6184}
6185
6186LValue
6187CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
6188 assert(OpaqueValueMapping::shouldBindAsLValue(e));
6189
6190 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
6191 it = OpaqueLValues.find(Val: e);
6192
6193 if (it != OpaqueLValues.end())
6194 return it->second;
6195
6196 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
6197 return EmitLValue(E: e->getSourceExpr());
6198}
6199
6200RValue
6201CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
6202 assert(!OpaqueValueMapping::shouldBindAsLValue(e));
6203
6204 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
6205 it = OpaqueRValues.find(Val: e);
6206
6207 if (it != OpaqueRValues.end())
6208 return it->second;
6209
6210 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
6211 return EmitAnyExpr(E: e->getSourceExpr());
6212}
6213
6214bool CodeGenFunction::isOpaqueValueEmitted(const OpaqueValueExpr *E) {
6215 if (OpaqueValueMapping::shouldBindAsLValue(expr: E))
6216 return OpaqueLValues.contains(Val: E);
6217 return OpaqueRValues.contains(Val: E);
6218}
6219
6220RValue CodeGenFunction::EmitRValueForField(LValue LV,
6221 const FieldDecl *FD,
6222 SourceLocation Loc) {
6223 QualType FT = FD->getType();
6224 LValue FieldLV = EmitLValueForField(base: LV, field: FD);
6225 switch (getEvaluationKind(T: FT)) {
6226 case TEK_Complex:
6227 return RValue::getComplex(C: EmitLoadOfComplex(src: FieldLV, loc: Loc));
6228 case TEK_Aggregate:
6229 return FieldLV.asAggregateRValue();
6230 case TEK_Scalar:
6231 // This routine is used to load fields one-by-one to perform a copy, so
6232 // don't load reference fields.
6233 if (FD->getType()->isReferenceType())
6234 return RValue::get(V: FieldLV.getPointer(CGF&: *this));
6235    // Emit a primitive load via EmitLoadOfScalar, except when the lvalue is a
6236    // bitfield, which needs EmitLoadOfLValue.
6237 if (FieldLV.isBitField())
6238 return EmitLoadOfLValue(LV: FieldLV, Loc);
6239 return RValue::get(V: EmitLoadOfScalar(lvalue: FieldLV, Loc));
6240 }
6241 llvm_unreachable("bad evaluation kind");
6242}
6243
6244//===--------------------------------------------------------------------===//
6245// Expression Emission
6246//===--------------------------------------------------------------------===//
6247
6248RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
6249 ReturnValueSlot ReturnValue,
6250 llvm::CallBase **CallOrInvoke) {
6251 llvm::CallBase *CallOrInvokeStorage;
6252 if (!CallOrInvoke) {
6253 CallOrInvoke = &CallOrInvokeStorage;
6254 }
6255
6256 llvm::scope_exit AddCoroElideSafeOnExit([&] {
6257 if (E->isCoroElideSafe()) {
6258 auto *I = *CallOrInvoke;
6259 if (I)
6260 I->addFnAttr(Kind: llvm::Attribute::CoroElideSafe);
6261 }
6262 });
6263
6264 // Builtins never have block type.
6265 if (E->getCallee()->getType()->isBlockPointerType())
6266 return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
6267
6268 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Val: E))
6269 return EmitCXXMemberCallExpr(E: CE, ReturnValue, CallOrInvoke);
6270
6271 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(Val: E))
6272 return EmitCUDAKernelCallExpr(E: CE, ReturnValue, CallOrInvoke);
6273
6274  // A CXXOperatorCallExpr is created even for explicit object methods, but
6275  // these should be treated like static function calls.
6276 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(Val: E))
6277 if (const auto *MD =
6278 dyn_cast_if_present<CXXMethodDecl>(Val: CE->getCalleeDecl());
6279 MD && MD->isImplicitObjectMemberFunction())
6280 return EmitCXXOperatorMemberCallExpr(E: CE, MD, ReturnValue, CallOrInvoke);
6281
6282 CGCallee callee = EmitCallee(E: E->getCallee());
6283
6284 if (callee.isBuiltin()) {
6285 return EmitBuiltinExpr(GD: callee.getBuiltinDecl(), BuiltinID: callee.getBuiltinID(),
6286 E, ReturnValue);
6287 }
6288
6289 if (callee.isPseudoDestructor()) {
6290 return EmitCXXPseudoDestructorExpr(E: callee.getPseudoDestructorExpr());
6291 }
6292
6293 return EmitCall(FnType: E->getCallee()->getType(), Callee: callee, E, ReturnValue,
6294 /*Chain=*/nullptr, CallOrInvoke);
6295}
6296
6297/// Emit a CallExpr without considering whether it might be a subclass.
6298RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
6299 ReturnValueSlot ReturnValue,
6300 llvm::CallBase **CallOrInvoke) {
6301 CGCallee Callee = EmitCallee(E: E->getCallee());
6302 return EmitCall(FnType: E->getCallee()->getType(), Callee, E, ReturnValue,
6303 /*Chain=*/nullptr, CallOrInvoke);
6304}
6305
6306// Detect the unusual situation where an inline version is shadowed by a
6307// non-inline version. In that case we should pick the external one
6308// everywhere. That's GCC behavior too.
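// A typical case is a fortified wrapper such as
//   extern inline __attribute__((always_inline, gnu_inline))
//   void *memcpy(void *Dst, const void *Src, size_t N) { /* checked copy */ }
// where a plain (non-inline) 'memcpy' declaration elsewhere must win.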
6309static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
6310 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
6311 if (!PD->isInlineBuiltinDeclaration())
6312 return false;
6313 return true;
6314}
6315
6316static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
6317 const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl());
6318
6319 if (auto builtinID = FD->getBuiltinID()) {
6320 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
6321 std::string NoBuiltins = "no-builtins";
6322
6323 StringRef Ident = CGF.CGM.getMangledName(GD);
6324 std::string FDInlineName = (Ident + ".inline").str();
6325
6326 bool IsPredefinedLibFunction =
6327 CGF.getContext().BuiltinInfo.isPredefinedLibFunction(ID: builtinID);
6328 bool HasAttributeNoBuiltin =
6329 CGF.CurFn->getAttributes().hasFnAttr(Kind: NoBuiltinFD) ||
6330 CGF.CurFn->getAttributes().hasFnAttr(Kind: NoBuiltins);
6331
6332    // When directly calling an inline builtin, call it through its mangled
6333    // name to make it clear it's not the actual builtin.
6334 if (CGF.CurFn->getName() != FDInlineName &&
6335 OnlyHasInlineBuiltinDeclaration(FD)) {
6336 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
6337 llvm::Function *Fn = llvm::cast<llvm::Function>(Val: CalleePtr);
6338 llvm::Module *M = Fn->getParent();
6339 llvm::Function *Clone = M->getFunction(Name: FDInlineName);
6340 if (!Clone) {
6341 Clone = llvm::Function::Create(Ty: Fn->getFunctionType(),
6342 Linkage: llvm::GlobalValue::InternalLinkage,
6343 AddrSpace: Fn->getAddressSpace(), N: FDInlineName, M);
6344 Clone->addFnAttr(Kind: llvm::Attribute::AlwaysInline);
6345 }
6346 return CGCallee::forDirect(functionPtr: Clone, abstractInfo: GD);
6347 }
6348
6349    // Replaceable builtins provide their own implementation of a builtin. If we
6350    // are in an inline builtin implementation, avoid trivial infinite
6351    // recursion. Honor __attribute__((no_builtin("foo"))) or
6352    // __attribute__((no_builtin)) on the current function, unless foo is not a
6353    // predefined library function, in which case we must generate the builtin
6354    // no matter what.
6355 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
6356 return CGCallee::forBuiltin(builtinID, builtinDecl: FD);
6357 }
6358
6359 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
6360 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
6361 FD->hasAttr<CUDAGlobalAttr>())
6362 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
6363 Handle: cast<llvm::GlobalValue>(Val: CalleePtr->stripPointerCasts()));
6364
6365 return CGCallee::forDirect(functionPtr: CalleePtr, abstractInfo: GD);
6366}
6367
6368static GlobalDecl getGlobalDeclForDirectCall(const FunctionDecl *FD) {
6369 if (DeviceKernelAttr::isOpenCLSpelling(A: FD->getAttr<DeviceKernelAttr>()))
6370 return GlobalDecl(FD, KernelReferenceKind::Stub);
6371 return GlobalDecl(FD);
6372}
6373
6374CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
6375 E = E->IgnoreParens();
6376
6377 // Look through function-to-pointer decay.
6378 if (auto ICE = dyn_cast<ImplicitCastExpr>(Val: E)) {
6379 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
6380 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
6381 return EmitCallee(E: ICE->getSubExpr());
6382 }
6383
6384 // Try to remember the original __ptrauth qualifier for loads of
6385 // function pointers.
6386 if (ICE->getCastKind() == CK_LValueToRValue) {
6387 const Expr *SubExpr = ICE->getSubExpr();
6388 if (const auto *PtrType = SubExpr->getType()->getAs<PointerType>()) {
6389 std::pair<llvm::Value *, CGPointerAuthInfo> Result =
6390 EmitOrigPointerRValue(E);
6391
6392 QualType FunctionType = PtrType->getPointeeType();
6393 assert(FunctionType->isFunctionType());
6394
6395 GlobalDecl GD;
6396 if (const auto *VD =
6397 dyn_cast_or_null<VarDecl>(Val: E->getReferencedDeclOfCallee())) {
6398 GD = GlobalDecl(VD);
6399 }
6400 CGCalleeInfo CalleeInfo(FunctionType->getAs<FunctionProtoType>(), GD);
6401 CGCallee Callee(CalleeInfo, Result.first, Result.second);
6402 return Callee;
6403 }
6404 }
6405
6406 // Resolve direct calls.
6407 } else if (auto DRE = dyn_cast<DeclRefExpr>(Val: E)) {
6408 if (auto FD = dyn_cast<FunctionDecl>(Val: DRE->getDecl())) {
6409 return EmitDirectCallee(CGF&: *this, GD: getGlobalDeclForDirectCall(FD));
6410 }
6411 } else if (auto ME = dyn_cast<MemberExpr>(Val: E)) {
6412 if (auto FD = dyn_cast<FunctionDecl>(Val: ME->getMemberDecl())) {
6413 EmitIgnoredExpr(E: ME->getBase());
6414 return EmitDirectCallee(CGF&: *this, GD: FD);
6415 }
6416
6417 // Look through template substitutions.
6418 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(Val: E)) {
6419 return EmitCallee(E: NTTP->getReplacement());
6420
6421 // Treat pseudo-destructor calls differently.
6422 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(Val: E)) {
6423 return CGCallee::forPseudoDestructor(E: PDE);
6424 }
6425
6426 // Otherwise, we have an indirect reference.
6427 llvm::Value *calleePtr;
6428 QualType functionType;
6429 if (auto ptrType = E->getType()->getAs<PointerType>()) {
6430 calleePtr = EmitScalarExpr(E);
6431 functionType = ptrType->getPointeeType();
6432 } else {
6433 functionType = E->getType();
6434 calleePtr = EmitLValue(E, IsKnownNonNull: KnownNonNull).getPointer(CGF&: *this);
6435 }
6436 assert(functionType->isFunctionType());
6437
6438 GlobalDecl GD;
6439 if (const auto *VD =
6440 dyn_cast_or_null<VarDecl>(Val: E->getReferencedDeclOfCallee()))
6441 GD = GlobalDecl(VD);
6442
6443 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
6444 CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(T: functionType);
6445 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
6446 return callee;
6447}
6448
6449LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
6450 // Comma expressions just emit their LHS then their RHS as an l-value.
6451 if (E->getOpcode() == BO_Comma) {
6452 EmitIgnoredExpr(E: E->getLHS());
6453 EnsureInsertPoint();
6454 return EmitLValue(E: E->getRHS());
6455 }
6456
6457 if (E->getOpcode() == BO_PtrMemD ||
6458 E->getOpcode() == BO_PtrMemI)
6459 return EmitPointerToDataMemberBinaryExpr(E);
6460
6461 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
6462
6463 // Create a Key Instructions source location atom group that covers both
6464 // LHS and RHS expressions. Nested RHS expressions may get subsequently
6465 // separately grouped (1 below):
6466 //
6467 // 1. `a = b = c` -> Two atoms.
6468 // 2. `x = new(1)` -> One atom (for both addr store and value store).
6469 // 3. Complex and agg assignment -> One atom.
6470 ApplyAtomGroup Grp(getDebugInfo());
6471
6472 // Note that in all of these cases, __block variables need the RHS
6473 // evaluated first just in case the variable gets moved by the RHS.
6474
6475 switch (getEvaluationKind(T: E->getType())) {
6476 case TEK_Scalar: {
6477 if (PointerAuthQualifier PtrAuth =
6478 E->getLHS()->getType().getPointerAuth()) {
6479 LValue LV = EmitCheckedLValue(E: E->getLHS(), TCK: TCK_Store);
6480 LValue CopiedLV = LV;
6481 CopiedLV.getQuals().removePointerAuth();
6482 llvm::Value *RV =
6483 EmitPointerAuthQualify(Qualifier: PtrAuth, PointerExpr: E->getRHS(), StorageAddress: CopiedLV.getAddress());
6484 EmitNullabilityCheck(LHS: CopiedLV, RHS: RV, Loc: E->getExprLoc());
6485 EmitStoreThroughLValue(Src: RValue::get(V: RV), Dst: CopiedLV);
6486 return LV;
6487 }
6488
6489 switch (E->getLHS()->getType().getObjCLifetime()) {
6490 case Qualifiers::OCL_Strong:
6491 return EmitARCStoreStrong(e: E, /*ignored*/ false).first;
6492
6493 case Qualifiers::OCL_Autoreleasing:
6494 return EmitARCStoreAutoreleasing(e: E).first;
6495
6496 // No reason to do any of these differently.
6497 case Qualifiers::OCL_None:
6498 case Qualifiers::OCL_ExplicitNone:
6499 case Qualifiers::OCL_Weak:
6500 break;
6501 }
6502
6503 // TODO: Can we de-duplicate this code with the corresponding code in
6504 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
6505 RValue RV;
6506 llvm::Value *Previous = nullptr;
6507 QualType SrcType = E->getRHS()->getType();
6508    // Check if the LHS is a bitfield. If the RHS contains an implicit cast
6509    // expression, we want to extract that value and potentially (if the
6510    // bitfield sanitizer is enabled) use it to check for an implicit conversion.
6511 if (E->getLHS()->refersToBitField()) {
6512 llvm::Value *RHS =
6513 EmitWithOriginalRHSBitfieldAssignment(E, Previous: &Previous, SrcType: &SrcType);
6514 RV = RValue::get(V: RHS);
6515 } else
6516 RV = EmitAnyExpr(E: E->getRHS());
6517
6518 LValue LV = EmitCheckedLValue(E: E->getLHS(), TCK: TCK_Store);
6519
6520 if (RV.isScalar())
6521 EmitNullabilityCheck(LHS: LV, RHS: RV.getScalarVal(), Loc: E->getExprLoc());
6522
6523 if (LV.isBitField()) {
6524 llvm::Value *Result = nullptr;
6525      // If bitfield sanitizers are enabled, we want to use the result
6526      // to check whether a truncation or sign change has occurred.
6527 if (SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion))
6528 EmitStoreThroughBitfieldLValue(Src: RV, Dst: LV, Result: &Result);
6529 else
6530 EmitStoreThroughBitfieldLValue(Src: RV, Dst: LV);
6531
6532 // If the expression contained an implicit conversion, make sure
6533 // to use the value before the scalar conversion.
6534 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
6535 QualType DstType = E->getLHS()->getType();
6536 EmitBitfieldConversionCheck(Src, SrcType, Dst: Result, DstType,
6537 Info: LV.getBitFieldInfo(), Loc: E->getExprLoc());
6538 } else
6539 EmitStoreThroughLValue(Src: RV, Dst: LV);
6540
6541 if (getLangOpts().OpenMP)
6542 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF&: *this,
6543 LHS: E->getLHS());
6544 return LV;
6545 }
6546
6547 case TEK_Complex:
6548 return EmitComplexAssignmentLValue(E);
6549
6550 case TEK_Aggregate:
6551    // If the language is HLSL and the LHS is a constant array, then we are
6552    // performing a copy assignment and call a special function, because
6553    // EmitAggExprToLValue emits to a temporary LValue.
6554 if (getLangOpts().HLSL && E->getLHS()->getType()->isConstantArrayType())
6555 return EmitHLSLArrayAssignLValue(E);
6556
6557 return EmitAggExprToLValue(E);
6558 }
6559 llvm_unreachable("bad evaluation kind");
6560}
6561
6562// This function implements trivial copy assignment for HLSL's
6563// assignable constant arrays.
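// For example, in HLSL 'float A[4], B[4]; ... A = B;' performs an
// element-wise copy of the whole array into A.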
6564LValue CodeGenFunction::EmitHLSLArrayAssignLValue(const BinaryOperator *E) {
6565  // Don't emit an LValue for the RHS because it might not be an LValue.
6566 LValue LHS = EmitLValue(E: E->getLHS());
6567
6568 // If the RHS is a global resource array, copy all individual resources
6569 // into LHS.
6570 if (E->getRHS()->getType()->isHLSLResourceRecordArray())
6571 if (CGM.getHLSLRuntime().emitResourceArrayCopy(LHS, RHSExpr: E->getRHS(), CGF&: *this))
6572 return LHS;
6573
6574 // In C the RHS of an assignment operator is an RValue.
6575 // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
6576 // EmitInitializationToLValue to emit an RValue into an LValue.
6577 EmitInitializationToLValue(E: E->getRHS(), LV: LHS);
6578 return LHS;
6579}
6580
6581LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E,
6582 llvm::CallBase **CallOrInvoke) {
6583 RValue RV = EmitCallExpr(E, ReturnValue: ReturnValueSlot(), CallOrInvoke);
6584
6585 if (!RV.isScalar())
6586 return MakeAddrLValue(Addr: RV.getAggregateAddress(), T: E->getType(),
6587 Source: AlignmentSource::Decl);
6588
6589 assert(E->getCallReturnType(getContext())->isReferenceType() &&
6590 "Can't have a scalar return unless the return type is a "
6591 "reference type!");
6592
6593 return MakeNaturalAlignPointeeAddrLValue(V: RV.getScalarVal(), T: E->getType());
6594}
6595
6596LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
6597 // FIXME: This shouldn't require another copy.
6598 return EmitAggExprToLValue(E);
6599}
6600
6601LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
6602 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
6603 && "binding l-value to type which needs a temporary");
6604 AggValueSlot Slot = CreateAggTemp(T: E->getType());
6605 EmitCXXConstructExpr(E, Dest: Slot);
6606 return MakeAddrLValue(Addr: Slot.getAddress(), T: E->getType(), Source: AlignmentSource::Decl);
6607}
6608
6609LValue
6610CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
6611 return MakeNaturalAlignRawAddrLValue(V: EmitCXXTypeidExpr(E), T: E->getType());
6612}
6613
6614Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
6615 return CGM.GetAddrOfMSGuidDecl(GD: E->getGuidDecl())
6616 .withElementType(ElemTy: ConvertType(T: E->getType()));
6617}
6618
6619LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
6620 return MakeAddrLValue(Addr: EmitCXXUuidofExpr(E), T: E->getType(),
6621 Source: AlignmentSource::Decl);
6622}
6623
6624LValue
6625CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
6626 AggValueSlot Slot = CreateAggTemp(T: E->getType(), Name: "temp.lvalue");
6627 Slot.setExternallyDestructed();
6628 EmitAggExpr(E: E->getSubExpr(), AS: Slot);
6629 EmitCXXTemporary(Temporary: E->getTemporary(), TempType: E->getType(), Ptr: Slot.getAddress());
6630 return MakeAddrLValue(Addr: Slot.getAddress(), T: E->getType(), Source: AlignmentSource::Decl);
6631}
6632
6633LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
6634 RValue RV = EmitObjCMessageExpr(E);
6635
6636 if (!RV.isScalar())
6637 return MakeAddrLValue(Addr: RV.getAggregateAddress(), T: E->getType(),
6638 Source: AlignmentSource::Decl);
6639
6640 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
6641 "Can't have a scalar return unless the return type is a "
6642 "reference type!");
6643
6644 return MakeNaturalAlignPointeeAddrLValue(V: RV.getScalarVal(), T: E->getType());
6645}
6646
6647LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
6648 Address V =
6649 CGM.getObjCRuntime().GetAddrOfSelector(CGF&: *this, Sel: E->getSelector());
6650 return MakeAddrLValue(Addr: V, T: E->getType(), Source: AlignmentSource::Decl);
6651}
6652
6653llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
6654 const ObjCIvarDecl *Ivar) {
6655 return CGM.getObjCRuntime().EmitIvarOffset(CGF&: *this, Interface, Ivar);
6656}
6657
6658llvm::Value *
6659CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
6660 const ObjCIvarDecl *Ivar) {
6661 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
6662 QualType PointerDiffType = getContext().getPointerDiffType();
6663 return Builder.CreateZExtOrTrunc(V: OffsetValue,
6664 DestTy: getTypes().ConvertType(T: PointerDiffType));
6665}
6666
6667LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
6668 llvm::Value *BaseValue,
6669 const ObjCIvarDecl *Ivar,
6670 unsigned CVRQualifiers) {
6671 return CGM.getObjCRuntime().EmitObjCValueForIvar(CGF&: *this, ObjectTy, BaseValue,
6672 Ivar, CVRQualifiers);
6673}
6674
6675LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
6676 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
6677 llvm::Value *BaseValue = nullptr;
6678 const Expr *BaseExpr = E->getBase();
6679 Qualifiers BaseQuals;
6680 QualType ObjectTy;
6681 if (E->isArrow()) {
6682 BaseValue = EmitScalarExpr(E: BaseExpr);
6683 ObjectTy = BaseExpr->getType()->getPointeeType();
6684 BaseQuals = ObjectTy.getQualifiers();
6685 } else {
6686 LValue BaseLV = EmitLValue(E: BaseExpr);
6687 BaseValue = BaseLV.getPointer(CGF&: *this);
6688 ObjectTy = BaseExpr->getType();
6689 BaseQuals = ObjectTy.getQualifiers();
6690 }
6691
6692 LValue LV =
6693 EmitLValueForIvar(ObjectTy, BaseValue, Ivar: E->getDecl(),
6694 CVRQualifiers: BaseQuals.getCVRQualifiers());
6695 setObjCGCLValueClass(Ctx: getContext(), E, LV);
6696 return LV;
6697}
6698
6699LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
6700  // Can only get an l-value for a statement expression returning aggregate type
6701 RValue RV = EmitAnyExprToTemp(E);
6702 return MakeAddrLValue(Addr: RV.getAggregateAddress(), T: E->getType(),
6703 Source: AlignmentSource::Decl);
6704}
6705
6706RValue CodeGenFunction::EmitCall(QualType CalleeType,
6707 const CGCallee &OrigCallee, const CallExpr *E,
6708 ReturnValueSlot ReturnValue,
6709 llvm::Value *Chain,
6710 llvm::CallBase **CallOrInvoke,
6711 CGFunctionInfo const **ResolvedFnInfo) {
6712 // Get the actual function type. The callee type will always be a pointer to
6713 // function type or a block pointer type.
6714 assert(CalleeType->isFunctionPointerType() &&
6715 "Call must have function pointer type!");
6716
6717 const Decl *TargetDecl =
6718 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
6719
6720 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
6721 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
6722 "trying to emit a call to an immediate function");
6723
6724 CalleeType = getContext().getCanonicalType(T: CalleeType);
6725
6726 auto PointeeType = cast<PointerType>(Val&: CalleeType)->getPointeeType();
6727
6728 CGCallee Callee = OrigCallee;
6729
6730 bool CFIUnchecked = CalleeType->hasPointeeToCFIUncheckedCalleeFunctionType();
6731
6732 if (SanOpts.has(K: SanitizerKind::Function) &&
6733 (!TargetDecl || !isa<FunctionDecl>(Val: TargetDecl)) &&
6734 !isa<FunctionNoProtoType>(Val: PointeeType) && !CFIUnchecked) {
6735 if (llvm::Constant *PrefixSig =
6736 CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
6737 auto CheckOrdinal = SanitizerKind::SO_Function;
6738 auto CheckHandler = SanitizerHandler::FunctionTypeMismatch;
6739 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6740 auto *TypeHash = getUBSanFunctionTypeHash(T: PointeeType);
6741
6742 llvm::Type *PrefixSigType = PrefixSig->getType();
6743 llvm::StructType *PrefixStructTy = llvm::StructType::get(
6744 Context&: CGM.getLLVMContext(), Elements: {PrefixSigType, Int32Ty}, /*isPacked=*/true);
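      // Callees built with -fsanitize=function carry this
      // { signature, type hash } pair as prefix data immediately before their
      // entry point, so it is addressed below at struct index -1 relative to
      // the (aligned) function pointer.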
6745
6746 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6747 if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
6748 // Use raw pointer since we are using the callee pointer as data here.
6749 Address Addr =
6750 Address(CalleePtr, CalleePtr->getType(),
6751 CharUnits::fromQuantity(
6752 Quantity: CalleePtr->getPointerAlignment(DL: CGM.getDataLayout())),
6753 Callee.getPointerAuthInfo(), nullptr);
6754 CalleePtr = Addr.emitRawPointer(CGF&: *this);
6755 }
6756
6757 // On 32-bit Arm, the low bit of a function pointer indicates whether
6758 // it's using the Arm or Thumb instruction set. The actual first
6759 // instruction lives at the same address either way, so we must clear
6760 // that low bit before using the function address to find the prefix
6761 // structure.
6762 //
6763 // This applies to both Arm and Thumb target triples, because
6764 // either one could be used in an interworking context where it
6765 // might be passed function pointers of both types.
6766 llvm::Value *AlignedCalleePtr;
6767 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
6768 llvm::Value *CalleeAddress =
6769 Builder.CreatePtrToInt(V: CalleePtr, DestTy: IntPtrTy);
6770 llvm::Value *Mask = llvm::ConstantInt::getSigned(Ty: IntPtrTy, V: ~1);
6771 llvm::Value *AlignedCalleeAddress =
6772 Builder.CreateAnd(LHS: CalleeAddress, RHS: Mask);
6773 AlignedCalleePtr =
6774 Builder.CreateIntToPtr(V: AlignedCalleeAddress, DestTy: CalleePtr->getType());
6775 } else {
6776 AlignedCalleePtr = CalleePtr;
6777 }
6778
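      // The function's prologue data, a packed { signature, type hash } pair,
      // is laid out immediately before its entry point, so the prefix struct
      // is addressed at index -1 relative to the (aligned) callee pointer.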
      llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
      llvm::Value *CalleeSig =
          Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      EmitBlock(TypeCheck);
      llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
          Int32Ty,
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
          getPointerAlign());
      llvm::Value *CalleeTypeHashMatch =
          Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
      llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
                                      EmitCheckTypeDescriptor(CalleeType)};
      EmitCheck(std::make_pair(CalleeTypeHashMatch, CheckOrdinal), CheckHandler,
                StaticData, {CalleePtr});

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }

  const auto *FnType = cast<FunctionType>(PointeeType);

  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
      FD && DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
    CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FnType);

  // If we are checking indirect calls and this call is indirect, check that
  // the function pointer is a member of the bit set for the function type.
  if (SanOpts.has(SanitizerKind::CFIICall) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) && !CFIUnchecked) {
    auto CheckOrdinal = SanitizerKind::SO_CFIICall;
    auto CheckHandler = SanitizerHandler::CFICheckFail;
    SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);

    llvm::Metadata *MD =
        CGM.CreateMetadataIdentifierForFnType(QualType(FnType, 0));

    llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    llvm::Value *TypeTest = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});

    auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
        EmitCheckSourceLocation(E->getBeginLoc()),
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
    };
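    // With cross-DSO CFI and a cross-DSO type id available, route failures of
    // the inline type test through the CFI slow path (which can consult type
    // information registered by other DSOs); otherwise emit a regular check
    // that reports through the CFICheckFail handler.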
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
      EmitCfiSlowPathCheck(CheckOrdinal, TypeTest, CrossDsoTypeId, CalleePtr,
                           StaticData);
    } else {
      EmitCheck(std::make_pair(TypeTest, CheckOrdinal), CheckHandler,
                StaticData, {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
    }
  }

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);

  // C++17 requires that we evaluate arguments to a call using assignment
  // syntax right-to-left, and that we evaluate arguments to certain other
  // operators left-to-right. Note that we allow this to override the order
  // dictated by the calling convention on the MS ABI, which means that
  // parameter destruction order is not necessarily reverse construction order.
  // FIXME: Revisit this based on C++ committee response to unimplementability.
  EvaluationOrder Order = EvaluationOrder::Default;
  bool StaticOperator = false;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (OCE->isAssignmentOp())
      Order = EvaluationOrder::ForceRightToLeft;
    else {
      switch (OCE->getOperator()) {
      case OO_LessLess:
      case OO_GreaterGreater:
      case OO_AmpAmp:
      case OO_PipePipe:
      case OO_Comma:
      case OO_ArrowStar:
        Order = EvaluationOrder::ForceLeftToRight;
        break;
      default:
        break;
      }
    }

    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
        MD && MD->isStatic())
      StaticOperator = true;
  }

  auto Arguments = E->arguments();
  if (StaticOperator) {
    // If we're calling a static operator, we need to emit the object argument
    // and ignore it.
    EmitIgnoredExpr(E->getArg(0));
    Arguments = drop_begin(Arguments, 1);
  }
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
               E->getDirectCallee(), /*ParamsToSkip=*/0, Order);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*ChainCall=*/Chain);

  if (ResolvedFnInfo)
    *ResolvedFnInfo = &FnInfo;

  // In HIP, a function pointer used in a triple-chevron launch holds the
  // kernel handle; the kernel stub must be loaded from that handle and used
  // as the callee.
  if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
      isa<CUDAKernelCallExpr>(E) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    llvm::Value *Handle = Callee.getFunctionPointer();
    auto *Stub = Builder.CreateLoad(
        Address(Handle, Handle->getType(), CGM.getPointerAlign()));
    Callee.setFunctionPointer(Stub);
  }
  llvm::CallBase *LocalCallOrInvoke = nullptr;
  RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
                         E == MustTailCall, E->getExprLoc());

  if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    if (CalleeDecl->hasAttr<RestrictAttr>() ||
        CalleeDecl->hasAttr<MallocSpanAttr>() ||
        CalleeDecl->hasAttr<AllocSizeAttr>()) {
      // Function has 'malloc' (aka. 'restrict') or 'alloc_size' attribute.
      if (SanOpts.has(SanitizerKind::AllocToken)) {
        // Set !alloc_token metadata.
        EmitAllocToken(LocalCallOrInvoke, E);
      }
    }
  }
  if (CallOrInvoke)
    *CallOrInvoke = LocalCallOrInvoke;

  return Call;
}

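/// Emit the l-value designated by a pointer-to-data-member access, i.e. the
/// result of the .* or ->* operator with a data member pointer as the
/// right-hand operand.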
LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  Address BaseAddr = Address::invalid();
  if (E->getOpcode() == BO_PtrMemI) {
    BaseAddr = EmitPointerWithAlignment(E->getLHS());
  } else {
    BaseAddr = EmitLValue(E->getLHS()).getAddress();
  }

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();

  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
                    !isUnderlyingBasePointerConstantNull(E->getLHS());
  Address MemberAddr = EmitCXXMemberDataPointerAddress(
      E, BaseAddr, OffsetV, MPT, IsInBounds, &BaseInfo, &TBAAInfo);

  return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
}

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue();
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

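/// Attach !fpmath metadata to a floating-point instruction, recording the
/// maximum permitted error of its result in ULPs. An accuracy of 0, or a
/// value that is not an instruction, is left unannotated.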
void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

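/// Relax the accuracy of a single-precision sqrt result via !fpmath when the
/// language mode permits it (OpenCL, or HIP device code, without the
/// correctly-rounded-divide-sqrt option).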
void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp.
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-sqrt flag
    SetFPAccuracy(Val, 3.0f);
  }
}

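/// Relax the accuracy of a single-precision divide result via !fpmath when
/// the language mode permits it (OpenCL, or HIP device code, without the
/// correctly-rounded-divide-sqrt option).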
void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-div flag
    SetFPAccuracy(Val, 2.5f);
  }
}

namespace {
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}

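/// Emit a pseudo-object expression: bind each non-unique opaque value to its
/// source expression, evaluate the semantic expressions in order, and return
/// the value of the result expression as an l-value or r-value, as requested.
/// An aggregate prvalue result may be emitted directly into the given slot.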
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      // Skip unique OVEs.
      if (ov->isUnique()) {
        assert(ov != resultExpr &&
               "A unique OVE cannot be used as the result expression");
        continue;
      }

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isPRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);
        LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
                                       AlignmentSource::Decl);
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (CodeGenFunction::OpaqueValueMappingData &opaque : opaques)
    opaque.unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, /*forLValue=*/false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, /*forLValue=*/true,
                              AggValueSlot::ignored()).LV;
}

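/// Flatten an aggregate l-value into a flat, source-ordered list of element
/// accesses for an HLSL flat cast: constant arrays, records (including a
/// single base class), and vectors are decomposed into scalar, bit-field,
/// and vector-element l-values appended to AccessList.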
void CodeGenFunction::FlattenAccessAndTypeLValue(
    LValue Val, SmallVectorImpl<LValue> &AccessList) {

  llvm::SmallVector<
      std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
      WorkList;
  llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32);
  WorkList.push_back({Val, Val.getType(), {llvm::ConstantInt::get(IdxTy, 0)}});

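  // Each work item records the enclosing l-value, the type of the sub-object
  // still to be visited, and the GEP index path from the l-value to that
  // sub-object. The work list is processed LIFO, so children are pushed in
  // reverse to keep the emitted accesses in source order.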
  while (!WorkList.empty()) {
    auto [LVal, T, IdxList] = WorkList.pop_back_val();
    T = T.getCanonicalType().getUnqualifiedType();
    assert(!isa<MatrixType>(T) && "Matrix types not yet supported in HLSL");

    if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) {
      uint64_t Size = CAT->getZExtSize();
      for (int64_t I = Size - 1; I > -1; I--) {
        llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
        IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
        WorkList.emplace_back(LVal, CAT->getElementType(), IdxListCopy);
      }
    } else if (const auto *RT = dyn_cast<RecordType>(T)) {
      const RecordDecl *Record = RT->getDecl()->getDefinitionOrSelf();
      assert(!Record->isUnion() && "Union types not supported in flat cast.");

      const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);

      llvm::SmallVector<
          std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
          ReverseList;
      if (CXXD && CXXD->isStandardLayout())
        Record = CXXD->getStandardLayoutBaseWithFields();

      // Deal with potential base classes.
      if (CXXD && !CXXD->isStandardLayout()) {
        if (CXXD->getNumBases() > 0) {
          assert(CXXD->getNumBases() == 1 &&
                 "HLSL doesn't support multiple inheritance.");
          auto Base = CXXD->bases_begin();
          llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
          IdxListCopy.push_back(llvm::ConstantInt::get(
              IdxTy, 0)); // base struct should be at index zero
          ReverseList.emplace_back(LVal, Base->getType(), IdxListCopy);
        }
      }

      const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(Record);

      llvm::Type *LLVMT = ConvertTypeForMem(T);
      CharUnits Align = getContext().getTypeAlignInChars(T);
      LValue RLValue;
      bool createdGEP = false;
      for (auto *FD : Record->fields()) {
        if (FD->isBitField()) {
          if (FD->isUnnamedBitField())
            continue;
          if (!createdGEP) {
            createdGEP = true;
            Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
                                                    LLVMT, Align, "gep");
            RLValue = MakeAddrLValue(GEP, T);
          }
          LValue FieldLVal = EmitLValueForField(RLValue, FD, /*IsInBounds=*/true);
          ReverseList.push_back({FieldLVal, FD->getType(), {}});
        } else {
          llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
          IdxListCopy.push_back(
              llvm::ConstantInt::get(IdxTy, Layout.getLLVMFieldNo(FD)));
          ReverseList.emplace_back(LVal, FD->getType(), IdxListCopy);
        }
      }
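      // The base (if any) and fields were collected in declaration order;
      // reverse before appending so the LIFO work list pops them back in that
      // same order.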
      std::reverse(ReverseList.begin(), ReverseList.end());
      llvm::append_range(WorkList, ReverseList);
    } else if (const auto *VT = dyn_cast<VectorType>(T)) {
      llvm::Type *LLVMT = ConvertTypeForMem(T);
      CharUnits Align = getContext().getTypeAlignInChars(T);
      Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList, LLVMT,
                                              Align, "vector.gep");
      LValue Base = MakeAddrLValue(GEP, T);
      for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) {
        llvm::Constant *Idx = llvm::ConstantInt::get(IdxTy, I);
        LValue LV =
            LValue::MakeVectorElt(Base.getAddress(), Idx, VT->getElementType(),
                                  Base.getBaseInfo(), TBAAAccessInfo());
        AccessList.emplace_back(LV);
      }
    } else { // a scalar/builtin type
      if (!IdxList.empty()) {
        llvm::Type *LLVMT = ConvertTypeForMem(T);
        CharUnits Align = getContext().getTypeAlignInChars(T);
        Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
                                                LLVMT, Align, "gep");
        AccessList.emplace_back(MakeAddrLValue(GEP, T));
      } else // must be a bit-field we already created an l-value for
        AccessList.emplace_back(LVal);
    }
  }
}
