//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <numeric>
#include <optional>
#include <string>

using namespace clang;
using namespace CodeGen;

namespace clang {
// TODO: consider deprecating ClSanitizeGuardChecks; functionality is subsumed
// by -fsanitize-skip-hot-cutoff
llvm::cl::opt<bool> ClSanitizeGuardChecks(
    "ubsan-guard-checks", llvm::cl::Optional,
    llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
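// When ClSanitizeGuardChecks is enabled, each emitted UBSan check is
// predicated on the `llvm.allow.ubsan.check` intrinsic, so later passes (such
// as the hot-percentile logic behind the -fsanitize-skip-hot-cutoff option
// mentioned above) can drop individual checks without frontend changes.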

} // namespace clang

//===--------------------------------------------------------------------===//
//                           Defines for metadata
//===--------------------------------------------------------------------===//

// These values must be kept exactly in sync with the ones in the UBSan
// runtime library.
enum VariableTypeDescriptorKind : uint16_t {
  /// An integer type.
  TK_Integer = 0x0000,
  /// A floating-point type.
  TK_Float = 0x0001,
  /// A _BitInt(N) type.
  TK_BitInt = 0x0002,
  /// Any other type. The value representation is unspecified.
  TK_Unknown = 0xffff
};
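// In the runtime's TypeDescriptor, the kind occupies the first 16 bits and is
// followed by 16 bits of kind-specific info; for TK_Integer, the low bit of
// that info records signedness and the remaining bits encode log2 of the bit
// width (see ubsan_value.h in compiler-rt).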

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
RawAddress
CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return RawAddress(Alloca, Ty, Align, KnownNonNull);
}

RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, LangAS DestLangAS,
                                             CharUnits Align, const Twine &Name,
                                             llvm::Value *ArraySize,
                                             RawAddress *AllocaAddr) {
  RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.

  unsigned DestAddrSpace = getContext().getTargetAddressSpace(DestLangAS);
  if (DestAddrSpace != Alloca.getAddressSpace()) {
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(getPostAllocaInsertPoint());
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), Builder.getPtrTy(DestAddrSpace),
        /*IsNonNull=*/true);
  }

  return RawAddress(V, Ty, Align, KnownNonNull);
}
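// For example, on AMDGPU allocas live in the private address space (5) while
// pointers at the C++ level are generic (0), so the cast above is required;
// on typical CPU targets the two address spaces coincide and no cast is
// emitted.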

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  llvm::AllocaInst *Alloca;
  if (ArraySize)
    Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
  else
    Alloca =
        new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                             ArraySize, Name, AllocaInsertPt->getIterator());
  if (SanOpts.Mask & SanitizerKind::Address) {
    Alloca->addAnnotationMetadata({"alloca_name_altered", Name.str()});
  }
  if (Allocas) {
    Allocas->Add(Alloca);
  }
  return Alloca;
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                         const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                          RawAddress *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                          const Twine &Name,
                                          RawAddress *Alloca) {
  RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                       /*ArraySize=*/nullptr, Alloca);

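  // A constant matrix type is lowered to an IR array for its in-memory
  // layout, but codegen operates on it as a flat vector; rewrite the element
  // type so subsequent loads and stores use the vector form directly.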
  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
                     KnownNonNull);
  }
  return Result;
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     CharUnits Align,
                                                     const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO->setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}
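// For example, `if (x)` with `float x` lowers to an `fcmp une` against zero,
// while a member-pointer condition is delegated to the C++ ABI above, whose
// encoding of "null" (e.g. -1 for Itanium member data pointers) differs from
// a plain zero comparison.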

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isPRValue())
    return (void)EmitAnyExpr(E, AggValueSlot::ignored(),
                             /*ignoreResult=*/true);

  // If this is a conditional operator yielding a bitfield, we can special-case
  // the emission. The normal EmitLValue path is particularly difficult here,
  // since creating a single "LValue" for two differently sized operands is not
  // really doable.
  if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
          E->IgnoreParenNoopCasts(getContext()))) {
    if (CondOp->getObjectKind() == OK_BitField)
      return EmitIgnoredConditionalOperator(CondOp);
  }

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::EmitInitializationToLValue(
    const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
  QualType Type = LV.getType();
  switch (getEvaluationKind(Type)) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::MayOverlap, IsZeroed));
    return;
  case TEK_Scalar:
    if (LV.isSimple())
      EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    else
      EmitStoreThroughLValue(RValue::get(EmitScalarExpr(E)), LV);
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise = isa_and_nonnull<VarDecl>(VD) &&
                         VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  QualType::DestructionKind DK = E->getType().isDestructedType();
  if (DK != QualType::DK_none) {
    switch (M->getStorageDuration()) {
    case SD_Static:
    case SD_Thread: {
      CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
      if (const RecordType *RT =
              E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
        // Get the destructor for the reference temporary.
        if (auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl());
            ClassDecl && !ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }

      if (!ReferenceTemporaryDtor)
        return;

      llvm::FunctionCallee CleanupFn;
      llvm::Constant *CleanupArg;
      if (E->getType()->isArrayType()) {
        CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
            ReferenceTemporary, E->getType(),
            CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
            dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
        CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
      } else {
        CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
            GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
        CleanupArg =
            cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
      }
      CGF.CGM.getCXXABI().registerGlobalDtor(
          CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    } break;
    case SD_FullExpression:
      CGF.pushDestroy(DK, ReferenceTemporary, E->getType());
      break;
    case SD_Automatic:
      CGF.pushLifetimeExtendedDestroy(DK, ReferenceTemporary, E->getType());
      break;
    case SD_Dynamic:
      llvm_unreachable("temporary cannot have dynamic storage duration");
    }
  }
}

static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
                                           const MaterializeTemporaryExpr *M,
                                           const Expr *Inner,
                                           RawAddress *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record try to promote it into a
    // constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        Ty.isConstantStorage(CGF.getContext(), /*ExcludeCtor=*/true,
                             /*ExcludeDtor=*/false))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
            llvm::GlobalValue::NotThreadLocal,
            CGF.getContext().getTargetAddressSpace(AS));
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getAsAlign());
        llvm::Constant *C = GV;
        if (AS != LangAS::Default)
          C = TCG.performAddrSpaceCast(
              CGF.CGM, GV, AS,
              llvm::PointerType::get(
                  CGF.getLLVMContext(),
                  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
        // FIXME: Should we put the new global into a COMDAT?
        return RawAddress(C, GV->getValueType(), alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().starts_with("aapcs");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    RawAddress Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      llvm::Type *Ty = ConvertTypeForMem(E->getType());
      Object = Object.withElementType(Ty);

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst,
                     /*capturedByInit=*/false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  RawAddress Alloca = Address::invalid();
  RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
    Object = Object.withElementType(TemporaryType);
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      QualType RefType = M->getType().withoutLocalFastQualifiers();
      if (RefType.getPointerAuth()) {
        // Use the qualifier of the reference temporary to sign the pointer.
        LValue LV = MakeRawAddrLValue(Object.getPointer(), RefType,
                                      Object.getAlignment());
        EmitScalarInit(E, M->getExtendingDecl(), LV, /*capturedByInit=*/false);
      } else {
        EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
      }
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers, which need
      // more precise lifetime marks. However, when inside an "await.suspend"
      // block, we should always avoid the conditional cleanup, because it
      // creates a boolean marker that lives across await_suspend, which can
      // destroy the coroutine frame.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          ((!SanOpts.has(SanitizerKind::HWAddress) &&
            !SanOpts.has(SanitizerKind::Memory) &&
            !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
           inSuspendBlock())) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress();
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(
          E, Object, Ptr, Adjustment.Ptr.MPT, /*IsInBounds=*/true);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
                                llvm::Value *Ptr) {
  llvm::Value *A0 =
      Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
  llvm::Value *A1 =
      Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
  return Builder.CreateXor(Acc, A1);
}
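// The multiply/xor-shift above mirrors a step of the splitmix64 finalizer
// (0xbf58476d1ce4e5b9 is its first multiplier); it cheaply decorrelates the
// bits of the incoming pointer before folding it into the accumulator.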

bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) ||
         SanOpts.has(SanitizerKind::Alignment) ||
         SanOpts.has(SanitizerKind::ObjectSize) ||
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty.isVolatileQualified())
    return;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());

  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;

  llvm::BasicBlock *Done = nullptr;
  bool DoneViaNullSanitize = false;

  {
    auto CheckHandler = SanitizerHandler::TypeMismatch;
    SanitizerDebugLocation SanScope(this,
                                    {SanitizerKind::SO_Null,
                                     SanitizerKind::SO_ObjectSize,
                                     SanitizerKind::SO_Alignment},
                                    CheckHandler);

    SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 3>
        Checks;

    llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
    bool AllowNullPointers = isNullPointerAllowed(TCK);
    if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
        !IsGuaranteedNonNull) {
      // The glvalue must not be an empty glvalue.
      IsNonNull = Builder.CreateIsNotNull(Ptr);

      // The IR builder can constant-fold the null check if the pointer points
      // to a constant.
      IsGuaranteedNonNull = IsNonNull == True;

      // Skip the null check if the pointer is known to be non-null.
      if (!IsGuaranteedNonNull) {
        if (AllowNullPointers) {
          // When performing pointer casts, it's OK if the value is null.
          // Skip the remaining checks in that case.
          Done = createBasicBlock("null");
          DoneViaNullSanitize = true;
          llvm::BasicBlock *Rest = createBasicBlock("not.null");
          Builder.CreateCondBr(IsNonNull, Rest, Done);
          EmitBlock(Rest);
        } else {
          Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
        }
      }
    }

    if (SanOpts.has(SanitizerKind::ObjectSize) &&
        !SkippedChecks.has(SanitizerKind::ObjectSize) &&
        !Ty->isIncompleteType()) {
      uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
      llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
      if (ArraySize)
        Size = Builder.CreateMul(Size, ArraySize);

      // Degenerate case: new X[0] does not need an objectsize check.
      llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
      if (!ConstantSize || !ConstantSize->isNullValue()) {
        // The glvalue must refer to a large enough storage region.
        // FIXME: If Address Sanitizer is enabled, insert dynamic
        // instrumentation to check this.
        // FIXME: Get object address space
        llvm::Type *Tys[2] = {IntPtrTy, Int8PtrTy};
        llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
        llvm::Value *Min = Builder.getFalse();
        llvm::Value *NullIsUnknown = Builder.getFalse();
        llvm::Value *Dynamic = Builder.getFalse();
        llvm::Value *LargeEnough = Builder.CreateICmpUGE(
            Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
        Checks.push_back(
            std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
      }
    }

    llvm::MaybeAlign AlignVal;
    llvm::Value *PtrAsInt = nullptr;

    if (SanOpts.has(SanitizerKind::Alignment) &&
        !SkippedChecks.has(SanitizerKind::Alignment)) {
      AlignVal = Alignment.getAsMaybeAlign();
      if (!Ty->isIncompleteType() && !AlignVal)
        AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
                                               /*ForPointeeType=*/true)
                       .getAsMaybeAlign();

      // The glvalue must be suitably aligned.
      if (AlignVal && *AlignVal > llvm::Align(1) &&
          (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
        PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
        llvm::Value *Align = Builder.CreateAnd(
            PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
        llvm::Value *Aligned =
            Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
        if (Aligned != True)
          Checks.push_back(
              std::make_pair(Aligned, SanitizerKind::SO_Alignment));
      }
    }

    if (Checks.size() > 0) {
      llvm::Constant *StaticData[] = {
          EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
          llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
          llvm::ConstantInt::get(Int8Ty, TCK)};
      EmitCheck(Checks, CheckHandler, StaticData, PtrAsInt ? PtrAsInt : Ptr);
    }
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    SanitizerDebugLocation SanScope(this, {SanitizerKind::SO_Vptr},
                                    SanitizerHandler::DynamicTypeCacheMiss);

    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a deterministic hash of the mangled name of the type.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Contained in NoSanitizeList based on the mangled type.
    if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
                                                           Out.str())) {
      // Load the vptr, and mix it with TypeHash.
      llvm::Value *TypeHash =
          llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));

      llvm::Type *VPtrTy = llvm::PointerType::get(getLLVMContext(), 0);
      Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
      llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
                                          Ty->getAsCXXRecordDecl(),
                                          VTableAuthMode::UnsafeUbsanStrip);
      VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);

      llvm::Value *Hash =
          emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal = Builder.CreateAlignedLoad(
          IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
          getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    SanitizerDebugLocation SanScope(
        this,
        {DoneViaNullSanitize ? SanitizerKind::SO_Null : SanitizerKind::SO_Vptr},
        DoneViaNullSanitize ? SanitizerHandler::TypeMismatch
                            : SanitizerHandler::DynamicTypeCacheMiss);
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}
/// If Base is known to point to the start of an array, return the length of
/// that array. Return nullptr if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          LangOptions::StrictFlexArraysLevelKind
                                              StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
                                                     StrictFlexArraysLevel)) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());

      if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}
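// For example, `int arr[10]; ... arr[i]` yields a constant bound of 10 via
// the array-to-pointer decay case above, while a parameter declared as
// `int *p __attribute__((pass_object_size(0)))` derives its bound from the
// implicitly passed size argument.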

namespace {

/// \p StructAccessBase returns the base \p Expr of a field access. It returns
/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
///
///     p in p->a.b.c
///
/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
/// looking for:
///
///     struct s {
///       struct s *ptr;
///       int count;
///       char array[] __attribute__((counted_by(count)));
///     };
///
/// If we have an expression like \p p->ptr->array[index], we want the
/// \p MemberExpr for \p p->ptr instead of \p p.
class StructAccessBase
    : public ConstStmtVisitor<StructAccessBase, const Expr *> {
  const RecordDecl *ExpectedRD;

  bool IsExpectedRecordDecl(const Expr *E) const {
    QualType Ty = E->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    return ExpectedRD == Ty->getAsRecordDecl();
  }

public:
  StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // NOTE: If we build C++ support for counted_by, then we'll have to handle
  // horrors like this:
  //
  //     struct S {
  //       int x, y;
  //       int blah[] __attribute__((counted_by(x)));
  //     } s;
  //
  //     int foo(int index, int val) {
  //       int (S::*IHatePMDs)[] = &S::blah;
  //       (s.*IHatePMDs)[index] = val;
  //     }

  const Expr *Visit(const Expr *E) {
    return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
  }

  const Expr *VisitStmt(const Stmt *S) { return nullptr; }

  // These are the types we expect to return (in order of most to least
  // likely):
  //
  //   1. DeclRefExpr - This is the expression for the base of the structure.
  //      It's exactly what we want to build an access to the \p counted_by
  //      field.
  //   2. MemberExpr - This is the expression that has the same \p RecordDecl
  //      as the flexible array member's lexical enclosing \p RecordDecl. This
  //      allows us to catch things like: "p->p->array"
  //   3. CompoundLiteralExpr - This is for people who create something
  //      heretical like (struct foo has a flexible array member):
  //
  //        (struct foo){ 1, 2 }.blah[idx];
  const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (IsExpectedRecordDecl(E) && E->isArrow())
      return E;
    const Expr *Res = Visit(E->getBase());
    return !Res && IsExpectedRecordDecl(E) ? E : Res;
  }
  const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitCallExpr(const CallExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (IsExpectedRecordDecl(E))
      return E;
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      return IsExpectedRecordDecl(E) ? E : nullptr;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
};

} // end anonymous namespace

using RecIndicesTy = SmallVector<llvm::Value *, 8>;

static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *Field,
                                 RecIndicesTy &Indices) {
  const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
  int64_t FieldNo = -1;
  for (const FieldDecl *FD : RD->fields()) {
    if (!Layout.containsFieldDecl(FD))
      // This can happen, for example, if the field has an empty struct type
      // and therefore no corresponding field in the LLVM record layout.
      continue;

    FieldNo = Layout.getLLVMFieldNo(FD);
    if (FD == Field) {
      Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
      return true;
    }

    QualType Ty = FD->getType();
    if (Ty->isRecordType()) {
      if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
        if (RD->isUnion())
          FieldNo = 0;
        Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
        return true;
      }
    }
  }

  return false;
}
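// Note: indices are accumulated leaf-to-root as the recursion above unwinds;
// the caller below appends the initial zero (pointer) index last and then
// reverses the list before building the GEP.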

llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();

  // Find the base struct expr (i.e. p in p->a.b.c.d).
  const Expr *StructBase = StructAccessBase(RD).Visit(Base);
  if (!StructBase || StructBase->HasSideEffects(getContext()))
    return nullptr;

  llvm::Value *Res = nullptr;
  if (StructBase->getType()->isPointerType()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
    Res = Addr.emitRawPointer(*this);
  } else if (StructBase->isLValue()) {
    LValue LV = EmitLValue(StructBase);
    Address Addr = LV.getAddress();
    Res = Addr.emitRawPointer(*this);
  } else {
    return nullptr;
  }

  RecIndicesTy Indices;
  getGEPIndicesToField(*this, RD, CountDecl, Indices);
  if (Indices.empty())
    return nullptr;

  Indices.push_back(Builder.getInt32(0));
  return Builder.CreateInBoundsGEP(
      ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
      RecIndicesTy(llvm::reverse(Indices)), "counted_by.gep");
}
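// For example, given the `struct s` from the StructAccessBase comment above,
// an access like `p->ptr->array[i]` resolves the base to `p->ptr`, and the
// resulting GEP addresses `p->ptr->count`, which EmitLoadOfCountedByField
// then loads.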

/// This method is typically called in contexts where we can't generate
/// side-effects, like in __builtin_dynamic_object_size. When finding
/// expressions, only choose those that have either already been emitted or can
/// be loaded without side-effects.
///
/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
///   within the top-level struct.
/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
llvm::Value *CodeGenFunction::EmitLoadOfCountedByField(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
    return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
                                     getIntAlign(), "counted_by.load");
  return nullptr;
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      getLangOpts().getStrictFlexArraysLevel();
  QualType IndexedType;
  llvm::Value *Bound =
      getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);

  EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
}

void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
                                          llvm::Value *Index,
                                          QualType IndexType,
                                          QualType IndexedType, bool Accessed) {
  if (!Bound)
    return;

  auto CheckKind = SanitizerKind::SO_ArrayBounds;
  auto CheckHandler = SanitizerHandler::OutOfBounds;
  SanitizerDebugLocation SanScope(this, {CheckKind}, CheckHandler);

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy,
                                                /*isSigned=*/false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, CheckKind), CheckHandler, StaticData, Index);
}
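// The Accessed flag explains the ULT/ULE split above: an actual access must
// satisfy Index < Bound, whereas merely forming the address (e.g. `&arr[N]`,
// one past the end) is valid as long as Index <= Bound.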
1260 | |
1261 | CodeGenFunction::ComplexPairTy CodeGenFunction:: |
1262 | EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, |
1263 | bool isInc, bool isPre) { |
1264 | ComplexPairTy InVal = EmitLoadOfComplex(src: LV, loc: E->getExprLoc()); |
1265 | |
1266 | llvm::Value *NextVal; |
1267 | if (isa<llvm::IntegerType>(Val: InVal.first->getType())) { |
1268 | uint64_t AmountVal = isInc ? 1 : -1; |
1269 | NextVal = llvm::ConstantInt::get(Ty: InVal.first->getType(), V: AmountVal, IsSigned: true); |
1270 | |
1271 | // Add the inc/dec to the real part. |
1272 | NextVal = Builder.CreateAdd(LHS: InVal.first, RHS: NextVal, Name: isInc ? "inc" : "dec" ); |
1273 | } else { |
1274 | QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType(); |
1275 | llvm::APFloat FVal(getContext().getFloatTypeSemantics(T: ElemTy), 1); |
1276 | if (!isInc) |
1277 | FVal.changeSign(); |
1278 | NextVal = llvm::ConstantFP::get(Context&: getLLVMContext(), V: FVal); |
1279 | |
1280 | // Add the inc/dec to the real part. |
1281 | NextVal = Builder.CreateFAdd(L: InVal.first, R: NextVal, Name: isInc ? "inc" : "dec" ); |
1282 | } |
1283 | |
1284 | ComplexPairTy IncVal(NextVal, InVal.second); |
1285 | |
1286 | // Store the updated result through the lvalue. |
1287 | EmitStoreOfComplex(V: IncVal, dest: LV, /*init*/ isInit: false); |
1288 | if (getLangOpts().OpenMP) |
1289 | CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF&: *this, |
1290 | LHS: E->getSubExpr()); |
1291 | |
1292 | // If this is a postinc, return the value read from memory, otherwise use the |
1293 | // updated value. |
1294 | return isPre ? IncVal : InVal; |
1295 | } |
1296 | |
1297 | void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E, |
1298 | CodeGenFunction *CGF) { |
1299 | // Bind VLAs in the cast type. |
1300 | if (CGF && E->getType()->isVariablyModifiedType()) |
1301 | CGF->EmitVariablyModifiedType(Ty: E->getType()); |
1302 | |
1303 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
1304 | DI->EmitExplicitCastType(Ty: E->getType()); |
1305 | } |
1306 | |
1307 | //===----------------------------------------------------------------------===// |
1308 | // LValue Expression Emission |
1309 | //===----------------------------------------------------------------------===// |
1310 | |
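// Helper for CodeGenFunction::EmitPointerWithAlignment below: walk the
// pointer expression to derive a tighter alignment bound than the pointee
// type's natural alignment alone would give. As an illustrative example, for
// `(int *)p` where `p` is a plain `char *` (so the source l-value is opaque
// rather than declaration-based), the bitcast case adopts the natural
// alignment of the casted-to pointee type (typically 4 for `int`) instead of
// keeping the source alignment of 1.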
1311 | static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, |
1312 | TBAAAccessInfo *TBAAInfo, |
1313 | KnownNonNull_t IsKnownNonNull, |
1314 | CodeGenFunction &CGF) { |
1315 | // We allow this with ObjC object pointers because of fragile ABIs. |
1316 | assert(E->getType()->isPointerType() || |
1317 | E->getType()->isObjCObjectPointerType()); |
1318 | E = E->IgnoreParens(); |
1319 | |
1320 | // Casts: |
1321 | if (const CastExpr *CE = dyn_cast<CastExpr>(Val: E)) { |
1322 | if (const auto *ECE = dyn_cast<ExplicitCastExpr>(Val: CE)) |
1323 | CGF.CGM.EmitExplicitCastExprType(E: ECE, CGF: &CGF); |
1324 | |
1325 | switch (CE->getCastKind()) { |
1326 | // Non-converting casts (but not C's implicit conversion from void*). |
1327 | case CK_BitCast: |
1328 | case CK_NoOp: |
1329 | case CK_AddressSpaceConversion: |
1330 | if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) { |
1331 | if (PtrTy->getPointeeType()->isVoidType()) |
1332 | break; |
1333 | |
1334 | LValueBaseInfo InnerBaseInfo; |
1335 | TBAAAccessInfo InnerTBAAInfo; |
1336 | Address Addr = CGF.EmitPointerWithAlignment( |
1337 | Addr: CE->getSubExpr(), BaseInfo: &InnerBaseInfo, TBAAInfo: &InnerTBAAInfo, IsKnownNonNull); |
1338 | if (BaseInfo) *BaseInfo = InnerBaseInfo; |
1339 | if (TBAAInfo) *TBAAInfo = InnerTBAAInfo; |
1340 | |
1341 | if (isa<ExplicitCastExpr>(Val: CE)) { |
1342 | LValueBaseInfo TargetTypeBaseInfo; |
1343 | TBAAAccessInfo TargetTypeTBAAInfo; |
1344 | CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment( |
1345 | T: E->getType(), BaseInfo: &TargetTypeBaseInfo, TBAAInfo: &TargetTypeTBAAInfo); |
1346 | if (TBAAInfo) |
1347 | *TBAAInfo = |
1348 | CGF.CGM.mergeTBAAInfoForCast(SourceInfo: *TBAAInfo, TargetInfo: TargetTypeTBAAInfo); |
1349 | // If the source l-value is opaque, honor the alignment of the |
1350 | // casted-to type. |
1351 | if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { |
1352 | if (BaseInfo) |
1353 | BaseInfo->mergeForCast(Info: TargetTypeBaseInfo); |
1354 | Addr.setAlignment(Align); |
1355 | } |
1356 | } |
1357 | |
1358 | if (CGF.SanOpts.has(K: SanitizerKind::CFIUnrelatedCast) && |
1359 | CE->getCastKind() == CK_BitCast) { |
1360 | if (auto PT = E->getType()->getAs<PointerType>()) |
1361 | CGF.EmitVTablePtrCheckForCast(T: PT->getPointeeType(), Derived: Addr, |
1362 | /*MayBeNull=*/true, |
1363 | TCK: CodeGenFunction::CFITCK_UnrelatedCast, |
1364 | Loc: CE->getBeginLoc()); |
1365 | } |
1366 | |
1367 | llvm::Type *ElemTy = |
1368 | CGF.ConvertTypeForMem(T: E->getType()->getPointeeType()); |
1369 | Addr = Addr.withElementType(ElemTy); |
1370 | if (CE->getCastKind() == CK_AddressSpaceConversion) |
1371 | Addr = CGF.Builder.CreateAddrSpaceCast( |
1372 | Addr, Ty: CGF.ConvertType(T: E->getType()), ElementTy: ElemTy); |
1373 | return CGF.authPointerToPointerCast(Ptr: Addr, SourceType: CE->getSubExpr()->getType(), |
1374 | DestType: CE->getType()); |
1375 | } |
1376 | break; |
1377 | |
1378 | // Array-to-pointer decay. |
1379 | case CK_ArrayToPointerDecay: |
1380 | return CGF.EmitArrayToPointerDecay(Array: CE->getSubExpr(), BaseInfo, TBAAInfo); |
1381 | |
1382 | // Derived-to-base conversions. |
1383 | case CK_UncheckedDerivedToBase: |
1384 | case CK_DerivedToBase: { |
1385 | // TODO: Support accesses to members of base classes in TBAA. For now, we |
1386 | // conservatively pretend that the complete object is of the base class |
1387 | // type. |
1388 | if (TBAAInfo) |
1389 | *TBAAInfo = CGF.CGM.getTBAAAccessInfo(AccessType: E->getType()); |
1390 | Address Addr = CGF.EmitPointerWithAlignment( |
1391 | Addr: CE->getSubExpr(), BaseInfo, TBAAInfo: nullptr, |
1392 | IsKnownNonNull: (KnownNonNull_t)(IsKnownNonNull || |
1393 | CE->getCastKind() == CK_UncheckedDerivedToBase)); |
1394 | auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); |
1395 | return CGF.GetAddressOfBaseClass( |
1396 | Value: Addr, Derived, PathBegin: CE->path_begin(), PathEnd: CE->path_end(), |
1397 | NullCheckValue: CGF.ShouldNullCheckClassCastValue(Cast: CE), Loc: CE->getExprLoc()); |
1398 | } |
1399 | |
1400 | // TODO: Is there any reason to treat base-to-derived conversions |
1401 | // specially? |
1402 | default: |
1403 | break; |
1404 | } |
1405 | } |
1406 | |
1407 | // Unary &. |
1408 | if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: E)) { |
1409 | if (UO->getOpcode() == UO_AddrOf) { |
1410 | LValue LV = CGF.EmitLValue(E: UO->getSubExpr(), IsKnownNonNull); |
1411 | if (BaseInfo) *BaseInfo = LV.getBaseInfo(); |
1412 | if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo(); |
1413 | return LV.getAddress(); |
1414 | } |
1415 | } |
1416 | |
1417 | // std::addressof and variants. |
1418 | if (auto *Call = dyn_cast<CallExpr>(Val: E)) { |
1419 | switch (Call->getBuiltinCallee()) { |
1420 | default: |
1421 | break; |
1422 | case Builtin::BIaddressof: |
1423 | case Builtin::BI__addressof: |
1424 | case Builtin::BI__builtin_addressof: { |
1425 | LValue LV = CGF.EmitLValue(E: Call->getArg(Arg: 0), IsKnownNonNull); |
1426 | if (BaseInfo) *BaseInfo = LV.getBaseInfo(); |
1427 | if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo(); |
1428 | return LV.getAddress(); |
1429 | } |
1430 | } |
1431 | } |
1432 | |
1433 | // TODO: conditional operators, comma. |
1434 | |
1435 | // Otherwise, use the alignment of the type. |
1436 | return CGF.makeNaturalAddressForPointer( |
1437 | Ptr: CGF.EmitScalarExpr(E), T: E->getType()->getPointeeType(), Alignment: CharUnits(), |
1438 | /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull); |
1439 | } |
1440 | |
1441 | /// EmitPointerWithAlignment - Given an expression of pointer type, try to |
1442 | /// derive a more accurate bound on the alignment of the pointer. |
1443 | Address CodeGenFunction::EmitPointerWithAlignment( |
1444 | const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, |
1445 | KnownNonNull_t IsKnownNonNull) { |
1446 | Address Addr = |
1447 | ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, CGF&: *this); |
1448 | if (IsKnownNonNull && !Addr.isKnownNonNull()) |
1449 | Addr.setKnownNonNull(); |
1450 | return Addr; |
1451 | } |
1452 | |
1453 | llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) { |
1454 | llvm::Value *V = RV.getScalarVal(); |
1455 | if (auto MPT = T->getAs<MemberPointerType>()) |
1456 | return CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF&: *this, MemPtr: V, MPT); |
1457 | return Builder.CreateICmpNE(LHS: V, RHS: llvm::Constant::getNullValue(Ty: V->getType())); |
1458 | } |
1459 | |
1460 | RValue CodeGenFunction::GetUndefRValue(QualType Ty) { |
1461 | if (Ty->isVoidType()) |
1462 | return RValue::get(V: nullptr); |
1463 | |
1464 | switch (getEvaluationKind(T: Ty)) { |
1465 | case TEK_Complex: { |
1466 | llvm::Type *EltTy = |
1467 | ConvertType(T: Ty->castAs<ComplexType>()->getElementType()); |
1468 | llvm::Value *U = llvm::UndefValue::get(T: EltTy); |
1469 | return RValue::getComplex(C: std::make_pair(x&: U, y&: U)); |
1470 | } |
1471 | |
1472 | // If this is a use of an undefined aggregate type, the aggregate must have an |
1473 | // identifiable address. Just because the contents of the value are undefined |
1474 | // doesn't mean that the address can't be taken and compared. |
1475 | case TEK_Aggregate: { |
1476 | Address DestPtr = CreateMemTemp(Ty, Name: "undef.agg.tmp" ); |
1477 | return RValue::getAggregate(addr: DestPtr); |
1478 | } |
1479 | |
1480 | case TEK_Scalar: |
1481 | return RValue::get(V: llvm::UndefValue::get(T: ConvertType(T: Ty))); |
1482 | } |
1483 | llvm_unreachable("bad evaluation kind" ); |
1484 | } |
1485 | |
1486 | RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E, |
1487 | const char *Name) { |
1488 | ErrorUnsupported(S: E, Type: Name); |
1489 | return GetUndefRValue(Ty: E->getType()); |
1490 | } |
1491 | |
1492 | LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E, |
1493 | const char *Name) { |
1494 | ErrorUnsupported(S: E, Type: Name); |
1495 | llvm::Type *ElTy = ConvertType(T: E->getType()); |
1496 | llvm::Type *Ty = UnqualPtrTy; |
1497 | return MakeAddrLValue( |
1498 | Addr: Address(llvm::UndefValue::get(T: Ty), ElTy, CharUnits::One()), T: E->getType()); |
1499 | } |
1500 | |
1501 | bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) { |
1502 | const Expr *Base = Obj; |
1503 | while (!isa<CXXThisExpr>(Val: Base)) { |
1504 | // The result of a dynamic_cast can be null. |
1505 | if (isa<CXXDynamicCastExpr>(Val: Base)) |
1506 | return false; |
1507 | |
1508 | if (const auto *CE = dyn_cast<CastExpr>(Val: Base)) { |
1509 | Base = CE->getSubExpr(); |
1510 | } else if (const auto *PE = dyn_cast<ParenExpr>(Val: Base)) { |
1511 | Base = PE->getSubExpr(); |
1512 | } else if (const auto *UO = dyn_cast<UnaryOperator>(Val: Base)) { |
1513 | if (UO->getOpcode() == UO_Extension) |
1514 | Base = UO->getSubExpr(); |
1515 | else |
1516 | return false; |
1517 | } else { |
1518 | return false; |
1519 | } |
1520 | } |
1521 | return true; |
1522 | } |
1523 | |
1524 | LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) { |
1525 | LValue LV; |
1526 | if (SanOpts.has(K: SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(Val: E)) |
1527 | LV = EmitArraySubscriptExpr(E: cast<ArraySubscriptExpr>(Val: E), /*Accessed*/true); |
1528 | else |
1529 | LV = EmitLValue(E); |
1530 | if (!isa<DeclRefExpr>(Val: E) && !LV.isBitField() && LV.isSimple()) { |
1531 | SanitizerSet SkippedChecks; |
1532 | if (const auto *ME = dyn_cast<MemberExpr>(Val: E)) { |
1533 | bool IsBaseCXXThis = IsWrappedCXXThis(Obj: ME->getBase()); |
1534 | if (IsBaseCXXThis) |
1535 | SkippedChecks.set(K: SanitizerKind::Alignment, Value: true); |
1536 | if (IsBaseCXXThis || isa<DeclRefExpr>(Val: ME->getBase())) |
1537 | SkippedChecks.set(K: SanitizerKind::Null, Value: true); |
1538 | } |
1539 | EmitTypeCheck(TCK, Loc: E->getExprLoc(), LV, Type: E->getType(), SkippedChecks); |
1540 | } |
1541 | return LV; |
1542 | } |
1543 | |
1544 | /// EmitLValue - Emit code to compute a designator that specifies the location |
1545 | /// of the expression. |
1546 | /// |
1547 | /// This can return one of two things: a simple address or a bitfield reference. |
1548 | /// In either case, the LLVM Value* in the LValue structure is guaranteed to be |
1549 | /// an LLVM pointer type. |
1550 | /// |
1551 | /// If this returns a bitfield reference, nothing about the pointee type of the |
1552 | /// LLVM value is known: For example, it may not be a pointer to an integer. |
1553 | /// |
1554 | /// If this returns a normal address, and if the lvalue's C type is fixed size, |
1555 | /// this method guarantees that the returned pointer type will point to an LLVM |
/// type of the same size as the lvalue's type. If the lvalue has a variable
1557 | /// length type, this is not possible. |
1558 | /// |
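/// For example (illustrative), a member access `s.i` for a plain `int i;`
/// yields a simple address lvalue, while `s.b` for a bitfield `int b : 3;`
/// yields a bitfield reference recording the storage unit, offset, and width
/// rather than a directly loadable pointer.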
1559 | LValue CodeGenFunction::EmitLValue(const Expr *E, |
1560 | KnownNonNull_t IsKnownNonNull) { |
// Run with sufficient stack space so that deeply nested expressions do not
// cause a stack overflow.
1563 | LValue LV; |
1564 | CGM.runWithSufficientStackSpace( |
1565 | Loc: E->getExprLoc(), Fn: [&] { LV = EmitLValueHelper(E, IsKnownNonNull); }); |
1566 | |
1567 | if (IsKnownNonNull && !LV.isKnownNonNull()) |
1568 | LV.setKnownNonNull(); |
1569 | return LV; |
1570 | } |
1571 | |
1572 | static QualType getConstantExprReferredType(const FullExpr *E, |
1573 | const ASTContext &Ctx) { |
1574 | const Expr *SE = E->getSubExpr()->IgnoreImplicit(); |
1575 | if (isa<OpaqueValueExpr>(Val: SE)) |
1576 | return SE->getType(); |
1577 | return cast<CallExpr>(Val: SE)->getCallReturnType(Ctx)->getPointeeType(); |
1578 | } |
1579 | |
1580 | LValue CodeGenFunction::EmitLValueHelper(const Expr *E, |
1581 | KnownNonNull_t IsKnownNonNull) { |
1582 | ApplyDebugLocation DL(*this, E); |
1583 | switch (E->getStmtClass()) { |
1584 | default: return EmitUnsupportedLValue(E, Name: "l-value expression" ); |
1585 | |
1586 | case Expr::ObjCPropertyRefExprClass: |
1587 | llvm_unreachable("cannot emit a property reference directly" ); |
1588 | |
1589 | case Expr::ObjCSelectorExprClass: |
1590 | return EmitObjCSelectorLValue(E: cast<ObjCSelectorExpr>(Val: E)); |
1591 | case Expr::ObjCIsaExprClass: |
1592 | return EmitObjCIsaExpr(E: cast<ObjCIsaExpr>(Val: E)); |
1593 | case Expr::BinaryOperatorClass: |
1594 | return EmitBinaryOperatorLValue(E: cast<BinaryOperator>(Val: E)); |
1595 | case Expr::CompoundAssignOperatorClass: { |
1596 | QualType Ty = E->getType(); |
1597 | if (const AtomicType *AT = Ty->getAs<AtomicType>()) |
1598 | Ty = AT->getValueType(); |
1599 | if (!Ty->isAnyComplexType()) |
1600 | return EmitCompoundAssignmentLValue(E: cast<CompoundAssignOperator>(Val: E)); |
1601 | return EmitComplexCompoundAssignmentLValue(E: cast<CompoundAssignOperator>(Val: E)); |
1602 | } |
1603 | case Expr::CallExprClass: |
1604 | case Expr::CXXMemberCallExprClass: |
1605 | case Expr::CXXOperatorCallExprClass: |
1606 | case Expr::UserDefinedLiteralClass: |
1607 | return EmitCallExprLValue(E: cast<CallExpr>(Val: E)); |
1608 | case Expr::CXXRewrittenBinaryOperatorClass: |
1609 | return EmitLValue(E: cast<CXXRewrittenBinaryOperator>(Val: E)->getSemanticForm(), |
1610 | IsKnownNonNull); |
1611 | case Expr::VAArgExprClass: |
1612 | return EmitVAArgExprLValue(E: cast<VAArgExpr>(Val: E)); |
1613 | case Expr::DeclRefExprClass: |
1614 | return EmitDeclRefLValue(E: cast<DeclRefExpr>(Val: E)); |
1615 | case Expr::ConstantExprClass: { |
1616 | const ConstantExpr *CE = cast<ConstantExpr>(Val: E); |
1617 | if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) { |
1618 | QualType RetType = getConstantExprReferredType(E: CE, Ctx: getContext()); |
1619 | return MakeNaturalAlignAddrLValue(V: Result, T: RetType); |
1620 | } |
1621 | return EmitLValue(E: cast<ConstantExpr>(Val: E)->getSubExpr(), IsKnownNonNull); |
1622 | } |
1623 | case Expr::ParenExprClass: |
1624 | return EmitLValue(E: cast<ParenExpr>(Val: E)->getSubExpr(), IsKnownNonNull); |
1625 | case Expr::GenericSelectionExprClass: |
1626 | return EmitLValue(E: cast<GenericSelectionExpr>(Val: E)->getResultExpr(), |
1627 | IsKnownNonNull); |
1628 | case Expr::PredefinedExprClass: |
1629 | return EmitPredefinedLValue(E: cast<PredefinedExpr>(Val: E)); |
1630 | case Expr::StringLiteralClass: |
1631 | return EmitStringLiteralLValue(E: cast<StringLiteral>(Val: E)); |
1632 | case Expr::ObjCEncodeExprClass: |
1633 | return EmitObjCEncodeExprLValue(E: cast<ObjCEncodeExpr>(Val: E)); |
1634 | case Expr::PseudoObjectExprClass: |
1635 | return EmitPseudoObjectLValue(e: cast<PseudoObjectExpr>(Val: E)); |
1636 | case Expr::InitListExprClass: |
1637 | return EmitInitListLValue(E: cast<InitListExpr>(Val: E)); |
1638 | case Expr::CXXTemporaryObjectExprClass: |
1639 | case Expr::CXXConstructExprClass: |
1640 | return EmitCXXConstructLValue(E: cast<CXXConstructExpr>(Val: E)); |
1641 | case Expr::CXXBindTemporaryExprClass: |
1642 | return EmitCXXBindTemporaryLValue(E: cast<CXXBindTemporaryExpr>(Val: E)); |
1643 | case Expr::CXXUuidofExprClass: |
1644 | return EmitCXXUuidofLValue(E: cast<CXXUuidofExpr>(Val: E)); |
1645 | case Expr::LambdaExprClass: |
1646 | return EmitAggExprToLValue(E); |
1647 | |
1648 | case Expr::ExprWithCleanupsClass: { |
1649 | const auto *cleanups = cast<ExprWithCleanups>(Val: E); |
1650 | RunCleanupsScope Scope(*this); |
1651 | LValue LV = EmitLValue(E: cleanups->getSubExpr(), IsKnownNonNull); |
1652 | if (LV.isSimple()) { |
// Defend against branches out of GNU statement expressions surrounded by
// cleanups.
1655 | Address Addr = LV.getAddress(); |
1656 | llvm::Value *V = Addr.getBasePointer(); |
1657 | Scope.ForceCleanup(ValuesToReload: {&V}); |
1658 | Addr.replaceBasePointer(P: V); |
1659 | return LValue::MakeAddr(Addr, type: LV.getType(), Context&: getContext(), |
1660 | BaseInfo: LV.getBaseInfo(), TBAAInfo: LV.getTBAAInfo()); |
1661 | } |
1662 | // FIXME: Is it possible to create an ExprWithCleanups that produces a |
1663 | // bitfield lvalue or some other non-simple lvalue? |
1664 | return LV; |
1665 | } |
1666 | |
1667 | case Expr::CXXDefaultArgExprClass: { |
1668 | auto *DAE = cast<CXXDefaultArgExpr>(Val: E); |
1669 | CXXDefaultArgExprScope Scope(*this, DAE); |
1670 | return EmitLValue(E: DAE->getExpr(), IsKnownNonNull); |
1671 | } |
1672 | case Expr::CXXDefaultInitExprClass: { |
1673 | auto *DIE = cast<CXXDefaultInitExpr>(Val: E); |
1674 | CXXDefaultInitExprScope Scope(*this, DIE); |
1675 | return EmitLValue(E: DIE->getExpr(), IsKnownNonNull); |
1676 | } |
1677 | case Expr::CXXTypeidExprClass: |
1678 | return EmitCXXTypeidLValue(E: cast<CXXTypeidExpr>(Val: E)); |
1679 | |
1680 | case Expr::ObjCMessageExprClass: |
1681 | return EmitObjCMessageExprLValue(E: cast<ObjCMessageExpr>(Val: E)); |
1682 | case Expr::ObjCIvarRefExprClass: |
1683 | return EmitObjCIvarRefLValue(E: cast<ObjCIvarRefExpr>(Val: E)); |
1684 | case Expr::StmtExprClass: |
1685 | return EmitStmtExprLValue(E: cast<StmtExpr>(Val: E)); |
1686 | case Expr::UnaryOperatorClass: |
1687 | return EmitUnaryOpLValue(E: cast<UnaryOperator>(Val: E)); |
1688 | case Expr::ArraySubscriptExprClass: |
1689 | return EmitArraySubscriptExpr(E: cast<ArraySubscriptExpr>(Val: E)); |
1690 | case Expr::MatrixSubscriptExprClass: |
1691 | return EmitMatrixSubscriptExpr(E: cast<MatrixSubscriptExpr>(Val: E)); |
1692 | case Expr::ArraySectionExprClass: |
1693 | return EmitArraySectionExpr(E: cast<ArraySectionExpr>(Val: E)); |
1694 | case Expr::ExtVectorElementExprClass: |
1695 | return EmitExtVectorElementExpr(E: cast<ExtVectorElementExpr>(Val: E)); |
1696 | case Expr::CXXThisExprClass: |
1697 | return MakeAddrLValue(Addr: LoadCXXThisAddress(), T: E->getType()); |
1698 | case Expr::MemberExprClass: |
1699 | return EmitMemberExpr(E: cast<MemberExpr>(Val: E)); |
1700 | case Expr::CompoundLiteralExprClass: |
1701 | return EmitCompoundLiteralLValue(E: cast<CompoundLiteralExpr>(Val: E)); |
1702 | case Expr::ConditionalOperatorClass: |
1703 | return EmitConditionalOperatorLValue(E: cast<ConditionalOperator>(Val: E)); |
1704 | case Expr::BinaryConditionalOperatorClass: |
1705 | return EmitConditionalOperatorLValue(E: cast<BinaryConditionalOperator>(Val: E)); |
1706 | case Expr::ChooseExprClass: |
1707 | return EmitLValue(E: cast<ChooseExpr>(Val: E)->getChosenSubExpr(), IsKnownNonNull); |
1708 | case Expr::OpaqueValueExprClass: |
1709 | return EmitOpaqueValueLValue(e: cast<OpaqueValueExpr>(Val: E)); |
1710 | case Expr::SubstNonTypeTemplateParmExprClass: |
1711 | return EmitLValue(E: cast<SubstNonTypeTemplateParmExpr>(Val: E)->getReplacement(), |
1712 | IsKnownNonNull); |
1713 | case Expr::ImplicitCastExprClass: |
1714 | case Expr::CStyleCastExprClass: |
1715 | case Expr::CXXFunctionalCastExprClass: |
1716 | case Expr::CXXStaticCastExprClass: |
1717 | case Expr::CXXDynamicCastExprClass: |
1718 | case Expr::CXXReinterpretCastExprClass: |
1719 | case Expr::CXXConstCastExprClass: |
1720 | case Expr::CXXAddrspaceCastExprClass: |
1721 | case Expr::ObjCBridgedCastExprClass: |
1722 | return EmitCastLValue(E: cast<CastExpr>(Val: E)); |
1723 | |
1724 | case Expr::MaterializeTemporaryExprClass: |
1725 | return EmitMaterializeTemporaryExpr(M: cast<MaterializeTemporaryExpr>(Val: E)); |
1726 | |
1727 | case Expr::CoawaitExprClass: |
1728 | return EmitCoawaitLValue(E: cast<CoawaitExpr>(Val: E)); |
1729 | case Expr::CoyieldExprClass: |
1730 | return EmitCoyieldLValue(E: cast<CoyieldExpr>(Val: E)); |
1731 | case Expr::PackIndexingExprClass: |
1732 | return EmitLValue(E: cast<PackIndexingExpr>(Val: E)->getSelectedExpr()); |
1733 | case Expr::HLSLOutArgExprClass: |
1734 | llvm_unreachable("cannot emit a HLSL out argument directly" ); |
1735 | } |
1736 | } |
1737 | |
1738 | /// Given an object of the given canonical type, can we safely copy a |
1739 | /// value out of it based on its initializer? |
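/// For example (illustrative), `const int` qualifies, while `volatile const
/// int` does not, and neither does a C++ class with a mutable member or
/// non-trivial copy or destroy behavior.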
1740 | static bool isConstantEmittableObjectType(QualType type) { |
1741 | assert(type.isCanonical()); |
1742 | assert(!type->isReferenceType()); |
1743 | |
1744 | // Must be const-qualified but non-volatile. |
1745 | Qualifiers qs = type.getLocalQualifiers(); |
1746 | if (!qs.hasConst() || qs.hasVolatile()) return false; |
1747 | |
1748 | // Otherwise, all object types satisfy this except C++ classes with |
1749 | // mutable subobjects or non-trivial copy/destroy behavior. |
1750 | if (const auto *RT = dyn_cast<RecordType>(Val&: type)) |
1751 | if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: RT->getDecl())) |
1752 | if (RD->hasMutableFields() || !RD->isTrivial()) |
1753 | return false; |
1754 | |
1755 | return true; |
1756 | } |
1757 | |
1758 | /// Can we constant-emit a load of a reference to a variable of the |
1759 | /// given type? This is different from predicates like |
1760 | /// Decl::mightBeUsableInConstantExpressions because we do want it to apply |
1761 | /// in situations that don't necessarily satisfy the language's rules |
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
1763 | /// to do this with const float variables even if those variables |
1764 | /// aren't marked 'constexpr'. |
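/// For instance (illustrative), a plain `const float` is CEK_AsValueOnly, a
/// `const float &` is CEK_AsValueOrReference, and a reference to a
/// non-constant object is at best CEK_AsReferenceOnly.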
1765 | enum ConstantEmissionKind { |
1766 | CEK_None, |
1767 | CEK_AsReferenceOnly, |
1768 | CEK_AsValueOrReference, |
1769 | CEK_AsValueOnly |
1770 | }; |
1771 | static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) { |
1772 | type = type.getCanonicalType(); |
1773 | if (const auto *ref = dyn_cast<ReferenceType>(Val&: type)) { |
1774 | if (isConstantEmittableObjectType(type: ref->getPointeeType())) |
1775 | return CEK_AsValueOrReference; |
1776 | return CEK_AsReferenceOnly; |
1777 | } |
1778 | if (isConstantEmittableObjectType(type)) |
1779 | return CEK_AsValueOnly; |
1780 | return CEK_None; |
1781 | } |
1782 | |
/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids having to emit
/// global copies of variables that are named without triggering a formal
/// use in a context where we can't emit a direct reference to them, for
/// instance when a block, a lambda, or a member of a local class uses a
/// const int or constexpr variable from an enclosing function.
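/// A sketch of the effect (illustrative): given `const int n = 4;` in an
/// enclosing function, a use of `n` inside a block or lambda body can be
/// folded to the constant 4 here instead of forcing a capture or a global
/// copy of the variable.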
1789 | CodeGenFunction::ConstantEmission |
1790 | CodeGenFunction::tryEmitAsConstant(const DeclRefExpr *RefExpr) { |
1791 | const ValueDecl *Value = RefExpr->getDecl(); |
1792 | |
1793 | // The value needs to be an enum constant or a constant variable. |
1794 | ConstantEmissionKind CEK; |
1795 | if (isa<ParmVarDecl>(Val: Value)) { |
1796 | CEK = CEK_None; |
1797 | } else if (const auto *var = dyn_cast<VarDecl>(Val: Value)) { |
1798 | CEK = checkVarTypeForConstantEmission(type: var->getType()); |
1799 | } else if (isa<EnumConstantDecl>(Val: Value)) { |
1800 | CEK = CEK_AsValueOnly; |
1801 | } else { |
1802 | CEK = CEK_None; |
1803 | } |
1804 | if (CEK == CEK_None) return ConstantEmission(); |
1805 | |
1806 | Expr::EvalResult result; |
1807 | bool resultIsReference; |
1808 | QualType resultType; |
1809 | |
1810 | // It's best to evaluate all the way as an r-value if that's permitted. |
1811 | if (CEK != CEK_AsReferenceOnly && |
1812 | RefExpr->EvaluateAsRValue(Result&: result, Ctx: getContext())) { |
1813 | resultIsReference = false; |
1814 | resultType = RefExpr->getType().getUnqualifiedType(); |
1815 | |
1816 | // Otherwise, try to evaluate as an l-value. |
1817 | } else if (CEK != CEK_AsValueOnly && |
1818 | RefExpr->EvaluateAsLValue(Result&: result, Ctx: getContext())) { |
1819 | resultIsReference = true; |
1820 | resultType = Value->getType(); |
1821 | |
1822 | // Failure. |
1823 | } else { |
1824 | return ConstantEmission(); |
1825 | } |
1826 | |
1827 | // In any case, if the initializer has side-effects, abandon ship. |
1828 | if (result.HasSideEffects) |
1829 | return ConstantEmission(); |
1830 | |
// In CUDA/HIP device compilation, a lambda may capture a reference variable
// referencing a global host variable by copy. In this case the lambda should
// make a copy of the value of the global host variable. The DRE of the
// captured reference variable cannot be emitted as a compile-time constant
// load from the host global variable, since the host variable is not
// accessible on the device. Instead, the DRE of the captured reference
// variable has to be loaded from the captures.
1838 | if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() && |
1839 | RefExpr->refersToEnclosingVariableOrCapture()) { |
1840 | auto *MD = dyn_cast_or_null<CXXMethodDecl>(Val: CurCodeDecl); |
1841 | if (isLambdaMethod(DC: MD) && MD->getOverloadedOperator() == OO_Call) { |
1842 | const APValue::LValueBase &base = result.Val.getLValueBase(); |
1843 | if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) { |
1844 | if (const VarDecl *VD = dyn_cast<const VarDecl>(Val: D)) { |
1845 | if (!VD->hasAttr<CUDADeviceAttr>()) { |
1846 | return ConstantEmission(); |
1847 | } |
1848 | } |
1849 | } |
1850 | } |
1851 | } |
1852 | |
1853 | // Emit as a constant. |
1854 | llvm::Constant *C = ConstantEmitter(*this).emitAbstract( |
1855 | loc: RefExpr->getLocation(), value: result.Val, T: resultType); |
1856 | |
1857 | // Make sure we emit a debug reference to the global variable. |
1858 | // This should probably fire even for |
1859 | if (isa<VarDecl>(Val: Value)) { |
1860 | if (!getContext().DeclMustBeEmitted(D: cast<VarDecl>(Val: Value))) |
1861 | EmitDeclRefExprDbgValue(E: RefExpr, Init: result.Val); |
1862 | } else { |
1863 | assert(isa<EnumConstantDecl>(Value)); |
1864 | EmitDeclRefExprDbgValue(E: RefExpr, Init: result.Val); |
1865 | } |
1866 | |
1867 | // If we emitted a reference constant, we need to dereference that. |
1868 | if (resultIsReference) |
1869 | return ConstantEmission::forReference(C); |
1870 | |
1871 | return ConstantEmission::forValue(C); |
1872 | } |
1873 | |
1874 | static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, |
1875 | const MemberExpr *ME) { |
1876 | if (auto *VD = dyn_cast<VarDecl>(Val: ME->getMemberDecl())) { |
1877 | // Try to emit static variable member expressions as DREs. |
1878 | return DeclRefExpr::Create( |
1879 | Context: CGF.getContext(), QualifierLoc: NestedNameSpecifierLoc(), TemplateKWLoc: SourceLocation(), D: VD, |
1880 | /*RefersToEnclosingVariableOrCapture=*/false, NameLoc: ME->getExprLoc(), |
1881 | T: ME->getType(), VK: ME->getValueKind(), FoundD: nullptr, TemplateArgs: nullptr, NOUR: ME->isNonOdrUse()); |
1882 | } |
1883 | return nullptr; |
1884 | } |
1885 | |
1886 | CodeGenFunction::ConstantEmission |
1887 | CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) { |
1888 | if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(CGF&: *this, ME)) |
1889 | return tryEmitAsConstant(RefExpr: DRE); |
1890 | return ConstantEmission(); |
1891 | } |
1892 | |
1893 | llvm::Value *CodeGenFunction::emitScalarConstant( |
1894 | const CodeGenFunction::ConstantEmission &Constant, Expr *E) { |
1895 | assert(Constant && "not a constant" ); |
1896 | if (Constant.isReference()) |
1897 | return EmitLoadOfLValue(V: Constant.getReferenceLValue(CGF&: *this, RefExpr: E), |
1898 | Loc: E->getExprLoc()) |
1899 | .getScalarVal(); |
1900 | return Constant.getValue(); |
1901 | } |
1902 | |
1903 | llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue, |
1904 | SourceLocation Loc) { |
1905 | return EmitLoadOfScalar(Addr: lvalue.getAddress(), Volatile: lvalue.isVolatile(), |
1906 | Ty: lvalue.getType(), Loc, BaseInfo: lvalue.getBaseInfo(), |
1907 | TBAAInfo: lvalue.getTBAAInfo(), isNontemporal: lvalue.isNontemporal()); |
1908 | } |
1909 | |
1910 | static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, |
1911 | llvm::APInt &Min, llvm::APInt &End, |
1912 | bool StrictEnums, bool IsBool) { |
1913 | const EnumType *ET = Ty->getAs<EnumType>(); |
1914 | bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums && |
1915 | ET && !ET->getDecl()->isFixed(); |
1916 | if (!IsBool && !IsRegularCPlusPlusEnum) |
1917 | return false; |
1918 | |
1919 | if (IsBool) { |
1920 | Min = llvm::APInt(CGF.getContext().getTypeSize(T: Ty), 0); |
1921 | End = llvm::APInt(CGF.getContext().getTypeSize(T: Ty), 2); |
1922 | } else { |
1923 | const EnumDecl *ED = ET->getDecl(); |
1924 | ED->getValueRange(Max&: End, Min); |
1925 | } |
1926 | return true; |
1927 | } |
1928 | |
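// Illustrative results from getRangeForType: for `bool` the half-open range
// is [0, 2); for an unfixed C++ enum under -fstrict-enums it is the range of
// the enumerator values. getRangeForLoadFromType turns this into !range
// metadata, which maybeAttachRangeForLoad attaches to loads when optimizing
// and no sanitizer check was emitted.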
1929 | llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) { |
1930 | llvm::APInt Min, End; |
1931 | if (!getRangeForType(CGF&: *this, Ty, Min, End, StrictEnums: CGM.getCodeGenOpts().StrictEnums, |
1932 | IsBool: Ty->hasBooleanRepresentation() && !Ty->isVectorType())) |
1933 | return nullptr; |
1934 | |
1935 | llvm::MDBuilder MDHelper(getLLVMContext()); |
1936 | return MDHelper.createRange(Lo: Min, Hi: End); |
1937 | } |
1938 | |
1939 | void CodeGenFunction::maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty, |
1940 | SourceLocation Loc) { |
1941 | if (EmitScalarRangeCheck(Value: Load, Ty, Loc)) { |
1942 | // In order to prevent the optimizer from throwing away the check, don't |
1943 | // attach range metadata to the load. |
1944 | } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) { |
1945 | if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) { |
1946 | Load->setMetadata(KindID: llvm::LLVMContext::MD_range, Node: RangeInfo); |
1947 | Load->setMetadata(KindID: llvm::LLVMContext::MD_noundef, |
1948 | Node: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: {})); |
1949 | } |
1950 | } |
1951 | } |
1952 | |
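// Emit a -fsanitize=bool / -fsanitize=enum check that a loaded value is in
// range for its type, returning true if the type is subject to the check.
// Roughly (illustrative), a `bool` loaded as i8 is checked with
// `icmp ule i8 %v, 1`, and an enum with a signed value range [Min, End) is
// checked with an `icmp sge` / `icmp sle` pair ANDed together.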
1953 | bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, |
1954 | SourceLocation Loc) { |
1955 | bool HasBoolCheck = SanOpts.has(K: SanitizerKind::Bool); |
1956 | bool HasEnumCheck = SanOpts.has(K: SanitizerKind::Enum); |
1957 | if (!HasBoolCheck && !HasEnumCheck) |
1958 | return false; |
1959 | |
1960 | bool IsBool = (Ty->hasBooleanRepresentation() && !Ty->isVectorType()) || |
1961 | NSAPI(CGM.getContext()).isObjCBOOLType(T: Ty); |
1962 | bool NeedsBoolCheck = HasBoolCheck && IsBool; |
1963 | bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>(); |
1964 | if (!NeedsBoolCheck && !NeedsEnumCheck) |
1965 | return false; |
1966 | |
1967 | // Single-bit booleans don't need to be checked. Special-case this to avoid |
1968 | // a bit width mismatch when handling bitfield values. This is handled by |
1969 | // EmitFromMemory for the non-bitfield case. |
1970 | if (IsBool && |
1971 | cast<llvm::IntegerType>(Val: Value->getType())->getBitWidth() == 1) |
1972 | return false; |
1973 | |
1974 | if (NeedsEnumCheck && |
1975 | getContext().isTypeIgnoredBySanitizer(Mask: SanitizerKind::Enum, Ty)) |
1976 | return false; |
1977 | |
1978 | llvm::APInt Min, End; |
1979 | if (!getRangeForType(CGF&: *this, Ty, Min, End, /*StrictEnums=*/true, IsBool)) |
1980 | return true; |
1981 | |
1982 | SanitizerKind::SanitizerOrdinal Kind = |
1983 | NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool; |
1984 | |
1985 | auto &Ctx = getLLVMContext(); |
1986 | auto CheckHandler = SanitizerHandler::LoadInvalidValue; |
1987 | SanitizerDebugLocation SanScope(this, {Kind}, CheckHandler); |
1988 | llvm::Value *Check; |
1989 | --End; |
1990 | if (!Min) { |
1991 | Check = Builder.CreateICmpULE(LHS: Value, RHS: llvm::ConstantInt::get(Context&: Ctx, V: End)); |
1992 | } else { |
1993 | llvm::Value *Upper = |
1994 | Builder.CreateICmpSLE(LHS: Value, RHS: llvm::ConstantInt::get(Context&: Ctx, V: End)); |
1995 | llvm::Value *Lower = |
1996 | Builder.CreateICmpSGE(LHS: Value, RHS: llvm::ConstantInt::get(Context&: Ctx, V: Min)); |
1997 | Check = Builder.CreateAnd(LHS: Upper, RHS: Lower); |
1998 | } |
1999 | llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc), |
2000 | EmitCheckTypeDescriptor(T: Ty)}; |
2001 | EmitCheck(Checked: std::make_pair(x&: Check, y&: Kind), Check: CheckHandler, StaticArgs, DynamicArgs: Value); |
2002 | return true; |
2003 | } |
2004 | |
2005 | llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile, |
2006 | QualType Ty, |
2007 | SourceLocation Loc, |
2008 | LValueBaseInfo BaseInfo, |
2009 | TBAAAccessInfo TBAAInfo, |
2010 | bool isNontemporal) { |
2011 | if (auto *GV = dyn_cast<llvm::GlobalValue>(Val: Addr.getBasePointer())) |
2012 | if (GV->isThreadLocal()) |
2013 | Addr = Addr.withPointer(NewPointer: Builder.CreateThreadLocalAddress(Ptr: GV), |
2014 | IsKnownNonNull: NotKnownNonNull); |
2015 | |
2016 | if (const auto *ClangVecTy = Ty->getAs<VectorType>()) { |
2017 | // Boolean vectors use `iN` as storage type. |
2018 | if (ClangVecTy->isPackedVectorBoolType(ctx: getContext())) { |
2019 | llvm::Type *ValTy = ConvertType(T: Ty); |
2020 | unsigned ValNumElems = |
2021 | cast<llvm::FixedVectorType>(Val: ValTy)->getNumElements(); |
2022 | // Load the `iP` storage object (P is the padded vector size). |
2023 | auto *RawIntV = Builder.CreateLoad(Addr, IsVolatile: Volatile, Name: "load_bits" ); |
2024 | const auto *RawIntTy = RawIntV->getType(); |
2025 | assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors" ); |
2026 | // Bitcast iP --> <P x i1>. |
2027 | auto *PaddedVecTy = llvm::FixedVectorType::get( |
2028 | ElementType: Builder.getInt1Ty(), NumElts: RawIntTy->getPrimitiveSizeInBits()); |
2029 | llvm::Value *V = Builder.CreateBitCast(V: RawIntV, DestTy: PaddedVecTy); |
2030 | // Shuffle <P x i1> --> <N x i1> (N is the actual bit size). |
2031 | V = emitBoolVecConversion(SrcVec: V, NumElementsDst: ValNumElems, Name: "extractvec" ); |
2032 | |
2033 | return EmitFromMemory(Value: V, Ty); |
2034 | } |
2035 | |
// Handle vectors whose memory type is widened to a larger vector type for
// better performance; load the wider type and shuffle it back down to the
// original number of elements.
2038 | auto *VTy = cast<llvm::FixedVectorType>(Val: Addr.getElementType()); |
2039 | auto *NewVecTy = |
2040 | CGM.getABIInfo().getOptimalVectorMemoryType(T: VTy, Opt: getLangOpts()); |
2041 | |
2042 | if (VTy != NewVecTy) { |
2043 | Address Cast = Addr.withElementType(ElemTy: NewVecTy); |
2044 | llvm::Value *V = Builder.CreateLoad(Addr: Cast, IsVolatile: Volatile, Name: "loadVecN" ); |
2045 | unsigned OldNumElements = VTy->getNumElements(); |
2046 | SmallVector<int, 16> Mask(OldNumElements); |
2047 | std::iota(first: Mask.begin(), last: Mask.end(), value: 0); |
2048 | V = Builder.CreateShuffleVector(V, Mask, Name: "extractVec" ); |
2049 | return EmitFromMemory(Value: V, Ty); |
2050 | } |
2051 | } |
2052 | |
2053 | // Atomic operations have to be done on integral types. |
2054 | LValue AtomicLValue = |
2055 | LValue::MakeAddr(Addr, type: Ty, Context&: getContext(), BaseInfo, TBAAInfo); |
2056 | if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(Src: AtomicLValue)) { |
2057 | return EmitAtomicLoad(LV: AtomicLValue, SL: Loc).getScalarVal(); |
2058 | } |
2059 | |
2060 | Addr = |
2061 | Addr.withElementType(ElemTy: convertTypeForLoadStore(ASTTy: Ty, LLVMTy: Addr.getElementType())); |
2062 | |
2063 | llvm::LoadInst *Load = Builder.CreateLoad(Addr, IsVolatile: Volatile); |
2064 | if (isNontemporal) { |
2065 | llvm::MDNode *Node = llvm::MDNode::get( |
2066 | Context&: Load->getContext(), MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 1))); |
2067 | Load->setMetadata(KindID: llvm::LLVMContext::MD_nontemporal, Node); |
2068 | } |
2069 | |
2070 | CGM.DecorateInstructionWithTBAA(Inst: Load, TBAAInfo); |
2071 | |
2072 | maybeAttachRangeForLoad(Load, Ty, Loc); |
2073 | |
2074 | return EmitFromMemory(Value: Load, Ty); |
2075 | } |
2076 | |
2077 | /// Converts a scalar value from its primary IR type (as returned |
2078 | /// by ConvertType) to its load/store type (as returned by |
2079 | /// convertTypeForLoadStore). |
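/// For example (illustrative), a `bool` produced as an `i1` is widened here
/// to its `i8` in-memory form ("storedv"), and EmitFromMemory truncates it
/// back to `i1` ("loadedv") after a load.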
2080 | llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) { |
2081 | if (auto *AtomicTy = Ty->getAs<AtomicType>()) |
2082 | Ty = AtomicTy->getValueType(); |
2083 | |
2084 | if (Ty->isExtVectorBoolType()) { |
2085 | llvm::Type *StoreTy = convertTypeForLoadStore(ASTTy: Ty, LLVMTy: Value->getType()); |
2086 | if (StoreTy->isVectorTy() && StoreTy->getScalarSizeInBits() > |
2087 | Value->getType()->getScalarSizeInBits()) |
2088 | return Builder.CreateZExt(V: Value, DestTy: StoreTy); |
2089 | |
2090 | // Expand to the memory bit width. |
2091 | unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits(); |
2092 | // <N x i1> --> <P x i1>. |
2093 | Value = emitBoolVecConversion(SrcVec: Value, NumElementsDst: MemNumElems, Name: "insertvec" ); |
2094 | // <P x i1> --> iP. |
2095 | Value = Builder.CreateBitCast(V: Value, DestTy: StoreTy); |
2096 | } |
2097 | |
2098 | if (Ty->hasBooleanRepresentation() || Ty->isBitIntType()) { |
2099 | llvm::Type *StoreTy = convertTypeForLoadStore(ASTTy: Ty, LLVMTy: Value->getType()); |
2100 | bool Signed = Ty->isSignedIntegerOrEnumerationType(); |
2101 | return Builder.CreateIntCast(V: Value, DestTy: StoreTy, isSigned: Signed, Name: "storedv" ); |
2102 | } |
2103 | |
2104 | return Value; |
2105 | } |
2106 | |
2107 | /// Converts a scalar value from its load/store type (as returned |
2108 | /// by convertTypeForLoadStore) to its primary IR type (as returned |
2109 | /// by ConvertType). |
2110 | llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) { |
2111 | if (auto *AtomicTy = Ty->getAs<AtomicType>()) |
2112 | Ty = AtomicTy->getValueType(); |
2113 | |
2114 | if (Ty->isPackedVectorBoolType(ctx: getContext())) { |
2115 | const auto *RawIntTy = Value->getType(); |
2116 | |
2117 | // Bitcast iP --> <P x i1>. |
2118 | auto *PaddedVecTy = llvm::FixedVectorType::get( |
2119 | ElementType: Builder.getInt1Ty(), NumElts: RawIntTy->getPrimitiveSizeInBits()); |
2120 | auto *V = Builder.CreateBitCast(V: Value, DestTy: PaddedVecTy); |
2121 | // Shuffle <P x i1> --> <N x i1> (N is the actual bit size). |
2122 | llvm::Type *ValTy = ConvertType(T: Ty); |
2123 | unsigned ValNumElems = cast<llvm::FixedVectorType>(Val: ValTy)->getNumElements(); |
2124 | return emitBoolVecConversion(SrcVec: V, NumElementsDst: ValNumElems, Name: "extractvec" ); |
2125 | } |
2126 | |
2127 | llvm::Type *ResTy = ConvertType(T: Ty); |
2128 | if (Ty->hasBooleanRepresentation() || Ty->isBitIntType() || |
2129 | Ty->isExtVectorBoolType()) |
2130 | return Builder.CreateTrunc(V: Value, DestTy: ResTy, Name: "loadedv" ); |
2131 | |
2132 | return Value; |
2133 | } |
2134 | |
// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType) if it points to an array (the memory type of MatrixType).
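// Illustrative example: a `typedef float m2x2 __attribute__((matrix_type(2,
// 2)));` value is held in memory as `[4 x float]` but manipulated as
// `<4 x float>`; this helper retypes the address between the two forms.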
2137 | static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, |
2138 | CodeGenFunction &CGF, |
2139 | bool IsVector = true) { |
2140 | auto *ArrayTy = dyn_cast<llvm::ArrayType>(Val: Addr.getElementType()); |
2141 | if (ArrayTy && IsVector) { |
2142 | auto *VectorTy = llvm::FixedVectorType::get(ElementType: ArrayTy->getElementType(), |
2143 | NumElts: ArrayTy->getNumElements()); |
2144 | |
2145 | return Addr.withElementType(ElemTy: VectorTy); |
2146 | } |
2147 | auto *VectorTy = dyn_cast<llvm::VectorType>(Val: Addr.getElementType()); |
2148 | if (VectorTy && !IsVector) { |
2149 | auto *ArrayTy = llvm::ArrayType::get( |
2150 | ElementType: VectorTy->getElementType(), |
2151 | NumElements: cast<llvm::FixedVectorType>(Val: VectorTy)->getNumElements()); |
2152 | |
2153 | return Addr.withElementType(ElemTy: ArrayTy); |
2154 | } |
2155 | |
2156 | return Addr; |
2157 | } |
2158 | |
// Emit a store of a matrix LValue. This may require casting the pointer to
// the memory type (ArrayType) to a pointer to the value type (VectorType).
2162 | static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, |
2163 | bool isInit, CodeGenFunction &CGF) { |
2164 | Address Addr = MaybeConvertMatrixAddress(Addr: lvalue.getAddress(), CGF, |
2165 | IsVector: value->getType()->isVectorTy()); |
2166 | CGF.EmitStoreOfScalar(Value: value, Addr, Volatile: lvalue.isVolatile(), Ty: lvalue.getType(), |
2167 | BaseInfo: lvalue.getBaseInfo(), TBAAInfo: lvalue.getTBAAInfo(), isInit, |
2168 | isNontemporal: lvalue.isNontemporal()); |
2169 | } |
2170 | |
2171 | void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr, |
2172 | bool Volatile, QualType Ty, |
2173 | LValueBaseInfo BaseInfo, |
2174 | TBAAAccessInfo TBAAInfo, |
2175 | bool isInit, bool isNontemporal) { |
2176 | if (auto *GV = dyn_cast<llvm::GlobalValue>(Val: Addr.getBasePointer())) |
2177 | if (GV->isThreadLocal()) |
2178 | Addr = Addr.withPointer(NewPointer: Builder.CreateThreadLocalAddress(Ptr: GV), |
2179 | IsKnownNonNull: NotKnownNonNull); |
2180 | |
// Handle vectors whose memory type is widened to a larger vector type for
// better performance; widen the value with a shuffle before storing.
2183 | llvm::Type *SrcTy = Value->getType(); |
2184 | if (const auto *ClangVecTy = Ty->getAs<VectorType>()) { |
2185 | if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy)) { |
2186 | auto *NewVecTy = |
2187 | CGM.getABIInfo().getOptimalVectorMemoryType(T: VecTy, Opt: getLangOpts()); |
2188 | if (!ClangVecTy->isPackedVectorBoolType(ctx: getContext()) && |
2189 | VecTy != NewVecTy) { |
2190 | SmallVector<int, 16> Mask(NewVecTy->getNumElements(), -1); |
2191 | std::iota(first: Mask.begin(), last: Mask.begin() + VecTy->getNumElements(), value: 0); |
2192 | Value = Builder.CreateShuffleVector(V: Value, Mask, Name: "extractVec" ); |
2193 | SrcTy = NewVecTy; |
2194 | } |
2195 | if (Addr.getElementType() != SrcTy) |
2196 | Addr = Addr.withElementType(ElemTy: SrcTy); |
2197 | } |
2198 | } |
2199 | |
2200 | Value = EmitToMemory(Value, Ty); |
2201 | |
2202 | LValue AtomicLValue = |
2203 | LValue::MakeAddr(Addr, type: Ty, Context&: getContext(), BaseInfo, TBAAInfo); |
2204 | if (Ty->isAtomicType() || |
2205 | (!isInit && LValueIsSuitableForInlineAtomic(Src: AtomicLValue))) { |
2206 | EmitAtomicStore(rvalue: RValue::get(V: Value), lvalue: AtomicLValue, isInit); |
2207 | return; |
2208 | } |
2209 | |
2210 | llvm::StoreInst *Store = Builder.CreateStore(Val: Value, Addr, IsVolatile: Volatile); |
2211 | addInstToCurrentSourceAtom(KeyInstruction: Store, Backup: Value); |
2212 | |
2213 | if (isNontemporal) { |
2214 | llvm::MDNode *Node = |
2215 | llvm::MDNode::get(Context&: Store->getContext(), |
2216 | MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 1))); |
2217 | Store->setMetadata(KindID: llvm::LLVMContext::MD_nontemporal, Node); |
2218 | } |
2219 | |
2220 | CGM.DecorateInstructionWithTBAA(Inst: Store, TBAAInfo); |
2221 | } |
2222 | |
2223 | void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue, |
2224 | bool isInit) { |
2225 | if (lvalue.getType()->isConstantMatrixType()) { |
2226 | EmitStoreOfMatrixScalar(value, lvalue, isInit, CGF&: *this); |
2227 | return; |
2228 | } |
2229 | |
2230 | EmitStoreOfScalar(Value: value, Addr: lvalue.getAddress(), Volatile: lvalue.isVolatile(), |
2231 | Ty: lvalue.getType(), BaseInfo: lvalue.getBaseInfo(), |
2232 | TBAAInfo: lvalue.getTBAAInfo(), isInit, isNontemporal: lvalue.isNontemporal()); |
2233 | } |
2234 | |
// Emit a load of an LValue of matrix type. This may require casting the
// pointer to the memory type (ArrayType) to a pointer to the value type
// (VectorType).
2237 | static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, |
2238 | CodeGenFunction &CGF) { |
2239 | assert(LV.getType()->isConstantMatrixType()); |
2240 | Address Addr = MaybeConvertMatrixAddress(Addr: LV.getAddress(), CGF); |
2241 | LV.setAddress(Addr); |
2242 | return RValue::get(V: CGF.EmitLoadOfScalar(lvalue: LV, Loc)); |
2243 | } |
2244 | |
2245 | RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot, |
2246 | SourceLocation Loc) { |
2247 | QualType Ty = LV.getType(); |
2248 | switch (getEvaluationKind(T: Ty)) { |
2249 | case TEK_Scalar: |
2250 | return EmitLoadOfLValue(V: LV, Loc); |
2251 | case TEK_Complex: |
2252 | return RValue::getComplex(C: EmitLoadOfComplex(src: LV, loc: Loc)); |
2253 | case TEK_Aggregate: |
2254 | EmitAggFinalDestCopy(Type: Ty, Dest: Slot, Src: LV, SrcKind: EVK_NonRValue); |
2255 | return Slot.asRValue(); |
2256 | } |
2257 | llvm_unreachable("bad evaluation kind" ); |
2258 | } |
2259 | |
/// EmitLoadOfLValue - Given an lvalue designating a value, this method loads
/// the value at the lvalue's location and returns it as an rvalue, handling
/// bitfields, vector elements, and other non-simple lvalues as needed.
2263 | RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) { |
2264 | // Load from __ptrauth. |
2265 | if (PointerAuthQualifier PtrAuth = LV.getQuals().getPointerAuth()) { |
2266 | LV.getQuals().removePointerAuth(); |
2267 | llvm::Value *Value = EmitLoadOfLValue(LV, Loc).getScalarVal(); |
2268 | return RValue::get(V: EmitPointerAuthUnqualify(Qualifier: PtrAuth, Pointer: Value, PointerType: LV.getType(), |
2269 | StorageAddress: LV.getAddress(), |
2270 | /*known nonnull*/ IsKnownNonNull: false)); |
2271 | } |
2272 | |
2273 | if (LV.isObjCWeak()) { |
2274 | // load of a __weak object. |
2275 | Address AddrWeakObj = LV.getAddress(); |
2276 | return RValue::get(V: CGM.getObjCRuntime().EmitObjCWeakRead(CGF&: *this, |
2277 | AddrWeakObj)); |
2278 | } |
2279 | if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { |
2280 | // In MRC mode, we do a load+autorelease. |
2281 | if (!getLangOpts().ObjCAutoRefCount) { |
2282 | return RValue::get(V: EmitARCLoadWeak(addr: LV.getAddress())); |
2283 | } |
2284 | |
2285 | // In ARC mode, we load retained and then consume the value. |
2286 | llvm::Value *Object = EmitARCLoadWeakRetained(addr: LV.getAddress()); |
2287 | Object = EmitObjCConsumeObject(T: LV.getType(), Ptr: Object); |
2288 | return RValue::get(V: Object); |
2289 | } |
2290 | |
2291 | if (LV.isSimple()) { |
2292 | assert(!LV.getType()->isFunctionType()); |
2293 | |
2294 | if (LV.getType()->isConstantMatrixType()) |
2295 | return EmitLoadOfMatrixLValue(LV, Loc, CGF&: *this); |
2296 | |
2297 | // Everything needs a load. |
2298 | return RValue::get(V: EmitLoadOfScalar(lvalue: LV, Loc)); |
2299 | } |
2300 | |
2301 | if (LV.isVectorElt()) { |
2302 | llvm::LoadInst *Load = Builder.CreateLoad(Addr: LV.getVectorAddress(), |
2303 | IsVolatile: LV.isVolatileQualified()); |
2304 | return RValue::get(V: Builder.CreateExtractElement(Vec: Load, Idx: LV.getVectorIdx(), |
2305 | Name: "vecext" )); |
2306 | } |
2307 | |
2308 | // If this is a reference to a subset of the elements of a vector, either |
2309 | // shuffle the input or extract/insert them as appropriate. |
2310 | if (LV.isExtVectorElt()) { |
2311 | return EmitLoadOfExtVectorElementLValue(V: LV); |
2312 | } |
2313 | |
// Global register variables are always read through intrinsics.
2315 | if (LV.isGlobalReg()) |
2316 | return EmitLoadOfGlobalRegLValue(LV); |
2317 | |
2318 | if (LV.isMatrixElt()) { |
2319 | llvm::Value *Idx = LV.getMatrixIdx(); |
2320 | if (CGM.getCodeGenOpts().OptimizationLevel > 0) { |
2321 | const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>(); |
2322 | llvm::MatrixBuilder MB(Builder); |
2323 | MB.CreateIndexAssumption(Idx, NumElements: MatTy->getNumElementsFlattened()); |
2324 | } |
2325 | llvm::LoadInst *Load = |
2326 | Builder.CreateLoad(Addr: LV.getMatrixAddress(), IsVolatile: LV.isVolatileQualified()); |
2327 | return RValue::get(V: Builder.CreateExtractElement(Vec: Load, Idx, Name: "matrixext" )); |
2328 | } |
2329 | |
2330 | assert(LV.isBitField() && "Unknown LValue type!" ); |
2331 | return EmitLoadOfBitfieldLValue(LV, Loc); |
2332 | } |
2333 | |
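// A sketch of the extraction below (illustrative): for a signed bitfield
// `int f : 3;` at bit offset 2 within an i32 storage unit, the loaded word
// is shifted left by 27 (StorageSize - Offset - Size) and then
// arithmetic-shifted right by 29, sign-extending the three field bits;
// unsigned fields use lshr plus a mask instead.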
2334 | RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, |
2335 | SourceLocation Loc) { |
2336 | const CGBitFieldInfo &Info = LV.getBitFieldInfo(); |
2337 | |
2338 | // Get the output type. |
2339 | llvm::Type *ResLTy = ConvertType(T: LV.getType()); |
2340 | |
2341 | Address Ptr = LV.getBitFieldAddress(); |
2342 | llvm::Value *Val = |
2343 | Builder.CreateLoad(Addr: Ptr, IsVolatile: LV.isVolatileQualified(), Name: "bf.load" ); |
2344 | |
2345 | bool UseVolatile = LV.isVolatileQualified() && |
2346 | Info.VolatileStorageSize != 0 && isAAPCS(TargetInfo: CGM.getTarget()); |
2347 | const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; |
2348 | const unsigned StorageSize = |
2349 | UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; |
2350 | if (Info.IsSigned) { |
2351 | assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize); |
2352 | unsigned HighBits = StorageSize - Offset - Info.Size; |
2353 | if (HighBits) |
2354 | Val = Builder.CreateShl(LHS: Val, RHS: HighBits, Name: "bf.shl" ); |
2355 | if (Offset + HighBits) |
2356 | Val = Builder.CreateAShr(LHS: Val, RHS: Offset + HighBits, Name: "bf.ashr" ); |
2357 | } else { |
2358 | if (Offset) |
2359 | Val = Builder.CreateLShr(LHS: Val, RHS: Offset, Name: "bf.lshr" ); |
2360 | if (static_cast<unsigned>(Offset) + Info.Size < StorageSize) |
2361 | Val = Builder.CreateAnd( |
2362 | LHS: Val, RHS: llvm::APInt::getLowBitsSet(numBits: StorageSize, loBitsSet: Info.Size), Name: "bf.clear" ); |
2363 | } |
2364 | Val = Builder.CreateIntCast(V: Val, DestTy: ResLTy, isSigned: Info.IsSigned, Name: "bf.cast" ); |
2365 | EmitScalarRangeCheck(Value: Val, Ty: LV.getType(), Loc); |
2366 | return RValue::get(V: Val); |
2367 | } |
2368 | |
2369 | // If this is a reference to a subset of the elements of a vector, create an |
2370 | // appropriate shufflevector. |
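// Illustrative example: for `float4 v; v.yx`, Elts selects fields {1, 0}, so
// this roughly emits
//   shufflevector <4 x float> %v, <4 x float> poison, <2 x i32> <i32 1, i32 0>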
2371 | RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) { |
2372 | llvm::Value *Vec = Builder.CreateLoad(Addr: LV.getExtVectorAddress(), |
2373 | IsVolatile: LV.isVolatileQualified()); |
2374 | |
2375 | // HLSL allows treating scalars as one-element vectors. Converting the scalar |
2376 | // IR value to a vector here allows the rest of codegen to behave as normal. |
2377 | if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) { |
2378 | llvm::Type *DstTy = llvm::FixedVectorType::get(ElementType: Vec->getType(), NumElts: 1); |
2379 | llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGM.Int64Ty); |
2380 | Vec = Builder.CreateInsertElement(VecTy: DstTy, NewElt: Vec, Idx: Zero, Name: "cast.splat" ); |
2381 | } |
2382 | |
2383 | const llvm::Constant *Elts = LV.getExtVectorElts(); |
2384 | |
2385 | // If the result of the expression is a non-vector type, we must be extracting |
2386 | // a single element. Just codegen as an extractelement. |
2387 | const VectorType *ExprVT = LV.getType()->getAs<VectorType>(); |
2388 | if (!ExprVT) { |
2389 | unsigned InIdx = getAccessedFieldNo(Idx: 0, Elts); |
2390 | llvm::Value *Elt = llvm::ConstantInt::get(Ty: SizeTy, V: InIdx); |
2391 | |
2392 | llvm::Value *Element = Builder.CreateExtractElement(Vec, Idx: Elt); |
2393 | |
2394 | llvm::Type *LVTy = ConvertType(T: LV.getType()); |
2395 | if (Element->getType()->getPrimitiveSizeInBits() > |
2396 | LVTy->getPrimitiveSizeInBits()) |
2397 | Element = Builder.CreateTrunc(V: Element, DestTy: LVTy); |
2398 | |
2399 | return RValue::get(V: Element); |
2400 | } |
2401 | |
// Always use a shufflevector to try to retain the original program structure.
2403 | unsigned NumResultElts = ExprVT->getNumElements(); |
2404 | |
2405 | SmallVector<int, 4> Mask; |
2406 | for (unsigned i = 0; i != NumResultElts; ++i) |
2407 | Mask.push_back(Elt: getAccessedFieldNo(Idx: i, Elts)); |
2408 | |
2409 | Vec = Builder.CreateShuffleVector(V: Vec, Mask); |
2410 | |
2411 | if (LV.getType()->isExtVectorBoolType()) |
2412 | Vec = Builder.CreateTrunc(V: Vec, DestTy: ConvertType(T: LV.getType()), Name: "truncv" ); |
2413 | |
2414 | return RValue::get(V: Vec); |
2415 | } |
2416 | |
/// Generates an lvalue for a partial ext_vector access.
2418 | Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) { |
2419 | Address VectorAddress = LV.getExtVectorAddress(); |
2420 | QualType EQT = LV.getType()->castAs<VectorType>()->getElementType(); |
2421 | llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(T: EQT); |
2422 | |
2423 | Address CastToPointerElement = VectorAddress.withElementType(ElemTy: VectorElementTy); |
2424 | |
2425 | const llvm::Constant *Elts = LV.getExtVectorElts(); |
2426 | unsigned ix = getAccessedFieldNo(Idx: 0, Elts); |
2427 | |
2428 | Address VectorBasePtrPlusIx = |
2429 | Builder.CreateConstInBoundsGEP(Addr: CastToPointerElement, Index: ix, |
2430 | Name: "vector.elt" ); |
2431 | |
2432 | return VectorBasePtrPlusIx; |
2433 | } |
2434 | |
/// Loads of global named registers are always calls to intrinsics.
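/// Illustrative example: given `register unsigned long current_sp asm("sp");`
/// at global scope, a read of `current_sp` becomes roughly
///   call i64 @llvm.read_register.i64(metadata !"sp")
/// on a 64-bit target, followed by an inttoptr if the variable has pointer
/// type.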
2436 | RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) { |
2437 | assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) && |
2438 | "Bad type for register variable" ); |
2439 | llvm::MDNode *RegName = cast<llvm::MDNode>( |
2440 | Val: cast<llvm::MetadataAsValue>(Val: LV.getGlobalReg())->getMetadata()); |
2441 | |
// We accept integer and pointer types only.
2443 | llvm::Type *OrigTy = CGM.getTypes().ConvertType(T: LV.getType()); |
2444 | llvm::Type *Ty = OrigTy; |
2445 | if (OrigTy->isPointerTy()) |
2446 | Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); |
2447 | llvm::Type *Types[] = { Ty }; |
2448 | |
2449 | llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::read_register, Tys: Types); |
2450 | llvm::Value *Call = Builder.CreateCall( |
2451 | Callee: F, Args: llvm::MetadataAsValue::get(Context&: Ty->getContext(), MD: RegName)); |
2452 | if (OrigTy->isPointerTy()) |
2453 | Call = Builder.CreateIntToPtr(V: Call, DestTy: OrigTy); |
2454 | return RValue::get(V: Call); |
2455 | } |
2456 | |
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
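/// A sketch of the non-simple cases handled below (illustrative): a store to
/// a vector element such as `v[1] = x` is emitted as read/modify/write (load
/// the vector, insertelement, store it back), while bitfield, ext-vector,
/// global-register, and matrix-element destinations are dispatched to their
/// dedicated helpers.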
2460 | void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, |
2461 | bool isInit) { |
2462 | if (!Dst.isSimple()) { |
2463 | if (Dst.isVectorElt()) { |
2464 | // Read/modify/write the vector, inserting the new element. |
2465 | llvm::Value *Vec = Builder.CreateLoad(Addr: Dst.getVectorAddress(), |
2466 | IsVolatile: Dst.isVolatileQualified()); |
2467 | llvm::Type *VecTy = Vec->getType(); |
2468 | llvm::Value *SrcVal = Src.getScalarVal(); |
2469 | |
2470 | if (SrcVal->getType()->getPrimitiveSizeInBits() < |
2471 | VecTy->getScalarSizeInBits()) |
2472 | SrcVal = Builder.CreateZExt(V: SrcVal, DestTy: VecTy->getScalarType()); |
2473 | |
2474 | auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Val: Vec->getType()); |
2475 | if (IRStoreTy) { |
2476 | auto *IRVecTy = llvm::FixedVectorType::get( |
2477 | ElementType: Builder.getInt1Ty(), NumElts: IRStoreTy->getPrimitiveSizeInBits()); |
2478 | Vec = Builder.CreateBitCast(V: Vec, DestTy: IRVecTy); |
2479 | // iN --> <N x i1>. |
2480 | } |
2481 | |
2482 | // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar |
2483 | // types which are mapped to vector LLVM IR types (e.g. for implementing |
2484 | // an ABI). |
2485 | if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(Val: SrcVal->getType()); |
2486 | EltTy && EltTy->getNumElements() == 1) |
2487 | SrcVal = Builder.CreateBitCast(V: SrcVal, DestTy: EltTy->getElementType()); |
2488 | |
2489 | Vec = Builder.CreateInsertElement(Vec, NewElt: SrcVal, Idx: Dst.getVectorIdx(), |
2490 | Name: "vecins" ); |
2491 | if (IRStoreTy) { |
2492 | // <N x i1> --> <iN>. |
2493 | Vec = Builder.CreateBitCast(V: Vec, DestTy: IRStoreTy); |
2494 | } |
2495 | |
2496 | auto *I = Builder.CreateStore(Val: Vec, Addr: Dst.getVectorAddress(), |
2497 | IsVolatile: Dst.isVolatileQualified()); |
2498 | addInstToCurrentSourceAtom(KeyInstruction: I, Backup: Vec); |
2499 | return; |
2500 | } |
2501 | |
2502 | // If this is an update of extended vector elements, insert them as |
2503 | // appropriate. |
2504 | if (Dst.isExtVectorElt()) |
2505 | return EmitStoreThroughExtVectorComponentLValue(Src, Dst); |
2506 | |
2507 | if (Dst.isGlobalReg()) |
2508 | return EmitStoreThroughGlobalRegLValue(Src, Dst); |
2509 | |
2510 | if (Dst.isMatrixElt()) { |
2511 | llvm::Value *Idx = Dst.getMatrixIdx(); |
2512 | if (CGM.getCodeGenOpts().OptimizationLevel > 0) { |
2513 | const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>(); |
2514 | llvm::MatrixBuilder MB(Builder); |
2515 | MB.CreateIndexAssumption(Idx, NumElements: MatTy->getNumElementsFlattened()); |
2516 | } |
2517 | llvm::Instruction *Load = Builder.CreateLoad(Addr: Dst.getMatrixAddress()); |
2518 | llvm::Value *Vec = |
2519 | Builder.CreateInsertElement(Vec: Load, NewElt: Src.getScalarVal(), Idx, Name: "matins" ); |
2520 | auto *I = Builder.CreateStore(Val: Vec, Addr: Dst.getMatrixAddress(), |
2521 | IsVolatile: Dst.isVolatileQualified()); |
2522 | addInstToCurrentSourceAtom(KeyInstruction: I, Backup: Vec); |
2523 | return; |
2524 | } |
2525 | |
2526 | assert(Dst.isBitField() && "Unknown LValue type" ); |
2527 | return EmitStoreThroughBitfieldLValue(Src, Dst); |
2528 | } |
2529 | |
2530 | // Handle __ptrauth qualification by re-signing the value. |
2531 | if (PointerAuthQualifier PointerAuth = Dst.getQuals().getPointerAuth()) { |
2532 | Src = RValue::get(V: EmitPointerAuthQualify(Qualifier: PointerAuth, Pointer: Src.getScalarVal(), |
2533 | ValueType: Dst.getType(), StorageAddress: Dst.getAddress(), |
2534 | /*known nonnull*/ IsKnownNonNull: false)); |
2535 | } |
2536 | |
2537 | // There's special magic for assigning into an ARC-qualified l-value. |
2538 | if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) { |
2539 | switch (Lifetime) { |
2540 | case Qualifiers::OCL_None: |
2541 | llvm_unreachable("present but none" ); |
2542 | |
2543 | case Qualifiers::OCL_ExplicitNone: |
2544 | // nothing special |
2545 | break; |
2546 | |
2547 | case Qualifiers::OCL_Strong: |
2548 | if (isInit) { |
2549 | Src = RValue::get(V: EmitARCRetain(type: Dst.getType(), value: Src.getScalarVal())); |
2550 | break; |
2551 | } |
2552 | EmitARCStoreStrong(lvalue: Dst, value: Src.getScalarVal(), /*ignore*/ resultIgnored: true); |
2553 | return; |
2554 | |
2555 | case Qualifiers::OCL_Weak: |
2556 | if (isInit) |
2557 | // Initialize and then skip the primitive store. |
2558 | EmitARCInitWeak(addr: Dst.getAddress(), value: Src.getScalarVal()); |
2559 | else |
2560 | EmitARCStoreWeak(addr: Dst.getAddress(), value: Src.getScalarVal(), |
2561 | /*ignore*/ ignored: true); |
2562 | return; |
2563 | |
2564 | case Qualifiers::OCL_Autoreleasing: |
2565 | Src = RValue::get(V: EmitObjCExtendObjectLifetime(T: Dst.getType(), |
2566 | Ptr: Src.getScalarVal())); |
2567 | // fall into the normal path |
2568 | break; |
2569 | } |
2570 | } |
2571 | |
2572 | if (Dst.isObjCWeak() && !Dst.isNonGC()) { |
2573 |     // Store into a __weak object.
2574 | Address LvalueDst = Dst.getAddress(); |
2575 | llvm::Value *src = Src.getScalarVal(); |
2576 | CGM.getObjCRuntime().EmitObjCWeakAssign(CGF&: *this, src, dest: LvalueDst); |
2577 | return; |
2578 | } |
2579 | |
2580 | if (Dst.isObjCStrong() && !Dst.isNonGC()) { |
2581 |     // Store into a __strong object.
2582 | Address LvalueDst = Dst.getAddress(); |
2583 | llvm::Value *src = Src.getScalarVal(); |
2584 | if (Dst.isObjCIvar()) { |
2585 | assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL" ); |
2586 | llvm::Type *ResultType = IntPtrTy; |
2587 | Address dst = EmitPointerWithAlignment(E: Dst.getBaseIvarExp()); |
2588 | llvm::Value *RHS = dst.emitRawPointer(CGF&: *this); |
2589 | RHS = Builder.CreatePtrToInt(V: RHS, DestTy: ResultType, Name: "sub.ptr.rhs.cast" ); |
2590 | llvm::Value *LHS = Builder.CreatePtrToInt(V: LvalueDst.emitRawPointer(CGF&: *this), |
2591 | DestTy: ResultType, Name: "sub.ptr.lhs.cast" ); |
2592 | llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, Name: "ivar.offset" ); |
2593 | CGM.getObjCRuntime().EmitObjCIvarAssign(CGF&: *this, src, dest: dst, ivarOffset: BytesBetween); |
2594 | } else if (Dst.isGlobalObjCRef()) { |
2595 | CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF&: *this, src, dest: LvalueDst, |
2596 | threadlocal: Dst.isThreadLocalRef()); |
2597 | } |
2598 | else |
2599 | CGM.getObjCRuntime().EmitObjCStrongCastAssign(CGF&: *this, src, dest: LvalueDst); |
2600 | return; |
2601 | } |
2602 | |
2603 | assert(Src.isScalar() && "Can't emit an agg store with this method" ); |
2604 | EmitStoreOfScalar(value: Src.getScalarVal(), lvalue: Dst, isInit); |
2605 | } |
2606 | |
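     | // Illustrative sketch (assumed layout, not taken from a specific test): for
     | // `struct { unsigned a : 3; unsigned b : 5; } s;`, the store `s.b = v` with
     | // an i8 storage unit, Info.Size == 5 and Offset == 3, emits roughly:
     | //   %bf.load  = load i8, ptr %s
     | //   %bf.value = and i8 %v, 31          ; truncate the source to 5 bits
     | //   %bf.shl   = shl i8 %bf.value, 3    ; shift it into place
     | //   %bf.clear = and i8 %bf.load, 7     ; clear the destination bits
     | //   %bf.set   = or i8 %bf.clear, %bf.shl
     | //   store i8 %bf.set, ptr %s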
2607 | void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, |
2608 | llvm::Value **Result) { |
2609 | const CGBitFieldInfo &Info = Dst.getBitFieldInfo(); |
2610 | llvm::Type *ResLTy = convertTypeForLoadStore(ASTTy: Dst.getType()); |
2611 | Address Ptr = Dst.getBitFieldAddress(); |
2612 | |
2613 | // Get the source value, truncated to the width of the bit-field. |
2614 | llvm::Value *SrcVal = Src.getScalarVal(); |
2615 | |
2616 | // Cast the source to the storage type and shift it into place. |
2617 | SrcVal = Builder.CreateIntCast(V: SrcVal, DestTy: Ptr.getElementType(), |
2618 | /*isSigned=*/false); |
2619 | llvm::Value *MaskedVal = SrcVal; |
2620 | |
2621 | const bool UseVolatile = |
2622 | CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() && |
2623 | Info.VolatileStorageSize != 0 && isAAPCS(TargetInfo: CGM.getTarget()); |
2624 | const unsigned StorageSize = |
2625 | UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; |
2626 | const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; |
2627 |   // See if there are other bits in the bitfield's storage we'll need to load
2628 |   // and mask together with the source before storing.
2629 | if (StorageSize != Info.Size) { |
2630 | assert(StorageSize > Info.Size && "Invalid bitfield size." ); |
2631 | llvm::Value *Val = |
2632 | Builder.CreateLoad(Addr: Ptr, IsVolatile: Dst.isVolatileQualified(), Name: "bf.load" ); |
2633 | |
2634 | // Mask the source value as needed. |
2635 | if (!Dst.getType()->hasBooleanRepresentation()) |
2636 | SrcVal = Builder.CreateAnd( |
2637 | LHS: SrcVal, RHS: llvm::APInt::getLowBitsSet(numBits: StorageSize, loBitsSet: Info.Size), |
2638 | Name: "bf.value" ); |
2639 | MaskedVal = SrcVal; |
2640 | if (Offset) |
2641 | SrcVal = Builder.CreateShl(LHS: SrcVal, RHS: Offset, Name: "bf.shl" ); |
2642 | |
2643 | // Mask out the original value. |
2644 | Val = Builder.CreateAnd( |
2645 | LHS: Val, RHS: ~llvm::APInt::getBitsSet(numBits: StorageSize, loBit: Offset, hiBit: Offset + Info.Size), |
2646 | Name: "bf.clear" ); |
2647 | |
2648 | // Or together the unchanged values and the source value. |
2649 | SrcVal = Builder.CreateOr(LHS: Val, RHS: SrcVal, Name: "bf.set" ); |
2650 | } else { |
2651 | assert(Offset == 0); |
2652 |     // According to the AAPCS:
2653 | // When a volatile bit-field is written, and its container does not overlap |
2654 | // with any non-bit-field member, its container must be read exactly once |
2655 | // and written exactly once using the access width appropriate to the type |
2656 | // of the container. The two accesses are not atomic. |
2657 | if (Dst.isVolatileQualified() && isAAPCS(TargetInfo: CGM.getTarget()) && |
2658 | CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) |
2659 | Builder.CreateLoad(Addr: Ptr, IsVolatile: true, Name: "bf.load" ); |
2660 | } |
2661 | |
2662 | // Write the new value back out. |
2663 | auto *I = Builder.CreateStore(Val: SrcVal, Addr: Ptr, IsVolatile: Dst.isVolatileQualified()); |
2664 | addInstToCurrentSourceAtom(KeyInstruction: I, Backup: SrcVal); |
2665 | |
2666 | // Return the new value of the bit-field, if requested. |
2667 | if (Result) { |
2668 | llvm::Value *ResultVal = MaskedVal; |
2669 | |
2670 | // Sign extend the value if needed. |
2671 | if (Info.IsSigned) { |
2672 | assert(Info.Size <= StorageSize); |
2673 | unsigned HighBits = StorageSize - Info.Size; |
2674 | if (HighBits) { |
2675 | ResultVal = Builder.CreateShl(LHS: ResultVal, RHS: HighBits, Name: "bf.result.shl" ); |
2676 | ResultVal = Builder.CreateAShr(LHS: ResultVal, RHS: HighBits, Name: "bf.result.ashr" ); |
2677 | } |
2678 | } |
2679 | |
2680 | ResultVal = Builder.CreateIntCast(V: ResultVal, DestTy: ResLTy, isSigned: Info.IsSigned, |
2681 | Name: "bf.result.cast" ); |
2682 | *Result = EmitFromMemory(Value: ResultVal, Ty: Dst.getType()); |
2683 | } |
2684 | } |
2685 | |
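     | // Illustrative sketch (assumed OpenCL-style source): for a four-element
     | // ext vector `float4 V` and a `float2 W`, the component store `V.zw = W`
     | // widens W with a poison-padded shuffle and then blends it in, roughly:
     | //   %vec   = load <4 x float>, ptr %V
     | //   %ext   = shufflevector <2 x float> %W, <2 x float> poison,
     | //                          <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
     | //   %blend = shufflevector <4 x float> %vec, <4 x float> %ext,
     | //                          <4 x i32> <i32 0, i32 1, i32 4, i32 5>
     | //   store <4 x float> %blend, ptr %V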
2686 | void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, |
2687 | LValue Dst) { |
2688 | llvm::Value *SrcVal = Src.getScalarVal(); |
2689 | Address DstAddr = Dst.getExtVectorAddress(); |
2690 | if (DstAddr.getElementType()->getScalarSizeInBits() > |
2691 | SrcVal->getType()->getScalarSizeInBits()) |
2692 | SrcVal = Builder.CreateZExt( |
2693 | V: SrcVal, DestTy: convertTypeForLoadStore(ASTTy: Dst.getType(), LLVMTy: SrcVal->getType())); |
2694 | |
2695 | // HLSL allows storing to scalar values through ExtVector component LValues. |
2696 | // To support this we need to handle the case where the destination address is |
2697 | // a scalar. |
2698 | if (!DstAddr.getElementType()->isVectorTy()) { |
2699 | assert(!Dst.getType()->isVectorType() && |
2700 | "this should only occur for non-vector l-values" ); |
2701 | Builder.CreateStore(Val: SrcVal, Addr: DstAddr, IsVolatile: Dst.isVolatileQualified()); |
2702 | return; |
2703 | } |
2704 | |
2705 | // This access turns into a read/modify/write of the vector. Load the input |
2706 | // value now. |
2707 | llvm::Value *Vec = Builder.CreateLoad(Addr: DstAddr, IsVolatile: Dst.isVolatileQualified()); |
2708 | llvm::Type *VecTy = Vec->getType(); |
2709 | const llvm::Constant *Elts = Dst.getExtVectorElts(); |
2710 | |
2711 | if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) { |
2712 | unsigned NumSrcElts = VTy->getNumElements(); |
2713 | unsigned NumDstElts = cast<llvm::FixedVectorType>(Val: VecTy)->getNumElements(); |
2714 | if (NumDstElts == NumSrcElts) { |
2715 |       // Use a shuffle vector when the source and destination have the same
2716 |       // number of elements, and invert the component mask since it applies to
2717 |       // the side that will be stored.
2718 | SmallVector<int, 4> Mask(NumDstElts); |
2719 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2720 | Mask[getAccessedFieldNo(Idx: i, Elts)] = i; |
2721 | |
2722 | Vec = Builder.CreateShuffleVector(V: SrcVal, Mask); |
2723 | } else if (NumDstElts > NumSrcElts) { |
2724 |       // Extend the source vector to the same length as the destination and
2725 |       // then shuffle it into the destination.
2726 | // FIXME: since we're shuffling with undef, can we just use the indices |
2727 | // into that? This could be simpler. |
2728 | SmallVector<int, 4> ExtMask; |
2729 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2730 | ExtMask.push_back(Elt: i); |
2731 | ExtMask.resize(N: NumDstElts, NV: -1); |
2732 | llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(V: SrcVal, Mask: ExtMask); |
2733 |       // Build an identity mask for the destination.
2734 | SmallVector<int, 4> Mask; |
2735 | for (unsigned i = 0; i != NumDstElts; ++i) |
2736 | Mask.push_back(Elt: i); |
2737 | |
2738 | // When the vector size is odd and .odd or .hi is used, the last element |
2739 | // of the Elts constant array will be one past the size of the vector. |
2740 | // Ignore the last element here, if it is greater than the mask size. |
2741 | if (getAccessedFieldNo(Idx: NumSrcElts - 1, Elts) == Mask.size()) |
2742 | NumSrcElts--; |
2743 | |
2744 |       // Modify the mask to select what gets shuffled in.
2745 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2746 | Mask[getAccessedFieldNo(Idx: i, Elts)] = i + NumDstElts; |
2747 | Vec = Builder.CreateShuffleVector(V1: Vec, V2: ExtSrcVal, Mask); |
2748 | } else { |
2749 | // We should never shorten the vector |
2750 | llvm_unreachable("unexpected shorten vector length" ); |
2751 | } |
2752 | } else { |
2753 |     // If the Src is a scalar (not a vector) and the target is a vector, it
2754 |     // must be updating one element.
2755 | unsigned InIdx = getAccessedFieldNo(Idx: 0, Elts); |
2756 | llvm::Value *Elt = llvm::ConstantInt::get(Ty: SizeTy, V: InIdx); |
2757 | |
2758 | Vec = Builder.CreateInsertElement(Vec, NewElt: SrcVal, Idx: Elt); |
2759 | } |
2760 | |
2761 | Builder.CreateStore(Val: Vec, Addr: Dst.getExtVectorAddress(), |
2762 | IsVolatile: Dst.isVolatileQualified()); |
2763 | } |
2764 | |
2765 | /// Stores to global named registers are always calls to intrinsics.
2766 | void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) { |
2767 | assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) && |
2768 | "Bad type for register variable" ); |
2769 | llvm::MDNode *RegName = cast<llvm::MDNode>( |
2770 | Val: cast<llvm::MetadataAsValue>(Val: Dst.getGlobalReg())->getMetadata()); |
2771 | assert(RegName && "Register LValue is not metadata" ); |
2772 | |
2773 |   // We accept integer and pointer types only.
2774 | llvm::Type *OrigTy = CGM.getTypes().ConvertType(T: Dst.getType()); |
2775 | llvm::Type *Ty = OrigTy; |
2776 | if (OrigTy->isPointerTy()) |
2777 | Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); |
2778 | llvm::Type *Types[] = { Ty }; |
2779 | |
2780 | llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::write_register, Tys: Types); |
2781 | llvm::Value *Value = Src.getScalarVal(); |
2782 | if (OrigTy->isPointerTy()) |
2783 | Value = Builder.CreatePtrToInt(V: Value, DestTy: Ty); |
2784 | Builder.CreateCall( |
2785 | Callee: F, Args: {llvm::MetadataAsValue::get(Context&: Ty->getContext(), MD: RegName), Value}); |
2786 | } |
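     | // Illustrative sketch (assumed register name): for a global named register
     | //   register unsigned long current_sp __asm__("sp");
     | // loads and stores lower to the register intrinsics, roughly:
     | //   %0 = call i64 @llvm.read_register.i64(metadata !0)
     | //   call void @llvm.write_register.i64(metadata !0, i64 %val)
     | // where !0 = !{!"sp"} names the register.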
2787 | |
2788 | // setObjCGCLValueClass - sets the class of the lvalue for the purpose of
2789 | // generating the write-barrier API. It is currently a global, ivar,
2790 | // or neither.
2791 | static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, |
2792 | LValue &LV, |
2793 | bool IsMemberAccess=false) { |
2794 | if (Ctx.getLangOpts().getGC() == LangOptions::NonGC) |
2795 | return; |
2796 | |
2797 | if (isa<ObjCIvarRefExpr>(Val: E)) { |
2798 | QualType ExpTy = E->getType(); |
2799 | if (IsMemberAccess && ExpTy->isPointerType()) { |
2800 |       // If the ivar is a structure pointer, assigning to a field of
2801 |       // this struct follows gcc's behavior and conservatively makes it
2802 |       // a non-ivar write-barrier.
2803 | ExpTy = ExpTy->castAs<PointerType>()->getPointeeType(); |
2804 | if (ExpTy->isRecordType()) { |
2805 | LV.setObjCIvar(false); |
2806 | return; |
2807 | } |
2808 | } |
2809 | LV.setObjCIvar(true); |
2810 | auto *Exp = cast<ObjCIvarRefExpr>(Val: const_cast<Expr *>(E)); |
2811 | LV.setBaseIvarExp(Exp->getBase()); |
2812 | LV.setObjCArray(E->getType()->isArrayType()); |
2813 | return; |
2814 | } |
2815 | |
2816 | if (const auto *Exp = dyn_cast<DeclRefExpr>(Val: E)) { |
2817 | if (const auto *VD = dyn_cast<VarDecl>(Val: Exp->getDecl())) { |
2818 | if (VD->hasGlobalStorage()) { |
2819 | LV.setGlobalObjCRef(true); |
2820 | LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None); |
2821 | } |
2822 | } |
2823 | LV.setObjCArray(E->getType()->isArrayType()); |
2824 | return; |
2825 | } |
2826 | |
2827 | if (const auto *Exp = dyn_cast<UnaryOperator>(Val: E)) { |
2828 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2829 | return; |
2830 | } |
2831 | |
2832 | if (const auto *Exp = dyn_cast<ParenExpr>(Val: E)) { |
2833 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2834 | if (LV.isObjCIvar()) { |
2835 | // If cast is to a structure pointer, follow gcc's behavior and make it |
2836 | // a non-ivar write-barrier. |
2837 | QualType ExpTy = E->getType(); |
2838 | if (ExpTy->isPointerType()) |
2839 | ExpTy = ExpTy->castAs<PointerType>()->getPointeeType(); |
2840 | if (ExpTy->isRecordType()) |
2841 | LV.setObjCIvar(false); |
2842 | } |
2843 | return; |
2844 | } |
2845 | |
2846 | if (const auto *Exp = dyn_cast<GenericSelectionExpr>(Val: E)) { |
2847 | setObjCGCLValueClass(Ctx, E: Exp->getResultExpr(), LV); |
2848 | return; |
2849 | } |
2850 | |
2851 | if (const auto *Exp = dyn_cast<ImplicitCastExpr>(Val: E)) { |
2852 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2853 | return; |
2854 | } |
2855 | |
2856 | if (const auto *Exp = dyn_cast<CStyleCastExpr>(Val: E)) { |
2857 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2858 | return; |
2859 | } |
2860 | |
2861 | if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(Val: E)) { |
2862 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2863 | return; |
2864 | } |
2865 | |
2866 | if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(Val: E)) { |
2867 | setObjCGCLValueClass(Ctx, E: Exp->getBase(), LV); |
2868 | if (LV.isObjCIvar() && !LV.isObjCArray()) |
2869 |       // Using array syntax to assign to what an ivar points to is not the
2870 |       // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2871 | LV.setObjCIvar(false); |
2872 | else if (LV.isGlobalObjCRef() && !LV.isObjCArray()) |
2873 |       // Using array syntax to assign to what a global points to is not the
2874 |       // same as assigning to the global itself. {id *G;} G[i] = 0;
2875 | LV.setGlobalObjCRef(false); |
2876 | return; |
2877 | } |
2878 | |
2879 | if (const auto *Exp = dyn_cast<MemberExpr>(Val: E)) { |
2880 | setObjCGCLValueClass(Ctx, E: Exp->getBase(), LV, IsMemberAccess: true); |
2881 |     // We don't know if the member is an 'ivar', but this flag is looked at
2882 |     // only in the context of LV.isObjCIvar().
2883 | LV.setObjCArray(E->getType()->isArrayType()); |
2884 | return; |
2885 | } |
2886 | } |
2887 | |
2888 | static LValue EmitThreadPrivateVarDeclLValue( |
2889 | CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, |
2890 | llvm::Type *RealVarTy, SourceLocation Loc) { |
2891 | if (CGF.CGM.getLangOpts().OpenMPIRBuilder) |
2892 | Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate( |
2893 | CGF, VD, VDAddr: Addr, Loc); |
2894 | else |
2895 | Addr = |
2896 | CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, VDAddr: Addr, Loc); |
2897 | |
2898 | Addr = Addr.withElementType(ElemTy: RealVarTy); |
2899 | return CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
2900 | } |
2901 | |
2902 | static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, |
2903 | const VarDecl *VD, QualType T) { |
2904 | std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res = |
2905 | OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD); |
2906 | // Return an invalid address if variable is MT_To (or MT_Enter starting with |
2907 | // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link |
2908 | // and MT_To (or MT_Enter) with unified memory, return a valid address. |
2909 | if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
2910 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
2911 | !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) |
2912 | return Address::invalid(); |
2913 | assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) || |
2914 | ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
2915 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
2916 | CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) && |
2917 | "Expected link clause OR to clause with unified memory enabled." ); |
2918 | QualType PtrTy = CGF.getContext().getPointerType(T: VD->getType()); |
2919 | Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD); |
2920 | return CGF.EmitLoadOfPointer(Ptr: Addr, PtrTy: PtrTy->castAs<PointerType>()); |
2921 | } |
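     | // Illustrative sketch (assumed directive): for device codegen of
     | //   int g;
     | //   #pragma omp declare target link(g)
     | // a reference to `g` loads the device-side pointer returned by
     | // getAddrOfDeclareTargetVar and dereferences it rather than addressing
     | // `g` directly.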
2922 | |
2923 | Address |
2924 | CodeGenFunction::EmitLoadOfReference(LValue RefLVal, |
2925 | LValueBaseInfo *PointeeBaseInfo, |
2926 | TBAAAccessInfo *PointeeTBAAInfo) { |
2927 | llvm::LoadInst *Load = |
2928 | Builder.CreateLoad(Addr: RefLVal.getAddress(), IsVolatile: RefLVal.isVolatile()); |
2929 | CGM.DecorateInstructionWithTBAA(Inst: Load, TBAAInfo: RefLVal.getTBAAInfo()); |
2930 | QualType PTy = RefLVal.getType()->getPointeeType(); |
2931 | CharUnits Align = CGM.getNaturalTypeAlignment( |
2932 | T: PTy, BaseInfo: PointeeBaseInfo, TBAAInfo: PointeeTBAAInfo, /*ForPointeeType=*/forPointeeType: true); |
2933 | if (!PTy->isIncompleteType()) { |
2934 | llvm::LLVMContext &Ctx = getLLVMContext(); |
2935 | llvm::MDBuilder MDB(Ctx); |
2936 | // Emit !nonnull metadata |
2937 | if (CGM.getTypes().getTargetAddressSpace(T: PTy) == 0 && |
2938 | !CGM.getCodeGenOpts().NullPointerIsValid) |
2939 | Load->setMetadata(KindID: llvm::LLVMContext::MD_nonnull, |
2940 | Node: llvm::MDNode::get(Context&: Ctx, MDs: {})); |
2941 | // Emit !align metadata |
2942 | if (PTy->isObjectType()) { |
2943 | auto AlignVal = Align.getQuantity(); |
2944 | if (AlignVal > 1) { |
2945 | Load->setMetadata( |
2946 | KindID: llvm::LLVMContext::MD_align, |
2947 | Node: llvm::MDNode::get(Context&: Ctx, MDs: MDB.createConstant(C: llvm::ConstantInt::get( |
2948 | Ty: Builder.getInt64Ty(), V: AlignVal)))); |
2949 | } |
2950 | } |
2951 | } |
2952 | return makeNaturalAddressForPointer(Ptr: Load, T: PTy, Alignment: Align, |
2953 | /*ForPointeeType=*/true, BaseInfo: PointeeBaseInfo, |
2954 | TBAAInfo: PointeeTBAAInfo); |
2955 | } |
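     | // Illustrative sketch: under default options (null pointers invalid,
     | // address space 0), loading an `int &r` yields roughly
     | //   %ref = load ptr, ptr %r.addr, align 8, !nonnull !0, !align !1
     | // with !0 = !{} and !1 = !{i64 4} (the natural alignment of int).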
2956 | |
2957 | LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) { |
2958 | LValueBaseInfo PointeeBaseInfo; |
2959 | TBAAAccessInfo PointeeTBAAInfo; |
2960 | Address PointeeAddr = EmitLoadOfReference(RefLVal, PointeeBaseInfo: &PointeeBaseInfo, |
2961 | PointeeTBAAInfo: &PointeeTBAAInfo); |
2962 | return MakeAddrLValue(Addr: PointeeAddr, T: RefLVal.getType()->getPointeeType(), |
2963 | BaseInfo: PointeeBaseInfo, TBAAInfo: PointeeTBAAInfo); |
2964 | } |
2965 | |
2966 | Address CodeGenFunction::EmitLoadOfPointer(Address Ptr, |
2967 | const PointerType *PtrTy, |
2968 | LValueBaseInfo *BaseInfo, |
2969 | TBAAAccessInfo *TBAAInfo) { |
2970 | llvm::Value *Addr = Builder.CreateLoad(Addr: Ptr); |
2971 | return makeNaturalAddressForPointer(Ptr: Addr, T: PtrTy->getPointeeType(), |
2972 | Alignment: CharUnits(), /*ForPointeeType=*/true, |
2973 | BaseInfo, TBAAInfo); |
2974 | } |
2975 | |
2976 | LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr, |
2977 | const PointerType *PtrTy) { |
2978 | LValueBaseInfo BaseInfo; |
2979 | TBAAAccessInfo TBAAInfo; |
2980 | Address Addr = EmitLoadOfPointer(Ptr: PtrAddr, PtrTy, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
2981 | return MakeAddrLValue(Addr, T: PtrTy->getPointeeType(), BaseInfo, TBAAInfo); |
2982 | } |
2983 | |
2984 | static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, |
2985 | const Expr *E, const VarDecl *VD) { |
2986 | QualType T = E->getType(); |
2987 | |
2988 | // If it's thread_local, emit a call to its wrapper function instead. |
2989 | if (VD->getTLSKind() == VarDecl::TLS_Dynamic && |
2990 | CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD)) |
2991 | return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, LValType: T); |
2992 | // Check if the variable is marked as declare target with link clause in |
2993 | // device codegen. |
2994 | if (CGF.getLangOpts().OpenMPIsTargetDevice) { |
2995 | Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T); |
2996 | if (Addr.isValid()) |
2997 | return CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
2998 | } |
2999 | |
3000 | llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(D: VD); |
3001 | |
3002 | if (VD->getTLSKind() != VarDecl::TLS_None) |
3003 | V = CGF.Builder.CreateThreadLocalAddress(Ptr: V); |
3004 | |
3005 | llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(T: VD->getType()); |
3006 | CharUnits Alignment = CGF.getContext().getDeclAlign(D: VD); |
3007 | Address Addr(V, RealVarTy, Alignment); |
3008 |   // Emit a reference to the private copy of the variable if it is an OpenMP
3009 |   // threadprivate variable.
3010 | if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd && |
3011 | VD->hasAttr<OMPThreadPrivateDeclAttr>()) { |
3012 | return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy, |
3013 | Loc: E->getExprLoc()); |
3014 | } |
3015 | LValue LV = VD->getType()->isReferenceType() ? |
3016 | CGF.EmitLoadOfReferenceLValue(RefAddr: Addr, RefTy: VD->getType(), |
3017 | Source: AlignmentSource::Decl) : |
3018 | CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
3019 | setObjCGCLValueClass(Ctx: CGF.getContext(), E, LV); |
3020 | return LV; |
3021 | } |
3022 | |
3023 | llvm::Constant *CodeGenModule::getRawFunctionPointer(GlobalDecl GD, |
3024 | llvm::Type *Ty) { |
3025 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
3026 | if (FD->hasAttr<WeakRefAttr>()) { |
3027 | ConstantAddress aliasee = GetWeakRefReference(VD: FD); |
3028 | return aliasee.getPointer(); |
3029 | } |
3030 | |
3031 | llvm::Constant *V = GetAddrOfFunction(GD, Ty); |
3032 | return V; |
3033 | } |
3034 | |
3035 | static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, |
3036 | GlobalDecl GD) { |
3037 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
3038 | llvm::Constant *V = CGF.CGM.getFunctionPointer(GD); |
3039 | QualType ETy = E->getType(); |
3040 | if (ETy->isCFIUncheckedCalleeFunctionType()) { |
3041 | if (auto *GV = dyn_cast<llvm::GlobalValue>(Val: V)) |
3042 | V = llvm::NoCFIValue::get(GV); |
3043 | } |
3044 | CharUnits Alignment = CGF.getContext().getDeclAlign(D: FD); |
3045 | return CGF.MakeAddrLValue(V, T: ETy, Alignment, Source: AlignmentSource::Decl); |
3046 | } |
3047 | |
3048 | static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, |
3049 | llvm::Value *ThisValue) { |
3050 | |
3051 | return CGF.EmitLValueForLambdaField(Field: FD, ThisValue); |
3052 | } |
3053 | |
3054 | /// Named Registers are named metadata pointing to the register name |
3055 | /// which will be read from/written to as an argument to the intrinsic |
3056 | /// @llvm.read/write_register. |
3057 | /// So far, only the name is being passed down, but other options such as |
3058 | /// register type, allocation type or even optimization options could be |
3059 | /// passed down via the metadata node. |
3060 | static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) { |
3061 | SmallString<64> Name("llvm.named.register." ); |
3062 | AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>(); |
3063 | assert(Asm->getLabel().size() < 64-Name.size() && |
3064 | "Register name too big" ); |
3065 | Name.append(RHS: Asm->getLabel()); |
3066 | llvm::NamedMDNode *M = |
3067 | CGM.getModule().getOrInsertNamedMetadata(Name); |
3068 | if (M->getNumOperands() == 0) { |
3069 | llvm::MDString *Str = llvm::MDString::get(Context&: CGM.getLLVMContext(), |
3070 | Str: Asm->getLabel()); |
3071 | llvm::Metadata *Ops[] = {Str}; |
3072 | M->addOperand(M: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: Ops)); |
3073 | } |
3074 | |
3075 | CharUnits Alignment = CGM.getContext().getDeclAlign(D: VD); |
3076 | |
3077 | llvm::Value *Ptr = |
3078 | llvm::MetadataAsValue::get(Context&: CGM.getLLVMContext(), MD: M->getOperand(i: 0)); |
3079 | return LValue::MakeGlobalReg(V: Ptr, alignment: Alignment, type: VD->getType()); |
3080 | } |
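     | // Illustrative sketch: for the __asm__("sp") example above, the module gains
     | //   !llvm.named.register.sp = !{!0}
     | //   !0 = !{!"sp"}
     | // and the resulting LValue carries !0 wrapped in a MetadataAsValue.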
3081 | |
3082 | /// Determine whether we can emit a reference to \p VD from the current |
3083 | /// context, despite not necessarily having seen an odr-use of the variable in |
3084 | /// this context. |
3085 | static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, |
3086 | const DeclRefExpr *E, |
3087 | const VarDecl *VD) { |
3088 | // For a variable declared in an enclosing scope, do not emit a spurious |
3089 | // reference even if we have a capture, as that will emit an unwarranted |
3090 | // reference to our capture state, and will likely generate worse code than |
3091 | // emitting a local copy. |
3092 | if (E->refersToEnclosingVariableOrCapture()) |
3093 | return false; |
3094 | |
3095 | // For a local declaration declared in this function, we can always reference |
3096 | // it even if we don't have an odr-use. |
3097 | if (VD->hasLocalStorage()) { |
3098 | return VD->getDeclContext() == |
3099 | dyn_cast_or_null<DeclContext>(Val: CGF.CurCodeDecl); |
3100 | } |
3101 | |
3102 | // For a global declaration, we can emit a reference to it if we know |
3103 | // for sure that we are able to emit a definition of it. |
3104 | VD = VD->getDefinition(C&: CGF.getContext()); |
3105 | if (!VD) |
3106 | return false; |
3107 | |
3108 | // Don't emit a spurious reference if it might be to a variable that only |
3109 | // exists on a different device / target. |
3110 | // FIXME: This is unnecessarily broad. Check whether this would actually be a |
3111 | // cross-target reference. |
3112 | if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA || |
3113 | CGF.getLangOpts().OpenCL) { |
3114 | return false; |
3115 | } |
3116 | |
3117 | // We can emit a spurious reference only if the linkage implies that we'll |
3118 | // be emitting a non-interposable symbol that will be retained until link |
3119 | // time. |
3120 | switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) { |
3121 | case llvm::GlobalValue::ExternalLinkage: |
3122 | case llvm::GlobalValue::LinkOnceODRLinkage: |
3123 | case llvm::GlobalValue::WeakODRLinkage: |
3124 | case llvm::GlobalValue::InternalLinkage: |
3125 | case llvm::GlobalValue::PrivateLinkage: |
3126 | return true; |
3127 | default: |
3128 | return false; |
3129 | } |
3130 | } |
3131 | |
3132 | LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { |
3133 | const NamedDecl *ND = E->getDecl(); |
3134 | QualType T = E->getType(); |
3135 | |
3136 | assert(E->isNonOdrUse() != NOUR_Unevaluated && |
3137 | "should not emit an unevaluated operand" ); |
3138 | |
3139 | if (const auto *VD = dyn_cast<VarDecl>(Val: ND)) { |
3140 |     // Global named registers are accessed via intrinsics only.
3141 | if (VD->getStorageClass() == SC_Register && |
3142 | VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl()) |
3143 | return EmitGlobalNamedRegister(VD, CGM); |
3144 | |
3145 | // If this DeclRefExpr does not constitute an odr-use of the variable, |
3146 | // we're not permitted to emit a reference to it in general, and it might |
3147 | // not be captured if capture would be necessary for a use. Emit the |
3148 | // constant value directly instead. |
3149 | if (E->isNonOdrUse() == NOUR_Constant && |
3150 | (VD->getType()->isReferenceType() || |
3151 | !canEmitSpuriousReferenceToVariable(CGF&: *this, E, VD))) { |
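     |       // Rebind VD to the declaration that actually carries the initializer.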
3152 | VD->getAnyInitializer(D&: VD); |
3153 | llvm::Constant *Val = ConstantEmitter(*this).emitAbstract( |
3154 | loc: E->getLocation(), value: *VD->evaluateValue(), T: VD->getType()); |
3155 | assert(Val && "failed to emit constant expression" ); |
3156 | |
3157 | Address Addr = Address::invalid(); |
3158 | if (!VD->getType()->isReferenceType()) { |
3159 | // Spill the constant value to a global. |
3160 | Addr = CGM.createUnnamedGlobalFrom(D: *VD, Constant: Val, |
3161 | Align: getContext().getDeclAlign(D: VD)); |
3162 | llvm::Type *VarTy = getTypes().ConvertTypeForMem(T: VD->getType()); |
3163 | auto *PTy = llvm::PointerType::get( |
3164 | C&: getLLVMContext(), AddressSpace: getTypes().getTargetAddressSpace(T: VD->getType())); |
3165 | Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty: PTy, ElementTy: VarTy); |
3166 | } else { |
3167 | // Should we be using the alignment of the constant pointer we emitted? |
3168 | CharUnits Alignment = |
3169 | CGM.getNaturalTypeAlignment(T: E->getType(), |
3170 | /* BaseInfo= */ nullptr, |
3171 | /* TBAAInfo= */ nullptr, |
3172 | /* forPointeeType= */ true); |
3173 | Addr = makeNaturalAddressForPointer(Ptr: Val, T, Alignment); |
3174 | } |
3175 | return MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
3176 | } |
3177 | |
3178 | // FIXME: Handle other kinds of non-odr-use DeclRefExprs. |
3179 | |
3180 | // Check for captured variables. |
3181 | if (E->refersToEnclosingVariableOrCapture()) { |
3182 | VD = VD->getCanonicalDecl(); |
3183 | if (auto *FD = LambdaCaptureFields.lookup(Val: VD)) |
3184 | return EmitCapturedFieldLValue(CGF&: *this, FD, ThisValue: CXXABIThisValue); |
3185 | if (CapturedStmtInfo) { |
3186 | auto I = LocalDeclMap.find(Val: VD); |
3187 | if (I != LocalDeclMap.end()) { |
3188 | LValue CapLVal; |
3189 | if (VD->getType()->isReferenceType()) |
3190 | CapLVal = EmitLoadOfReferenceLValue(RefAddr: I->second, RefTy: VD->getType(), |
3191 | Source: AlignmentSource::Decl); |
3192 | else |
3193 | CapLVal = MakeAddrLValue(Addr: I->second, T); |
3194 | // Mark lvalue as nontemporal if the variable is marked as nontemporal |
3195 | // in simd context. |
3196 | if (getLangOpts().OpenMP && |
3197 | CGM.getOpenMPRuntime().isNontemporalDecl(VD)) |
3198 | CapLVal.setNontemporal(/*Value=*/true); |
3199 | return CapLVal; |
3200 | } |
3201 | LValue CapLVal = |
3202 | EmitCapturedFieldLValue(CGF&: *this, FD: CapturedStmtInfo->lookup(VD), |
3203 | ThisValue: CapturedStmtInfo->getContextValue()); |
3204 | Address LValueAddress = CapLVal.getAddress(); |
3205 | CapLVal = MakeAddrLValue(Addr: Address(LValueAddress.emitRawPointer(CGF&: *this), |
3206 | LValueAddress.getElementType(), |
3207 | getContext().getDeclAlign(D: VD)), |
3208 | T: CapLVal.getType(), |
3209 | BaseInfo: LValueBaseInfo(AlignmentSource::Decl), |
3210 | TBAAInfo: CapLVal.getTBAAInfo()); |
3211 | // Mark lvalue as nontemporal if the variable is marked as nontemporal |
3212 | // in simd context. |
3213 | if (getLangOpts().OpenMP && |
3214 | CGM.getOpenMPRuntime().isNontemporalDecl(VD)) |
3215 | CapLVal.setNontemporal(/*Value=*/true); |
3216 | return CapLVal; |
3217 | } |
3218 | |
3219 | assert(isa<BlockDecl>(CurCodeDecl)); |
3220 | Address addr = GetAddrOfBlockDecl(var: VD); |
3221 | return MakeAddrLValue(Addr: addr, T, Source: AlignmentSource::Decl); |
3222 | } |
3223 | } |
3224 | |
3225 | // FIXME: We should be able to assert this for FunctionDecls as well! |
3226 | // FIXME: We should be able to assert this for all DeclRefExprs, not just |
3227 | // those with a valid source location. |
3228 | assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() || |
3229 | !E->getLocation().isValid()) && |
3230 | "Should not use decl without marking it used!" ); |
3231 | |
3232 | if (ND->hasAttr<WeakRefAttr>()) { |
3233 | const auto *VD = cast<ValueDecl>(Val: ND); |
3234 | ConstantAddress Aliasee = CGM.GetWeakRefReference(VD); |
3235 | return MakeAddrLValue(Addr: Aliasee, T, Source: AlignmentSource::Decl); |
3236 | } |
3237 | |
3238 | if (const auto *VD = dyn_cast<VarDecl>(Val: ND)) { |
3239 | // Check if this is a global variable. |
3240 | if (VD->hasLinkage() || VD->isStaticDataMember()) |
3241 | return EmitGlobalVarDeclLValue(CGF&: *this, E, VD); |
3242 | |
3243 | Address addr = Address::invalid(); |
3244 | |
3245 | // The variable should generally be present in the local decl map. |
3246 | auto iter = LocalDeclMap.find(Val: VD); |
3247 | if (iter != LocalDeclMap.end()) { |
3248 | addr = iter->second; |
3249 | |
3250 |       // Otherwise, it might be a static local we haven't emitted yet for
3251 |       // some reason; most likely because it's in an outer function.
3252 | } else if (VD->isStaticLocal()) { |
3253 | llvm::Constant *var = CGM.getOrCreateStaticVarDecl( |
3254 | D: *VD, Linkage: CGM.getLLVMLinkageVarDefinition(VD)); |
3255 | addr = Address( |
3256 | var, ConvertTypeForMem(T: VD->getType()), getContext().getDeclAlign(D: VD)); |
3257 | |
3258 | // No other cases for now. |
3259 | } else { |
3260 | llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?" ); |
3261 | } |
3262 | |
3263 | // Handle threadlocal function locals. |
3264 | if (VD->getTLSKind() != VarDecl::TLS_None) |
3265 | addr = addr.withPointer( |
3266 | NewPointer: Builder.CreateThreadLocalAddress(Ptr: addr.getBasePointer()), |
3267 | IsKnownNonNull: NotKnownNonNull); |
3268 | |
3269 | // Check for OpenMP threadprivate variables. |
3270 | if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && |
3271 | VD->hasAttr<OMPThreadPrivateDeclAttr>()) { |
3272 | return EmitThreadPrivateVarDeclLValue( |
3273 | CGF&: *this, VD, T, Addr: addr, RealVarTy: getTypes().ConvertTypeForMem(T: VD->getType()), |
3274 | Loc: E->getExprLoc()); |
3275 | } |
3276 | |
3277 | // Drill into block byref variables. |
3278 | bool isBlockByref = VD->isEscapingByref(); |
3279 | if (isBlockByref) { |
3280 | addr = emitBlockByrefAddress(baseAddr: addr, V: VD); |
3281 | } |
3282 | |
3283 | // Drill into reference types. |
3284 | LValue LV = VD->getType()->isReferenceType() ? |
3285 | EmitLoadOfReferenceLValue(RefAddr: addr, RefTy: VD->getType(), Source: AlignmentSource::Decl) : |
3286 | MakeAddrLValue(Addr: addr, T, Source: AlignmentSource::Decl); |
3287 | |
3288 | bool isLocalStorage = VD->hasLocalStorage(); |
3289 | |
3290 | bool NonGCable = isLocalStorage && |
3291 | !VD->getType()->isReferenceType() && |
3292 | !isBlockByref; |
3293 | if (NonGCable) { |
3294 | LV.getQuals().removeObjCGCAttr(); |
3295 | LV.setNonGC(true); |
3296 | } |
3297 | |
3298 | bool isImpreciseLifetime = |
3299 | (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>()); |
3300 | if (isImpreciseLifetime) |
3301 | LV.setARCPreciseLifetime(ARCImpreciseLifetime); |
3302 | setObjCGCLValueClass(Ctx: getContext(), E, LV); |
3303 | return LV; |
3304 | } |
3305 | |
3306 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) |
3307 | return EmitFunctionDeclLValue(CGF&: *this, E, GD: FD); |
3308 | |
3309 | // FIXME: While we're emitting a binding from an enclosing scope, all other |
3310 | // DeclRefExprs we see should be implicitly treated as if they also refer to |
3311 | // an enclosing scope. |
3312 | if (const auto *BD = dyn_cast<BindingDecl>(Val: ND)) { |
3313 | if (E->refersToEnclosingVariableOrCapture()) { |
3314 | auto *FD = LambdaCaptureFields.lookup(Val: BD); |
3315 | return EmitCapturedFieldLValue(CGF&: *this, FD, ThisValue: CXXABIThisValue); |
3316 | } |
3317 | return EmitLValue(E: BD->getBinding()); |
3318 | } |
3319 | |
3320 | // We can form DeclRefExprs naming GUID declarations when reconstituting |
3321 | // non-type template parameters into expressions. |
3322 | if (const auto *GD = dyn_cast<MSGuidDecl>(Val: ND)) |
3323 | return MakeAddrLValue(Addr: CGM.GetAddrOfMSGuidDecl(GD), T, |
3324 | Source: AlignmentSource::Decl); |
3325 | |
3326 | if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(Val: ND)) { |
3327 | auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO); |
3328 | auto AS = getLangASFromTargetAS(TargetAS: ATPO.getAddressSpace()); |
3329 | |
3330 | if (AS != T.getAddressSpace()) { |
3331 | auto TargetAS = getContext().getTargetAddressSpace(AS: T.getAddressSpace()); |
3332 | auto PtrTy = llvm::PointerType::get(C&: CGM.getLLVMContext(), AddressSpace: TargetAS); |
3333 | auto ASC = getTargetHooks().performAddrSpaceCast(CGM, V: ATPO.getPointer(), |
3334 | SrcAddr: AS, DestTy: PtrTy); |
3335 | ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment()); |
3336 | } |
3337 | |
3338 | return MakeAddrLValue(Addr: ATPO, T, Source: AlignmentSource::Decl); |
3339 | } |
3340 | |
3341 | llvm_unreachable("Unhandled DeclRefExpr" ); |
3342 | } |
3343 | |
3344 | LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { |
3345 | // __extension__ doesn't affect lvalue-ness. |
3346 | if (E->getOpcode() == UO_Extension) |
3347 | return EmitLValue(E: E->getSubExpr()); |
3348 | |
3349 | QualType ExprTy = getContext().getCanonicalType(T: E->getSubExpr()->getType()); |
3350 | switch (E->getOpcode()) { |
3351 | default: llvm_unreachable("Unknown unary operator lvalue!" ); |
3352 | case UO_Deref: { |
3353 | QualType T = E->getSubExpr()->getType()->getPointeeType(); |
3354 | assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type" ); |
3355 | |
3356 | LValueBaseInfo BaseInfo; |
3357 | TBAAAccessInfo TBAAInfo; |
3358 | Address Addr = EmitPointerWithAlignment(E: E->getSubExpr(), BaseInfo: &BaseInfo, |
3359 | TBAAInfo: &TBAAInfo); |
3360 | LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo); |
3361 | LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); |
3362 | |
3363 |     // We should not generate a __weak write barrier on an indirect reference
3364 |     // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3365 |     // But we continue to generate a __strong write barrier on an indirect
3366 |     // write into a pointer to object.
3367 | if (getLangOpts().ObjC && |
3368 | getLangOpts().getGC() != LangOptions::NonGC && |
3369 | LV.isObjCWeak()) |
3370 | LV.setNonGC(!E->isOBJCGCCandidate(Ctx&: getContext())); |
3371 | return LV; |
3372 | } |
3373 | case UO_Real: |
3374 | case UO_Imag: { |
3375 | LValue LV = EmitLValue(E: E->getSubExpr()); |
3376 | assert(LV.isSimple() && "real/imag on non-ordinary l-value" ); |
3377 | |
3378 | // __real is valid on scalars. This is a faster way of testing that. |
3379 | // __imag can only produce an rvalue on scalars. |
3380 | if (E->getOpcode() == UO_Real && |
3381 | !LV.getAddress().getElementType()->isStructTy()) { |
3382 | assert(E->getSubExpr()->getType()->isArithmeticType()); |
3383 | return LV; |
3384 | } |
3385 | |
3386 | QualType T = ExprTy->castAs<ComplexType>()->getElementType(); |
3387 | |
3388 | Address Component = |
3389 | (E->getOpcode() == UO_Real |
3390 | ? emitAddrOfRealComponent(complex: LV.getAddress(), complexType: LV.getType()) |
3391 | : emitAddrOfImagComponent(complex: LV.getAddress(), complexType: LV.getType())); |
3392 | LValue ElemLV = MakeAddrLValue(Addr: Component, T, BaseInfo: LV.getBaseInfo(), |
3393 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: T)); |
3394 | ElemLV.getQuals().addQualifiers(Q: LV.getQuals()); |
3395 | return ElemLV; |
3396 | } |
3397 | case UO_PreInc: |
3398 | case UO_PreDec: { |
3399 | LValue LV = EmitLValue(E: E->getSubExpr()); |
3400 | bool isInc = E->getOpcode() == UO_PreInc; |
3401 | |
3402 | if (E->getType()->isAnyComplexType()) |
3403 | EmitComplexPrePostIncDec(E, LV, isInc, isPre: true/*isPre*/); |
3404 | else |
3405 | EmitScalarPrePostIncDec(E, LV, isInc, isPre: true/*isPre*/); |
3406 | return LV; |
3407 | } |
3408 | } |
3409 | } |
3410 | |
3411 | LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { |
3412 | return MakeAddrLValue(Addr: CGM.GetAddrOfConstantStringFromLiteral(S: E), |
3413 | T: E->getType(), Source: AlignmentSource::Decl); |
3414 | } |
3415 | |
3416 | LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { |
3417 | return MakeAddrLValue(Addr: CGM.GetAddrOfConstantStringFromObjCEncode(E), |
3418 | T: E->getType(), Source: AlignmentSource::Decl); |
3419 | } |
3420 | |
3421 | LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { |
3422 | auto SL = E->getFunctionName(); |
3423 | assert(SL != nullptr && "No StringLiteral name in PredefinedExpr" ); |
3424 | StringRef FnName = CurFn->getName(); |
3425 | FnName.consume_front(Prefix: "\01" ); |
3426 | StringRef NameItems[] = { |
3427 | PredefinedExpr::getIdentKindName(IK: E->getIdentKind()), FnName}; |
3428 | std::string GVName = llvm::join(Begin: NameItems, End: NameItems + 2, Separator: "." ); |
3429 | if (auto *BD = dyn_cast_or_null<BlockDecl>(Val: CurCodeDecl)) { |
3430 | std::string Name = std::string(SL->getString()); |
3431 | if (!Name.empty()) { |
3432 | unsigned Discriminator = |
3433 | CGM.getCXXABI().getMangleContext().getBlockId(BD, Local: true); |
3434 | if (Discriminator) |
3435 | Name += "_" + Twine(Discriminator + 1).str(); |
3436 | auto C = CGM.GetAddrOfConstantCString(Str: Name, GlobalName: GVName.c_str()); |
3437 | return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl); |
3438 | } else { |
3439 | auto C = |
3440 | CGM.GetAddrOfConstantCString(Str: std::string(FnName), GlobalName: GVName.c_str()); |
3441 | return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl); |
3442 | } |
3443 | } |
3444 | auto C = CGM.GetAddrOfConstantStringFromLiteral(S: SL, Name: GVName); |
3445 | return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl); |
3446 | } |
3447 | |
3448 | /// Emit a type description suitable for use by a runtime sanitizer library. The |
3449 | /// format of a type descriptor is |
3450 | /// |
3451 | /// \code |
3452 | /// { i16 TypeKind, i16 TypeInfo } |
3453 | /// \endcode |
3454 | /// |
3455 | /// followed by an array of i8 containing the type name with extra information |
3456 | /// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a |
3457 | /// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for |
3458 | /// anything else. |
3459 | llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { |
3460 | // Only emit each type's descriptor once. |
3461 | if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(Ty: T)) |
3462 | return C; |
3463 | |
3464 | uint16_t TypeKind = TK_Unknown; |
3465 | uint16_t TypeInfo = 0; |
3466 | bool IsBitInt = false; |
3467 | |
3468 | if (T->isIntegerType()) { |
3469 | TypeKind = TK_Integer; |
3470 | TypeInfo = (llvm::Log2_32(Value: getContext().getTypeSize(T)) << 1) | |
3471 | (T->isSignedIntegerType() ? 1 : 0); |
3472 |     // Follow the suggestion from the discussion of issue 64100: write the
3473 |     // exact number of bits in TypeName after '\0', making it
3474 |     // <diagnostic-like type name>.'\0'.<32-bit width>.
3475 | if (T->isSignedIntegerType() && T->getAs<BitIntType>()) { |
3476 |       // Do sanity checks, as we are using a 32-bit type to store the bit length.
3477 |       assert(getContext().getTypeSize(T) > 0 &&
3478 |              "non-positive number of bits in __BitInt type" );
3479 |       assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3480 |              "too many bits in __BitInt type" );
3481 | |
3482 | // Redefine TypeKind with the actual __BitInt type if we have signed |
3483 | // BitInt. |
3484 | TypeKind = TK_BitInt; |
3485 | IsBitInt = true; |
3486 | } |
3487 | } else if (T->isFloatingType()) { |
3488 | TypeKind = TK_Float; |
3489 | TypeInfo = getContext().getTypeSize(T); |
3490 | } |
3491 | |
3492 | // Format the type name as if for a diagnostic, including quotes and |
3493 | // optionally an 'aka'. |
3494 | SmallString<32> Buffer; |
3495 | CGM.getDiags().ConvertArgToString(Kind: DiagnosticsEngine::ak_qualtype, |
3496 | Val: (intptr_t)T.getAsOpaquePtr(), Modifier: StringRef(), |
3497 | Argument: StringRef(), PrevArgs: {}, Output&: Buffer, QualTypeVals: {}); |
3498 | |
3499 | if (IsBitInt) { |
3500 |     // The structure is: 0 to end the string, a 32-bit unsigned integer in
3501 |     // target endianness, then zero.
3502 | char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'}; |
3503 | const auto *EIT = T->castAs<BitIntType>(); |
3504 | uint32_t Bits = EIT->getNumBits(); |
3505 | llvm::support::endian::write32(P: S + 1, V: Bits, |
3506 | E: getTarget().isBigEndian() |
3507 | ? llvm::endianness::big |
3508 | : llvm::endianness::little); |
3509 | StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0]))); |
3510 | Buffer.append(RHS: Str); |
3511 | } |
3512 | |
3513 | llvm::Constant *Components[] = { |
3514 | Builder.getInt16(C: TypeKind), Builder.getInt16(C: TypeInfo), |
3515 | llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: Buffer) |
3516 | }; |
3517 | llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(V: Components); |
3518 | |
3519 | auto *GV = new llvm::GlobalVariable( |
3520 | CGM.getModule(), Descriptor->getType(), |
3521 | /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); |
3522 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3523 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); |
3524 | |
3525 | // Remember the descriptor for this type. |
3526 | CGM.setTypeDescriptorInMap(Ty: T, C: GV); |
3527 | |
3528 | return GV; |
3529 | } |
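     | // Illustrative sketch: for a plain `int` (32-bit, signed) the emitted
     | // descriptor is roughly
     | //   @0 = private unnamed_addr constant { i16, i16, [6 x i8] }
     | //          { i16 0, i16 11, [6 x i8] c"'int'\00" }
     | // where TypeKind 0 is TK_Integer and TypeInfo 11 == (log2(32) << 1) | 1.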
3530 | |
3531 | llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { |
3532 | llvm::Type *TargetTy = IntPtrTy; |
3533 | |
3534 | if (V->getType() == TargetTy) |
3535 | return V; |
3536 | |
3537 | // Floating-point types which fit into intptr_t are bitcast to integers |
3538 | // and then passed directly (after zero-extension, if necessary). |
3539 | if (V->getType()->isFloatingPointTy()) { |
3540 | unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue(); |
3541 | if (Bits <= TargetTy->getIntegerBitWidth()) |
3542 | V = Builder.CreateBitCast(V, DestTy: llvm::Type::getIntNTy(C&: getLLVMContext(), |
3543 | N: Bits)); |
3544 | } |
3545 | |
3546 | // Integers which fit in intptr_t are zero-extended and passed directly. |
3547 | if (V->getType()->isIntegerTy() && |
3548 | V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) |
3549 | return Builder.CreateZExt(V, DestTy: TargetTy); |
3550 | |
3551 | // Pointers are passed directly, everything else is passed by address. |
3552 | if (!V->getType()->isPointerTy()) { |
3553 | RawAddress Ptr = CreateDefaultAlignTempAlloca(Ty: V->getType()); |
3554 | Builder.CreateStore(Val: V, Addr: Ptr); |
3555 | V = Ptr.getPointer(); |
3556 | } |
3557 | return Builder.CreatePtrToInt(V, DestTy: TargetTy); |
3558 | } |
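     | // Illustrative sketch on a 64-bit target: an i32 operand is zero-extended
     | // to i64, a double is bitcast to i64 and passed directly, and a non-scalar
     | // value is spilled to a stack temporary whose address is passed via
     | // ptrtoint.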
3559 | |
3560 | /// Emit a representation of a SourceLocation for passing to a handler |
3561 | /// in a sanitizer runtime library. The format for this data is: |
3562 | /// \code |
3563 | /// struct SourceLocation { |
3564 | /// const char *Filename; |
3565 | /// int32_t Line, Column; |
3566 | /// }; |
3567 | /// \endcode |
3568 | /// For an invalid SourceLocation, the Filename pointer is null. |
3569 | llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { |
3570 | llvm::Constant *Filename; |
3571 | int Line, Column; |
3572 | |
3573 | PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); |
3574 | if (PLoc.isValid()) { |
3575 | StringRef FilenameString = PLoc.getFilename(); |
3576 | |
3577 | int PathComponentsToStrip = |
3578 | CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip; |
3579 | if (PathComponentsToStrip < 0) { |
3580 | assert(PathComponentsToStrip != INT_MIN); |
3581 | int PathComponentsToKeep = -PathComponentsToStrip; |
3582 | auto I = llvm::sys::path::rbegin(path: FilenameString); |
3583 | auto E = llvm::sys::path::rend(path: FilenameString); |
3584 | while (I != E && --PathComponentsToKeep) |
3585 | ++I; |
3586 | |
3587 | FilenameString = FilenameString.substr(Start: I - E); |
3588 | } else if (PathComponentsToStrip > 0) { |
3589 | auto I = llvm::sys::path::begin(path: FilenameString); |
3590 | auto E = llvm::sys::path::end(path: FilenameString); |
3591 | while (I != E && PathComponentsToStrip--) |
3592 | ++I; |
3593 | |
3594 | if (I != E) |
3595 | FilenameString = |
3596 | FilenameString.substr(Start: I - llvm::sys::path::begin(path: FilenameString)); |
3597 | else |
3598 | FilenameString = llvm::sys::path::filename(path: FilenameString); |
3599 | } |
3600 | |
3601 | auto FilenameGV = |
3602 | CGM.GetAddrOfConstantCString(Str: std::string(FilenameString), GlobalName: ".src" ); |
3603 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal( |
3604 | GV: cast<llvm::GlobalVariable>( |
3605 | Val: FilenameGV.getPointer()->stripPointerCasts())); |
3606 | Filename = FilenameGV.getPointer(); |
3607 | Line = PLoc.getLine(); |
3608 | Column = PLoc.getColumn(); |
3609 | } else { |
3610 | Filename = llvm::Constant::getNullValue(Ty: Int8PtrTy); |
3611 | Line = Column = 0; |
3612 | } |
3613 | |
3614 | llvm::Constant *Data[] = {Filename, Builder.getInt32(C: Line), |
3615 | Builder.getInt32(C: Column)}; |
3616 | |
3617 | return llvm::ConstantStruct::getAnon(V: Data); |
3618 | } |
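     | // Illustrative sketch (assumed path): with
     | // -fsanitize-undefined-strip-path-components=2, "/a/b/c/f.c" is emitted as
     | // "b/c/f.c" (the root "/" counts as the first component); a negative value
     | // instead keeps that many trailing components.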
3619 | |
3620 | namespace { |
3621 | /// Specify under what conditions this check can be recovered from.
3622 | enum class CheckRecoverableKind {
3623 |   /// Always terminate program execution if this check fails.
3624 |   Unrecoverable,
3625 |   /// The check supports recovery; the runtime has both fatal (noreturn) and
3626 |   /// non-fatal handlers for this check.
3627 |   Recoverable,
3628 |   /// The runtime conditionally aborts; recovery must always be supported.
3629 |   AlwaysRecoverable
3630 | }; |
3631 | } |
3632 | |
3633 | static CheckRecoverableKind |
3634 | getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal) { |
3635 | if (Ordinal == SanitizerKind::SO_Vptr) |
3636 | return CheckRecoverableKind::AlwaysRecoverable; |
3637 | else if (Ordinal == SanitizerKind::SO_Return || |
3638 | Ordinal == SanitizerKind::SO_Unreachable) |
3639 | return CheckRecoverableKind::Unrecoverable; |
3640 | else |
3641 | return CheckRecoverableKind::Recoverable; |
3642 | } |
3643 | |
3644 | namespace { |
3645 | struct SanitizerHandlerInfo { |
3646 | char const *const Name; |
3647 | unsigned Version; |
3648 | }; |
3649 | } |
3650 | |
3651 | const SanitizerHandlerInfo SanitizerHandlers[] = { |
3652 | #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version}, |
3653 | LIST_SANITIZER_CHECKS |
3654 | #undef SANITIZER_CHECK |
3655 | }; |
3656 | |
3657 | static void emitCheckHandlerCall(CodeGenFunction &CGF, |
3658 | llvm::FunctionType *FnType, |
3659 | ArrayRef<llvm::Value *> FnArgs, |
3660 | SanitizerHandler CheckHandler, |
3661 | CheckRecoverableKind RecoverKind, bool IsFatal, |
3662 | llvm::BasicBlock *ContBB, bool NoMerge) { |
3663 | assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable); |
3664 | std::optional<ApplyDebugLocation> DL; |
3665 | if (!CGF.Builder.getCurrentDebugLocation()) { |
3666 | // Ensure that the call has at least an artificial debug location. |
3667 | DL.emplace(args&: CGF, args: SourceLocation()); |
3668 | } |
3669 | bool NeedsAbortSuffix = |
3670 | IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable; |
3671 | bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime; |
3672 | const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler]; |
3673 | const StringRef CheckName = CheckInfo.Name; |
3674 | std::string FnName = "__ubsan_handle_" + CheckName.str(); |
3675 | if (CheckInfo.Version && !MinimalRuntime) |
3676 | FnName += "_v" + llvm::utostr(X: CheckInfo.Version); |
3677 | if (MinimalRuntime) |
3678 | FnName += "_minimal" ; |
3679 | if (NeedsAbortSuffix) |
3680 | FnName += "_abort" ; |
3681 | bool MayReturn = |
3682 | !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable; |
3683 | |
3684 | llvm::AttrBuilder B(CGF.getLLVMContext()); |
3685 | if (!MayReturn) { |
3686 | B.addAttribute(Val: llvm::Attribute::NoReturn) |
3687 | .addAttribute(Val: llvm::Attribute::NoUnwind); |
3688 | } |
3689 | B.addUWTableAttr(Kind: llvm::UWTableKind::Default); |
3690 | |
3691 | llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction( |
3692 | Ty: FnType, Name: FnName, |
3693 | ExtraAttrs: llvm::AttributeList::get(C&: CGF.getLLVMContext(), |
3694 | Index: llvm::AttributeList::FunctionIndex, B), |
3695 | /*Local=*/true); |
3696 | llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(callee: Fn, args: FnArgs); |
3697 | NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel || |
3698 | (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>()); |
3699 | if (NoMerge) |
3700 | HandlerCall->addFnAttr(Kind: llvm::Attribute::NoMerge); |
3701 | if (!MayReturn) { |
3702 | HandlerCall->setDoesNotReturn(); |
3703 | CGF.Builder.CreateUnreachable(); |
3704 | } else { |
3705 | CGF.Builder.CreateBr(Dest: ContBB); |
3706 | } |
3707 | } |
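     | // Illustrative sketch of the naming scheme (hypothetical check "foo" with
     | // Version 2): a non-fatal call targets __ubsan_handle_foo_v2, the fatal
     | // flavor appends _abort, and -fsanitize-minimal-runtime drops the version
     | // and appends _minimal, e.g. __ubsan_handle_foo_minimal_abort.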
3708 | |
3709 | void CodeGenFunction::EmitCheck( |
3710 | ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked, |
3711 | SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs, |
3712 | ArrayRef<llvm::Value *> DynamicArgs) { |
3713 | assert(IsSanitizerScope); |
3714 | assert(Checked.size() > 0); |
3715 | assert(CheckHandler >= 0 && |
3716 | size_t(CheckHandler) < std::size(SanitizerHandlers)); |
3717 | const StringRef CheckName = SanitizerHandlers[CheckHandler].Name; |
3718 | |
3719 | llvm::Value *FatalCond = nullptr; |
3720 | llvm::Value *RecoverableCond = nullptr; |
3721 | llvm::Value *TrapCond = nullptr; |
3722 | bool NoMerge = false; |
3723 | // Expand checks into: |
3724 | // (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ... |
3725 | // We need separate allow_ubsan_check intrinsics because they have separately |
3726 | // specified cutoffs. |
3727 | // This expression looks expensive but will be simplified after |
3728 | // LowerAllowCheckPass. |
3729 | for (auto &[Check, Ord] : Checked) { |
3730 | llvm::Value *GuardedCheck = Check; |
3731 | if (ClSanitizeGuardChecks || |
3732 | (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] > 0)) { |
3733 | llvm::Value *Allow = Builder.CreateCall( |
3734 | Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::allow_ubsan_check), |
3735 | Args: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: Ord)); |
3736 | GuardedCheck = Builder.CreateOr(LHS: Check, RHS: Builder.CreateNot(V: Allow)); |
3737 | } |
3738 | |
3739 | // -fsanitize-trap= overrides -fsanitize-recover=. |
3740 | llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(O: Ord) ? TrapCond |
3741 | : CGM.getCodeGenOpts().SanitizeRecover.has(O: Ord) |
3742 | ? RecoverableCond |
3743 | : FatalCond; |
3744 | Cond = Cond ? Builder.CreateAnd(LHS: Cond, RHS: GuardedCheck) : GuardedCheck; |
3745 | |
3746 | if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(O: Ord)) |
3747 | NoMerge = true; |
3748 | } |
3749 | |
3750 | if (TrapCond) |
3751 | EmitTrapCheck(Checked: TrapCond, CheckHandlerID: CheckHandler, NoMerge); |
3752 | if (!FatalCond && !RecoverableCond) |
3753 | return; |
3754 | |
3755 | llvm::Value *JointCond; |
3756 | if (FatalCond && RecoverableCond) |
3757 | JointCond = Builder.CreateAnd(LHS: FatalCond, RHS: RecoverableCond); |
3758 | else |
3759 | JointCond = FatalCond ? FatalCond : RecoverableCond; |
3760 | assert(JointCond); |
3761 | |
3762 | CheckRecoverableKind RecoverKind = getRecoverableKind(Ordinal: Checked[0].second); |
3763 | assert(SanOpts.has(Checked[0].second)); |
3764 | #ifndef NDEBUG |
3765 | for (int i = 1, n = Checked.size(); i < n; ++i) { |
3766 | assert(RecoverKind == getRecoverableKind(Checked[i].second) && |
3767 | "All recoverable kinds in a single check must be same!" ); |
3768 | assert(SanOpts.has(Checked[i].second)); |
3769 | } |
3770 | #endif |
3771 | |
3772 | llvm::BasicBlock *Cont = createBasicBlock(name: "cont" ); |
3773 | llvm::BasicBlock *Handlers = createBasicBlock(name: "handler." + CheckName); |
3774 | llvm::Instruction *Branch = Builder.CreateCondBr(Cond: JointCond, True: Cont, False: Handlers); |
3775 | // Give a hint that we very much don't expect to execute the handler.
3776 | llvm::MDBuilder MDHelper(getLLVMContext()); |
3777 | llvm::MDNode *Node = MDHelper.createLikelyBranchWeights(); |
3778 | Branch->setMetadata(KindID: llvm::LLVMContext::MD_prof, Node); |
3779 | EmitBlock(BB: Handlers); |
3780 | |
3781 | // Handler functions take an i8* pointing to the (handler-specific) static |
3782 | // information block, followed by a sequence of intptr_t arguments |
3783 | // representing operand values. |
3784 | SmallVector<llvm::Value *, 4> Args; |
3785 | SmallVector<llvm::Type *, 4> ArgTypes; |
3786 | if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) { |
3787 | Args.reserve(N: DynamicArgs.size() + 1); |
3788 | ArgTypes.reserve(N: DynamicArgs.size() + 1); |
3789 | |
3790 | // Emit handler arguments and create handler function type. |
3791 | if (!StaticArgs.empty()) { |
3792 | llvm::Constant *Info = llvm::ConstantStruct::getAnon(V: StaticArgs); |
3793 | auto *InfoPtr = new llvm::GlobalVariable( |
3794 | CGM.getModule(), Info->getType(), false, |
3795 | llvm::GlobalVariable::PrivateLinkage, Info, "" , nullptr, |
3796 | llvm::GlobalVariable::NotThreadLocal, |
3797 | CGM.getDataLayout().getDefaultGlobalsAddressSpace()); |
3798 | InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3799 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV: InfoPtr); |
3800 | Args.push_back(Elt: InfoPtr); |
3801 | ArgTypes.push_back(Elt: Args.back()->getType()); |
3802 | } |
3803 | |
3804 | for (llvm::Value *DynamicArg : DynamicArgs) { |
3805 | Args.push_back(Elt: EmitCheckValue(V: DynamicArg)); |
3806 | ArgTypes.push_back(Elt: IntPtrTy); |
3807 | } |
3808 | } |
3809 | |
3810 | llvm::FunctionType *FnType = |
3811 | llvm::FunctionType::get(Result: CGM.VoidTy, Params: ArgTypes, isVarArg: false); |
3812 | |
3813 | if (!FatalCond || !RecoverableCond) { |
3814 | // Simple case: we need to generate a single handler call, either |
3815 | // fatal or non-fatal.
3816 | emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind, |
3817 | IsFatal: (FatalCond != nullptr), ContBB: Cont, NoMerge); |
3818 | } else { |
3819 | // Emit two handler calls: the first for the set of unrecoverable checks,
3820 | // the second for the recoverable ones.
3821 | llvm::BasicBlock *NonFatalHandlerBB = |
3822 | createBasicBlock(name: "non_fatal." + CheckName); |
3823 | llvm::BasicBlock *FatalHandlerBB = createBasicBlock(name: "fatal." + CheckName); |
3824 | Builder.CreateCondBr(Cond: FatalCond, True: NonFatalHandlerBB, False: FatalHandlerBB); |
3825 | EmitBlock(BB: FatalHandlerBB); |
3826 | emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind, IsFatal: true, |
3827 | ContBB: NonFatalHandlerBB, NoMerge); |
3828 | EmitBlock(BB: NonFatalHandlerBB); |
3829 | emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind, IsFatal: false, |
3830 | ContBB: Cont, NoMerge); |
3831 | } |
3832 | |
3833 | EmitBlock(BB: Cont); |
3834 | } |
3835 | |
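     | // The CFI slow path calls one of two runtime entry points, whose C-level
     | // prototypes are roughly (matching the function types built below):
     | //
     | //   void __cfi_slowpath(uint64 CallSiteTypeId, void *Ptr);
     | //   void __cfi_slowpath_diag(uint64 CallSiteTypeId, void *Ptr,
     | //                            void *DiagData);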
3836 | void CodeGenFunction::EmitCfiSlowPathCheck( |
3837 | SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond, |
3838 | llvm::ConstantInt *TypeId, llvm::Value *Ptr, |
3839 | ArrayRef<llvm::Constant *> StaticArgs) { |
3840 | llvm::BasicBlock *Cont = createBasicBlock(name: "cfi.cont" ); |
3841 | |
3842 | llvm::BasicBlock *CheckBB = createBasicBlock(name: "cfi.slowpath" ); |
3843 | llvm::BranchInst *BI = Builder.CreateCondBr(Cond, True: Cont, False: CheckBB); |
3844 | |
3845 | llvm::MDBuilder MDHelper(getLLVMContext()); |
3846 | llvm::MDNode *Node = MDHelper.createLikelyBranchWeights(); |
3847 | BI->setMetadata(KindID: llvm::LLVMContext::MD_prof, Node); |
3848 | |
3849 | EmitBlock(BB: CheckBB); |
3850 | |
3851 | bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(O: Ordinal); |
3852 | |
3853 | llvm::CallInst *CheckCall; |
3854 | llvm::FunctionCallee SlowPathFn; |
3855 | if (WithDiag) { |
3856 | llvm::Constant *Info = llvm::ConstantStruct::getAnon(V: StaticArgs); |
3857 | auto *InfoPtr = |
3858 | new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, |
3859 | llvm::GlobalVariable::PrivateLinkage, Info); |
3860 | InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3861 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV: InfoPtr); |
3862 | |
3863 | SlowPathFn = CGM.getModule().getOrInsertFunction( |
3864 | Name: "__cfi_slowpath_diag" , |
3865 | T: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, Int8PtrTy, Int8PtrTy}, |
3866 | isVarArg: false)); |
3867 | CheckCall = Builder.CreateCall(Callee: SlowPathFn, Args: {TypeId, Ptr, InfoPtr}); |
3868 | } else { |
3869 | SlowPathFn = CGM.getModule().getOrInsertFunction( |
3870 | Name: "__cfi_slowpath" , |
3871 | T: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, Int8PtrTy}, isVarArg: false)); |
3872 | CheckCall = Builder.CreateCall(Callee: SlowPathFn, Args: {TypeId, Ptr}); |
3873 | } |
3874 | |
3875 | CGM.setDSOLocal( |
3876 | cast<llvm::GlobalValue>(Val: SlowPathFn.getCallee()->stripPointerCasts())); |
3877 | CheckCall->setDoesNotThrow(); |
3878 | |
3879 | EmitBlock(BB: Cont); |
3880 | } |
3881 | |
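     | // In cross-DSO CFI mode, each DSO exports a weak symbol with the rough
     | // C-level prototype (the real body is generated by the CrossDSOCFI pass):
     | //
     | //   void __cfi_check(uint64 CallSiteTypeId, void *Addr, void *DiagData);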
3882 | // Emit a stub for the __cfi_check function so that the linker knows about
3883 | // this symbol in LTO mode.
3884 | void CodeGenFunction::EmitCfiCheckStub() { |
3885 | llvm::Module *M = &CGM.getModule(); |
3886 | ASTContext &C = getContext(); |
3887 | QualType QInt64Ty = C.getIntTypeForBitwidth(DestWidth: 64, Signed: false); |
3888 | |
3889 | FunctionArgList FnArgs; |
3890 | ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other); |
3891 | ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other); |
3892 | ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy, |
3893 | ImplicitParamKind::Other); |
3894 | FnArgs.push_back(Elt: &ArgCallsiteTypeId); |
3895 | FnArgs.push_back(Elt: &ArgAddr); |
3896 | FnArgs.push_back(Elt: &ArgCFICheckFailData); |
3897 | const CGFunctionInfo &FI = |
3898 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: C.VoidTy, args: FnArgs); |
3899 | |
3900 | llvm::Function *F = llvm::Function::Create( |
3901 | Ty: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, VoidPtrTy, VoidPtrTy}, isVarArg: false), |
3902 | Linkage: llvm::GlobalValue::WeakAnyLinkage, N: "__cfi_check" , M); |
3903 | CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F, /*IsThunk=*/false); |
3904 | CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F); |
3905 | F->setAlignment(llvm::Align(4096)); |
3906 | CGM.setDSOLocal(F); |
3907 | |
3908 | llvm::LLVMContext &Ctx = M->getContext(); |
3909 | llvm::BasicBlock *BB = llvm::BasicBlock::Create(Context&: Ctx, Name: "entry" , Parent: F); |
3910 | // The CrossDSOCFI pass is not executed if there is no executable code.
3911 | SmallVector<llvm::Value*> Args{F->getArg(i: 2), F->getArg(i: 1)}; |
3912 | llvm::CallInst::Create(Func: M->getFunction(Name: "__cfi_check_fail" ), Args, NameStr: "" , InsertBefore: BB); |
3913 | llvm::ReturnInst::Create(C&: Ctx, retVal: nullptr, InsertBefore: BB); |
3914 | } |
3915 | |
3916 | // This function is basically a switch over the CFI failure kind, which is
3917 | // extracted from CFICheckFailData (the first function argument). Each case is
3918 | // either llvm.trap or a call to one of the two runtime handlers, depending on
3919 | // the -fsanitize-trap and -fsanitize-recover settings. The default case (an
3920 | // invalid failure kind) traps, though it should never be reached.
3921 | // CFICheckFailData can be nullptr if the calling module has -fsanitize-trap
3922 | // behavior for this check kind; in that case __cfi_check_fail traps as well.
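     | // Sketch of the data layout this code assumes (it mirrors the struct types
     | // built below from Int8Ty, SourceLocationTy and VoidPtrTy):
     | //
     | //   struct CfiCheckFailData {
     | //     uint8 CheckKind;                               // a CFITCK_* value
     | //     struct { void *File; u32 Line; u32 Col; } Loc;
     | //     void *TypeDescriptor;
     | //   };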
3923 | void CodeGenFunction::EmitCfiCheckFail() { |
3924 | auto CheckHandler = SanitizerHandler::CFICheckFail; |
3925 | // TODO: the SanitizerKind is not yet determined for this check (and might |
3926 | // not even be available, if Data == nullptr). However, we still want to |
3927 | // annotate the instrumentation. We approximate this by using all the CFI |
3928 | // kinds. |
3929 | SanitizerDebugLocation SanScope( |
3930 | this, |
3931 | {SanitizerKind::SO_CFIVCall, SanitizerKind::SO_CFINVCall, |
3932 | SanitizerKind::SO_CFIDerivedCast, SanitizerKind::SO_CFIUnrelatedCast, |
3933 | SanitizerKind::SO_CFIICall}, |
3934 | CheckHandler); |
3935 | FunctionArgList Args; |
3936 | ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy, |
3937 | ImplicitParamKind::Other); |
3938 | ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy, |
3939 | ImplicitParamKind::Other); |
3940 | Args.push_back(Elt: &ArgData); |
3941 | Args.push_back(Elt: &ArgAddr); |
3942 | |
3943 | const CGFunctionInfo &FI = |
3944 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: getContext().VoidTy, args: Args); |
3945 | |
3946 | llvm::Function *F = llvm::Function::Create( |
3947 | Ty: llvm::FunctionType::get(Result: VoidTy, Params: {VoidPtrTy, VoidPtrTy}, isVarArg: false), |
3948 | Linkage: llvm::GlobalValue::WeakODRLinkage, N: "__cfi_check_fail" , M: &CGM.getModule()); |
3949 | |
3950 | CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F, /*IsThunk=*/false); |
3951 | CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F); |
3952 | F->setVisibility(llvm::GlobalValue::HiddenVisibility); |
3953 | |
3954 | StartFunction(GD: GlobalDecl(), RetTy: CGM.getContext().VoidTy, Fn: F, FnInfo: FI, Args, |
3955 | Loc: SourceLocation()); |
3956 | |
3957 | // This function is not affected by NoSanitizeList: it does not have a
3958 | // source location, but "src:*" would still apply. Revert any changes to
3959 | // SanOpts made in StartFunction.
3960 | SanOpts = CGM.getLangOpts().Sanitize; |
3961 | |
3962 | llvm::Value *Data = |
3963 | EmitLoadOfScalar(Addr: GetAddrOfLocalVar(VD: &ArgData), /*Volatile=*/false, |
3964 | Ty: CGM.getContext().VoidPtrTy, Loc: ArgData.getLocation()); |
3965 | llvm::Value *Addr = |
3966 | EmitLoadOfScalar(Addr: GetAddrOfLocalVar(VD: &ArgAddr), /*Volatile=*/false, |
3967 | Ty: CGM.getContext().VoidPtrTy, Loc: ArgAddr.getLocation()); |
3968 | |
3969 | // Data == nullptr means the calling module has trap behavior for this check.
3970 | llvm::Value *DataIsNotNullPtr = |
3971 | Builder.CreateICmpNE(LHS: Data, RHS: llvm::ConstantPointerNull::get(T: Int8PtrTy)); |
3972 | // TODO: since there is no data, we don't know the CheckKind, and therefore |
3973 | // cannot inspect CGM.getCodeGenOpts().SanitizeMergeHandlers. We default to |
3974 | // NoMerge = false. Users can disable merging by disabling optimization. |
3975 | EmitTrapCheck(Checked: DataIsNotNullPtr, CheckHandlerID: SanitizerHandler::CFICheckFail, |
3976 | /*NoMerge=*/false); |
3977 | |
3978 | llvm::StructType *SourceLocationTy = |
3979 | llvm::StructType::get(elt1: VoidPtrTy, elts: Int32Ty, elts: Int32Ty); |
3980 | llvm::StructType *CfiCheckFailDataTy = |
3981 | llvm::StructType::get(elt1: Int8Ty, elts: SourceLocationTy, elts: VoidPtrTy); |
3982 | |
3983 | llvm::Value *V = Builder.CreateConstGEP2_32( |
3984 | Ty: CfiCheckFailDataTy, Ptr: Builder.CreatePointerCast(V: Data, DestTy: UnqualPtrTy), Idx0: 0, Idx1: 0); |
3985 | |
3986 | Address CheckKindAddr(V, Int8Ty, getIntAlign()); |
3987 | llvm::Value *CheckKind = Builder.CreateLoad(Addr: CheckKindAddr); |
3988 | |
3989 | llvm::Value *AllVtables = llvm::MetadataAsValue::get( |
3990 | Context&: CGM.getLLVMContext(), |
3991 | MD: llvm::MDString::get(Context&: CGM.getLLVMContext(), Str: "all-vtables" )); |
3992 | llvm::Value *ValidVtable = Builder.CreateZExt( |
3993 | V: Builder.CreateCall(Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_test), |
3994 | Args: {Addr, AllVtables}), |
3995 | DestTy: IntPtrTy); |
3996 | |
3997 | const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = { |
3998 | {CFITCK_VCall, SanitizerKind::SO_CFIVCall}, |
3999 | {CFITCK_NVCall, SanitizerKind::SO_CFINVCall}, |
4000 | {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast}, |
4001 | {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast}, |
4002 | {CFITCK_ICall, SanitizerKind::SO_CFIICall}}; |
4003 | |
4004 | for (auto CheckKindOrdinalPair : CheckKinds) { |
4005 | int Kind = CheckKindOrdinalPair.first; |
4006 | SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second; |
4007 | |
4008 | // TODO: we could apply SanitizerAnnotateDebugInfo(Ordinal) instead of |
4009 | // relying on the SanitizerScope with all CFI ordinals |
4010 | |
4011 | llvm::Value *Cond = |
4012 | Builder.CreateICmpNE(LHS: CheckKind, RHS: llvm::ConstantInt::get(Ty: Int8Ty, V: Kind)); |
4013 | if (CGM.getLangOpts().Sanitize.has(O: Ordinal)) |
4014 | EmitCheck(Checked: std::make_pair(x&: Cond, y&: Ordinal), CheckHandler: SanitizerHandler::CFICheckFail, |
4015 | StaticArgs: {}, DynamicArgs: {Data, Addr, ValidVtable}); |
4016 | else |
4017 | // TODO: we can't rely on CGM.getCodeGenOpts().SanitizeMergeHandlers. |
4018 | // Although the compiler allows SanitizeMergeHandlers to be set |
4019 | // independently of CGM.getLangOpts().Sanitize, Driver/SanitizerArgs.cpp |
4020 | // requires that SanitizeMergeHandlers is a subset of Sanitize. |
4021 | EmitTrapCheck(Checked: Cond, CheckHandlerID: CheckHandler, /*NoMerge=*/false); |
4022 | } |
4023 | |
4024 | FinishFunction(); |
4025 | // The only reference to this function will be created during LTO link. |
4026 | // Make sure it survives until then. |
4027 | CGM.addUsedGlobal(GV: F); |
4028 | } |
4029 | |
4030 | void CodeGenFunction::EmitUnreachable(SourceLocation Loc) { |
4031 | if (SanOpts.has(K: SanitizerKind::Unreachable)) { |
4032 | auto CheckOrdinal = SanitizerKind::SO_Unreachable; |
4033 | auto CheckHandler = SanitizerHandler::BuiltinUnreachable; |
4034 | SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler); |
4035 | EmitCheck(Checked: std::make_pair(x: static_cast<llvm::Value *>(Builder.getFalse()), |
4036 | y&: CheckOrdinal), |
4037 | CheckHandler, StaticArgs: EmitCheckSourceLocation(Loc), DynamicArgs: {}); |
4038 | } |
4039 | Builder.CreateUnreachable(); |
4040 | } |
4041 | |
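     | // Rough shape of the emitted control flow (illustrative, not exact IR):
     | //
     | //   br i1 %ok, label %cont, label %trap, !prof !likely
     | // trap:
     | //   call void @llvm.ubsantrap(i8 <CheckHandlerID>)
     | //   unreachable
     | //
     | // Some targets fold the i8 argument into the trap instruction itself (as an
     | // immediate, e.g. of a brk or ud1), so the handler ID survives into the
     | // binary for tools to decode.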
4042 | void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked, |
4043 | SanitizerHandler CheckHandlerID, |
4044 | bool NoMerge) { |
4045 | llvm::BasicBlock *Cont = createBasicBlock(name: "cont" ); |
4046 | |
4047 | // If we're optimizing, collapse all calls to trap down to just one per |
4048 | // check-type per function to save on code size. |
4049 | if ((int)TrapBBs.size() <= CheckHandlerID) |
4050 | TrapBBs.resize(N: CheckHandlerID + 1); |
4051 | |
4052 | llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID]; |
4053 | |
4054 | NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel || |
4055 | (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>()); |
4056 | |
4057 | llvm::MDBuilder MDHelper(getLLVMContext()); |
4058 | if (TrapBB && !NoMerge) { |
4059 | auto Call = TrapBB->begin(); |
4060 | assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB" ); |
4061 | |
4062 | Call->applyMergedLocation(LocA: Call->getDebugLoc(), |
4063 | LocB: Builder.getCurrentDebugLocation()); |
4064 | Builder.CreateCondBr(Cond: Checked, True: Cont, False: TrapBB, |
4065 | BranchWeights: MDHelper.createLikelyBranchWeights()); |
4066 | } else { |
4067 | TrapBB = createBasicBlock(name: "trap" ); |
4068 | Builder.CreateCondBr(Cond: Checked, True: Cont, False: TrapBB, |
4069 | BranchWeights: MDHelper.createLikelyBranchWeights()); |
4070 | EmitBlock(BB: TrapBB); |
4071 | |
4072 | llvm::CallInst *TrapCall = |
4073 | Builder.CreateCall(Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::ubsantrap), |
4074 | Args: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: CheckHandlerID)); |
4075 | |
4076 | if (!CGM.getCodeGenOpts().TrapFuncName.empty()) { |
4077 | auto A = llvm::Attribute::get(Context&: getLLVMContext(), Kind: "trap-func-name" , |
4078 | Val: CGM.getCodeGenOpts().TrapFuncName); |
4079 | TrapCall->addFnAttr(Attr: A); |
4080 | } |
4081 | if (NoMerge) |
4082 | TrapCall->addFnAttr(Kind: llvm::Attribute::NoMerge); |
4083 | TrapCall->setDoesNotReturn(); |
4084 | TrapCall->setDoesNotThrow(); |
4085 | Builder.CreateUnreachable(); |
4086 | } |
4087 | |
4088 | EmitBlock(BB: Cont); |
4089 | } |
4090 | |
4091 | llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) { |
4092 | llvm::CallInst *TrapCall = |
4093 | Builder.CreateCall(Callee: CGM.getIntrinsic(IID: IntrID)); |
4094 | |
4095 | if (!CGM.getCodeGenOpts().TrapFuncName.empty()) { |
4096 | auto A = llvm::Attribute::get(Context&: getLLVMContext(), Kind: "trap-func-name" , |
4097 | Val: CGM.getCodeGenOpts().TrapFuncName); |
4098 | TrapCall->addFnAttr(Attr: A); |
4099 | } |
4100 | |
4101 | if (InNoMergeAttributedStmt) |
4102 | TrapCall->addFnAttr(Kind: llvm::Attribute::NoMerge); |
4103 | return TrapCall; |
4104 | } |
4105 | |
4106 | Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E, |
4107 | LValueBaseInfo *BaseInfo, |
4108 | TBAAAccessInfo *TBAAInfo) { |
4109 | assert(E->getType()->isArrayType() && |
4110 | "Array to pointer decay must have array source type!" ); |
4111 | |
4112 | // Expressions of array type can't be bitfields or vector elements. |
4113 | LValue LV = EmitLValue(E); |
4114 | Address Addr = LV.getAddress(); |
4115 | |
4116 | // If the array type was an incomplete type, we need to make sure |
4117 | // the decay ends up being the right type. |
4118 | llvm::Type *NewTy = ConvertType(T: E->getType()); |
4119 | Addr = Addr.withElementType(ElemTy: NewTy); |
4120 | |
4121 | // Note that VLA pointers are always decayed, so we don't need to do |
4122 | // anything here. |
4123 | if (!E->getType()->isVariableArrayType()) { |
4124 | assert(isa<llvm::ArrayType>(Addr.getElementType()) && |
4125 | "Expected pointer to array" ); |
4126 | Addr = Builder.CreateConstArrayGEP(Addr, Index: 0, Name: "arraydecay" ); |
4127 | } |
4128 | |
4129 | // The result of this decay conversion points to an array element within the |
4130 | // base lvalue. However, since TBAA currently does not support representing |
4131 | // accesses to elements of member arrays, we conservatively represent accesses |
4132 | // to the pointee object as if it had no base lvalue specified.
4133 | // TODO: Support TBAA for member arrays. |
4134 | QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType(); |
4135 | if (BaseInfo) *BaseInfo = LV.getBaseInfo(); |
4136 | if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(AccessType: EltType); |
4137 | |
4138 | return Addr.withElementType(ElemTy: ConvertTypeForMem(T: EltType)); |
4139 | } |
4140 | |
4141 | /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an |
4142 | /// array to pointer, return the array subexpression. |
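     | /// For example, given
     | /// \code
     | ///   int A[10];
     | ///   A[i];   // the base is ArrayToPointerDecay(A); this returns A
     | /// \endcode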
4143 | static const Expr *isSimpleArrayDecayOperand(const Expr *E) { |
4144 | // If this isn't just an array->pointer decay, bail out. |
4145 | const auto *CE = dyn_cast<CastExpr>(Val: E); |
4146 | if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay) |
4147 | return nullptr; |
4148 | |
4149 | // If this is a decay from a variable-length array, bail out.
4150 | const Expr *SubExpr = CE->getSubExpr(); |
4151 | if (SubExpr->getType()->isVariableArrayType()) |
4152 | return nullptr; |
4153 | |
4154 | return SubExpr; |
4155 | } |
4156 | |
4157 | static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF, |
4158 | llvm::Type *elemType, |
4159 | llvm::Value *ptr, |
4160 | ArrayRef<llvm::Value*> indices, |
4161 | bool inbounds, |
4162 | bool signedIndices, |
4163 | SourceLocation loc, |
4164 | const llvm::Twine &name = "arrayidx" ) { |
4165 | if (inbounds) { |
4166 | return CGF.EmitCheckedInBoundsGEP(ElemTy: elemType, Ptr: ptr, IdxList: indices, SignedIndices: signedIndices, |
4167 | IsSubtraction: CodeGenFunction::NotSubtraction, Loc: loc, |
4168 | Name: name); |
4169 | } else { |
4170 | return CGF.Builder.CreateGEP(Ty: elemType, Ptr: ptr, IdxList: indices, Name: name); |
4171 | } |
4172 | } |
4173 | |
4174 | static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, |
4175 | ArrayRef<llvm::Value *> indices, |
4176 | llvm::Type *elementType, bool inbounds, |
4177 | bool signedIndices, SourceLocation loc, |
4178 | CharUnits align, |
4179 | const llvm::Twine &name = "arrayidx" ) { |
4180 | if (inbounds) { |
4181 | return CGF.EmitCheckedInBoundsGEP(Addr: addr, IdxList: indices, elementType, SignedIndices: signedIndices, |
4182 | IsSubtraction: CodeGenFunction::NotSubtraction, Loc: loc, |
4183 | Align: align, Name: name); |
4184 | } else { |
4185 | return CGF.Builder.CreateGEP(Addr: addr, IdxList: indices, ElementType: elementType, Align: align, Name: name); |
4186 | } |
4187 | } |
4188 | |
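     | // For example, for a 16-byte-aligned array of 4-byte elements, a constant
     | // index of 2 yields byte offset 8 and hence 8-byte alignment, while an
     | // unknown index can only be assumed to be 4-byte aligned.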
4189 | static CharUnits getArrayElementAlign(CharUnits arrayAlign, |
4190 | llvm::Value *idx, |
4191 | CharUnits eltSize) { |
4192 | // If we have a constant index, we can use the exact offset of the |
4193 | // element we're accessing. |
4194 | if (auto constantIdx = dyn_cast<llvm::ConstantInt>(Val: idx)) { |
4195 | CharUnits offset = constantIdx->getZExtValue() * eltSize; |
4196 | return arrayAlign.alignmentAtOffset(offset); |
4197 | |
4198 | // Otherwise, use the worst-case alignment for any element. |
4199 | } else { |
4200 | return arrayAlign.alignmentOfArrayElement(elementSize: eltSize); |
4201 | } |
4202 | } |
4203 | |
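     | // For a VLA type such as 'int[n][m][4]', this peels off the variable
     | // dimensions and returns the outermost fixed-size element type, here
     | // 'int[4]'.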
4204 | static QualType getFixedSizeElementType(const ASTContext &ctx, |
4205 | const VariableArrayType *vla) { |
4206 | QualType eltType; |
4207 | do { |
4208 | eltType = vla->getElementType(); |
4209 | } while ((vla = ctx.getAsVariableArrayType(T: eltType))); |
4210 | return eltType; |
4211 | } |
4212 | |
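     | // BPF: records annotated with the preserve_static_offset attribute ask
     | // CodeGen to keep field accesses expressed as offsets from the start of the
     | // record, e.g. (illustrative):
     | //
     | //   struct foo { int a; int b; } __attribute__((preserve_static_offset));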
4213 | static bool hasBPFPreserveStaticOffset(const RecordDecl *D) { |
4214 | return D && D->hasAttr<BPFPreserveStaticOffsetAttr>(); |
4215 | } |
4216 | |
4217 | static bool hasBPFPreserveStaticOffset(const Expr *E) { |
4218 | if (!E) |
4219 | return false; |
4220 | QualType PointeeType = E->getType()->getPointeeType(); |
4221 | if (PointeeType.isNull()) |
4222 | return false; |
4223 | if (const auto *BaseDecl = PointeeType->getAsRecordDecl()) |
4224 | return hasBPFPreserveStaticOffset(D: BaseDecl); |
4225 | return false; |
4226 | } |
4227 | |
4228 | // Wraps Addr with a call to llvm.preserve.static.offset intrinsic. |
4229 | static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, |
4230 | Address &Addr) { |
4231 | if (!CGF.getTarget().getTriple().isBPF()) |
4232 | return Addr; |
4233 | |
4234 | llvm::Function *Fn = |
4235 | CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::preserve_static_offset); |
4236 | llvm::CallInst *Call = CGF.Builder.CreateCall(Callee: Fn, Args: {Addr.emitRawPointer(CGF)}); |
4237 | return Address(Call, Addr.getElementType(), Addr.getAlignment()); |
4238 | } |
4239 | |
4240 | /// Given an array base, check whether its member access belongs to a record |
4241 | /// with the preserve_access_index attribute.
4242 | static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) { |
4243 | if (!ArrayBase || !CGF.getDebugInfo()) |
4244 | return false; |
4245 | |
4246 | // Only support base as either a MemberExpr or DeclRefExpr. |
4247 | // DeclRefExpr to cover cases like: |
4248 | // struct s { int a; int b[10]; }; |
4249 | // struct s *p; |
4250 | // p[1].a |
4251 | // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr. |
4252 | // p->b[5] is a MemberExpr example. |
4253 | const Expr *E = ArrayBase->IgnoreImpCasts(); |
4254 | if (const auto *ME = dyn_cast<MemberExpr>(Val: E)) |
4255 | return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>(); |
4256 | |
4257 | if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E)) { |
4258 | const auto *VarDef = dyn_cast<VarDecl>(Val: DRE->getDecl()); |
4259 | if (!VarDef) |
4260 | return false; |
4261 | |
4262 | const auto *PtrT = VarDef->getType()->getAs<PointerType>(); |
4263 | if (!PtrT) |
4264 | return false; |
4265 | |
4266 | const auto *PointeeT = PtrT->getPointeeType() |
4267 | ->getUnqualifiedDesugaredType(); |
4268 | if (const auto *RecT = dyn_cast<RecordType>(Val: PointeeT)) |
4269 | return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>(); |
4270 | return false; |
4271 | } |
4272 | |
4273 | return false; |
4274 | } |
4275 | |
4276 | static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, |
4277 | ArrayRef<llvm::Value *> indices, |
4278 | QualType eltType, bool inbounds, |
4279 | bool signedIndices, SourceLocation loc, |
4280 | QualType *arrayType = nullptr, |
4281 | const Expr *Base = nullptr, |
4282 | const llvm::Twine &name = "arrayidx" ) { |
4283 | // All the indices except the last must be zero.
4284 | #ifndef NDEBUG |
4285 | for (auto *idx : indices.drop_back()) |
4286 | assert(isa<llvm::ConstantInt>(idx) && |
4287 | cast<llvm::ConstantInt>(idx)->isZero()); |
4288 | #endif |
4289 | |
4290 | // Determine the element size of the statically-sized base. This is the
4291 | // unit in which the indices are expressed.
4292 | if (auto vla = CGF.getContext().getAsVariableArrayType(T: eltType)) { |
4293 | eltType = getFixedSizeElementType(ctx: CGF.getContext(), vla); |
4294 | } |
4295 | |
4296 | // We can use that to compute the best alignment of the element. |
4297 | CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: eltType); |
4298 | CharUnits eltAlign = |
4299 | getArrayElementAlign(arrayAlign: addr.getAlignment(), idx: indices.back(), eltSize); |
4300 | |
4301 | if (hasBPFPreserveStaticOffset(E: Base)) |
4302 | addr = wrapWithBPFPreserveStaticOffset(CGF, Addr&: addr); |
4303 | |
4304 | llvm::Value *eltPtr; |
4305 | auto LastIndex = dyn_cast<llvm::ConstantInt>(Val: indices.back()); |
4306 | if (!LastIndex || |
4307 | (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, ArrayBase: Base))) { |
4308 | addr = emitArraySubscriptGEP(CGF, addr, indices, |
4309 | elementType: CGF.ConvertTypeForMem(T: eltType), inbounds, |
4310 | signedIndices, loc, align: eltAlign, name); |
4311 | return addr; |
4312 | } else { |
4313 | // Remember the original array subscript for the BPF target.
4314 | unsigned idx = LastIndex->getZExtValue(); |
4315 | llvm::DIType *DbgInfo = nullptr; |
4316 | if (arrayType) |
4317 | DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(Ty: *arrayType, Loc: loc); |
4318 | eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex( |
4319 | ElTy: addr.getElementType(), Base: addr.emitRawPointer(CGF), Dimension: indices.size() - 1, |
4320 | LastIndex: idx, DbgInfo); |
4321 | } |
4322 | |
4323 | return Address(eltPtr, CGF.ConvertTypeForMem(T: eltType), eltAlign); |
4324 | } |
4325 | |
4326 | namespace { |
4327 | |
4328 | /// StructFieldAccess is a simple visitor class to grab the first l-value to |
4329 | /// r-value cast Expr. |
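     | /// For a subscript such as '(s->ptr)[i]', the base is an LValueToRValue
     | /// cast of 's->ptr', possibly parenthesized; this visitor walks through the
     | /// parens and returns that cast, or null if no such cast is found.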
4330 | struct StructFieldAccess |
4331 | : public ConstStmtVisitor<StructFieldAccess, const Expr *> { |
4332 | const Expr *VisitCastExpr(const CastExpr *E) { |
4333 | if (E->getCastKind() == CK_LValueToRValue) |
4334 | return E; |
4335 | return Visit(S: E->getSubExpr()); |
4336 | } |
4337 | const Expr *VisitParenExpr(const ParenExpr *E) { |
4338 | return Visit(S: E->getSubExpr()); |
4339 | } |
4340 | }; |
4341 | |
4342 | } // end anonymous namespace |
4343 | |
4344 | /// Compute the offset in bits of \p Field from the beginning of \p RD,
     | /// accumulating into \p Offset; returns true if the field was found.
4345 | static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, |
4346 | const FieldDecl *Field, int64_t &Offset) { |
4347 | ASTContext &Ctx = CGF.getContext(); |
4348 | const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(D: RD); |
4349 | unsigned FieldNo = 0; |
4350 | |
4351 | for (const FieldDecl *FD : RD->fields()) { |
4352 | if (FD == Field) { |
4353 | Offset += Layout.getFieldOffset(FieldNo); |
4354 | return true; |
4355 | } |
4356 | |
4357 | QualType Ty = FD->getType(); |
4358 | if (Ty->isRecordType()) |
4359 | if (getFieldOffsetInBits(CGF, RD: Ty->getAsRecordDecl(), Field, Offset)) { |
4360 | Offset += Layout.getFieldOffset(FieldNo); |
4361 | return true; |
4362 | } |
4363 | |
4364 | if (!RD->isUnion()) |
4365 | ++FieldNo; |
4366 | } |
4367 | |
4368 | return false; |
4369 | } |
4370 | |
4371 | /// Returns the relative offset difference between \p FD1 and \p FD2. |
4372 | /// \code |
4373 | /// offsetof(struct foo, FD1) - offsetof(struct foo, FD2) |
4374 | /// \endcode |
4375 | /// Both fields must be within the same struct. |
4376 | static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF, |
4377 | const FieldDecl *FD1, |
4378 | const FieldDecl *FD2) { |
4379 | const RecordDecl *FD1OuterRec = |
4380 | FD1->getParent()->getOuterLexicalRecordContext(); |
4381 | const RecordDecl *FD2OuterRec = |
4382 | FD2->getParent()->getOuterLexicalRecordContext(); |
4383 | |
4384 | if (FD1OuterRec != FD2OuterRec) |
4385 | // Fields must be within the same RecordDecl. |
4386 | return std::optional<int64_t>(); |
4387 | |
4388 | int64_t FD1Offset = 0; |
4389 | if (!getFieldOffsetInBits(CGF, RD: FD1OuterRec, Field: FD1, Offset&: FD1Offset)) |
4390 | return std::optional<int64_t>(); |
4391 | |
4392 | int64_t FD2Offset = 0; |
4393 | if (!getFieldOffsetInBits(CGF, RD: FD2OuterRec, Field: FD2, Offset&: FD2Offset)) |
4394 | return std::optional<int64_t>(); |
4395 | |
4396 | return std::make_optional<int64_t>(t: FD1Offset - FD2Offset); |
4397 | } |
4398 | |
4399 | /// EmitCountedByBoundsChecking - If the array being accessed has a "counted_by" |
4400 | /// attribute, generate bounds checking code. The "count" field is at the top |
4401 | /// level of the struct or in an anonymous struct that is also at the top level.
4402 | /// Future expansions may allow the "count" to reside at any place in the |
4403 | /// struct, but the value of "counted_by" will be a "simple" path to the count, |
4404 | /// e.g. "a.b.count", so we shouldn't need the full force of EmitLValue or
4405 | /// similar to emit the correct GEP. |
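     | /// For example (illustrative):
     | /// \code
     | ///   struct S {
     | ///     int count;
     | ///     int fam[] __attribute__((counted_by(count)));
     | ///   };
     | /// \endcode
     | /// An access 'p->fam[idx]' is then bounds-checked against the value loaded
     | /// from 'p->count'.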
4406 | void CodeGenFunction::EmitCountedByBoundsChecking( |
4407 | const Expr *E, llvm::Value *Idx, Address Addr, QualType IdxTy, |
4408 | QualType ArrayTy, bool Accessed, bool FlexibleArray) { |
4409 | const auto *ME = dyn_cast<MemberExpr>(Val: E->IgnoreImpCasts()); |
4410 | if (!ME || !ME->getMemberDecl()->getType()->isCountAttributedType()) |
4411 | return; |
4412 | |
4413 | const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel = |
4414 | getLangOpts().getStrictFlexArraysLevel(); |
4415 | if (FlexibleArray && |
4416 | !ME->isFlexibleArrayMemberLike(Context: getContext(), StrictFlexArraysLevel)) |
4417 | return; |
4418 | |
4419 | const FieldDecl *FD = cast<FieldDecl>(Val: ME->getMemberDecl()); |
4420 | const FieldDecl *CountFD = FD->findCountedByField(); |
4421 | if (!CountFD) |
4422 | return; |
4423 | |
4424 | if (std::optional<int64_t> Diff = |
4425 | getOffsetDifferenceInBits(CGF&: *this, FD1: CountFD, FD2: FD)) { |
4426 | if (!Addr.isValid()) { |
4427 | // An invalid Address indicates we're checking a pointer array access. |
4428 | // Emit the checked L-Value here. |
4429 | LValue LV = EmitCheckedLValue(E, TCK: TCK_MemberAccess); |
4430 | Addr = LV.getAddress(); |
4431 | } |
4432 | |
4433 | // FIXME: The 'static_cast' is necessary, otherwise the result turns into a |
4434 | // uint64_t, which messes things up if we have a negative offset difference. |
4435 | Diff = *Diff / static_cast<int64_t>(CGM.getContext().getCharWidth()); |
4436 | |
4437 | // Create a GEP with the byte offset between the counted object and the |
4438 | // count and use that to load the count value. |
4439 | Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty: Int8PtrTy, ElementTy: Int8Ty); |
4440 | |
4441 | llvm::Type *CountTy = ConvertType(T: CountFD->getType()); |
4442 | llvm::Value *Res = |
4443 | Builder.CreateInBoundsGEP(Ty: Int8Ty, Ptr: Addr.emitRawPointer(CGF&: *this), |
4444 | IdxList: Builder.getInt32(C: *Diff), Name: ".counted_by.gep" ); |
4445 | Res = Builder.CreateAlignedLoad(Ty: CountTy, Addr: Res, Align: getIntAlign(), |
4446 | Name: ".counted_by.load" ); |
4447 | |
4448 | // Now emit the bounds checking. |
4449 | EmitBoundsCheckImpl(E, Bound: Res, Index: Idx, IndexType: IdxTy, IndexedType: ArrayTy, Accessed); |
4450 | } |
4451 | } |
4452 | |
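     | // Lowering sketch for the main cases handled below (illustrative):
     | //
     | //   int A[10];    A[i]  ->  gep A, 0, i   // one GEP, not decay-then-gep
     | //   int *p;       p[i]  ->  gep p, i
     | //   int B[n][m];  B[i]  ->  the result is itself a VLA; the index is
     | //                           scaled by the captured element count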
4453 | LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, |
4454 | bool Accessed) { |
4455 | // The index must always be an integer, which is not an aggregate. Emit it |
4456 | // in lexical order (this complexity is, sadly, required by C++17). |
4457 | llvm::Value *IdxPre = |
4458 | (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E: E->getIdx()) : nullptr; |
4459 | bool SignedIndices = false; |
4460 | auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * { |
4461 | auto *Idx = IdxPre; |
4462 | if (E->getLHS() != E->getIdx()) { |
4463 | assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS" ); |
4464 | Idx = EmitScalarExpr(E: E->getIdx()); |
4465 | } |
4466 | |
4467 | QualType IdxTy = E->getIdx()->getType(); |
4468 | bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); |
4469 | SignedIndices |= IdxSigned; |
4470 | |
4471 | if (SanOpts.has(K: SanitizerKind::ArrayBounds)) |
4472 | EmitBoundsCheck(E, Base: E->getBase(), Index: Idx, IndexType: IdxTy, Accessed); |
4473 | |
4474 | // Extend or truncate the index type to 32 or 64 bits.
4475 | if (Promote && Idx->getType() != IntPtrTy) |
4476 | Idx = Builder.CreateIntCast(V: Idx, DestTy: IntPtrTy, isSigned: IdxSigned, Name: "idxprom" ); |
4477 | |
4478 | return Idx; |
4479 | }; |
4480 | IdxPre = nullptr; |
4481 | |
4482 | // If the base is a vector type, then we are forming a vector element lvalue |
4483 | // with this subscript. |
4484 | if (E->getBase()->getType()->isSubscriptableVectorType() && |
4485 | !isa<ExtVectorElementExpr>(Val: E->getBase())) { |
4486 | // Emit the vector as an lvalue to get its address. |
4487 | LValue LHS = EmitLValue(E: E->getBase()); |
4488 | auto *Idx = EmitIdxAfterBase(/*Promote*/false); |
4489 | assert(LHS.isSimple() && "Can only subscript lvalue vectors here!" ); |
4490 | return LValue::MakeVectorElt(vecAddress: LHS.getAddress(), Idx, type: E->getBase()->getType(), |
4491 | BaseInfo: LHS.getBaseInfo(), TBAAInfo: TBAAAccessInfo()); |
4492 | } |
4493 | |
4494 | // All the other cases basically behave like simple offsetting. |
4495 | |
4496 | // Handle the extvector case we ignored above. |
4497 | if (isa<ExtVectorElementExpr>(Val: E->getBase())) { |
4498 | LValue LV = EmitLValue(E: E->getBase()); |
4499 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4500 | Address Addr = EmitExtVectorElementLValue(LV); |
4501 | |
4502 | QualType EltType = LV.getType()->castAs<VectorType>()->getElementType(); |
4503 | Addr = emitArraySubscriptGEP(CGF&: *this, addr: Addr, indices: Idx, eltType: EltType, /*inbounds*/ true, |
4504 | signedIndices: SignedIndices, loc: E->getExprLoc()); |
4505 | return MakeAddrLValue(Addr, T: EltType, BaseInfo: LV.getBaseInfo(), |
4506 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: EltType)); |
4507 | } |
4508 | |
4509 | LValueBaseInfo EltBaseInfo; |
4510 | TBAAAccessInfo EltTBAAInfo; |
4511 | Address Addr = Address::invalid(); |
4512 | if (const VariableArrayType *vla = |
4513 | getContext().getAsVariableArrayType(T: E->getType())) { |
4514 | // The base must be a pointer, which is not an aggregate. Emit |
4515 | // it. It needs to be emitted first in case it's what captures |
4516 | // the VLA bounds. |
4517 | Addr = EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &EltBaseInfo, TBAAInfo: &EltTBAAInfo); |
4518 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4519 | |
4520 | // The element count here is the total number of non-VLA elements. |
4521 | llvm::Value *numElements = getVLASize(vla).NumElts; |
4522 | |
4523 | // Effectively, the multiply by the VLA size is part of the GEP. |
4524 | // GEP indexes are signed, and scaling an index isn't permitted to |
4525 | // signed-overflow, so we use the same semantics for our explicit |
4526 | // multiply. We suppress this if overflow is not undefined behavior. |
4527 | if (getLangOpts().PointerOverflowDefined) { |
4528 | Idx = Builder.CreateMul(LHS: Idx, RHS: numElements); |
4529 | } else { |
4530 | Idx = Builder.CreateNSWMul(LHS: Idx, RHS: numElements); |
4531 | } |
4532 | |
4533 | Addr = emitArraySubscriptGEP(CGF&: *this, addr: Addr, indices: Idx, eltType: vla->getElementType(), |
4534 | inbounds: !getLangOpts().PointerOverflowDefined, |
4535 | signedIndices: SignedIndices, loc: E->getExprLoc()); |
4536 | |
4537 | } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ |
4538 | // Indexing over an interface, as in "NSString *P; P[4];" |
4539 | |
4540 | // Emit the base pointer. |
4541 | Addr = EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &EltBaseInfo, TBAAInfo: &EltTBAAInfo); |
4542 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4543 | |
4544 | CharUnits InterfaceSize = getContext().getTypeSizeInChars(T: OIT); |
4545 | llvm::Value *InterfaceSizeVal = |
4546 | llvm::ConstantInt::get(Ty: Idx->getType(), V: InterfaceSize.getQuantity()); |
4547 | |
4548 | llvm::Value *ScaledIdx = Builder.CreateMul(LHS: Idx, RHS: InterfaceSizeVal); |
4549 | |
4550 | // We don't necessarily build correct LLVM struct types for ObjC
4551 | // interfaces, so we can't rely on GEP to do this scaling
4552 | // correctly; we need to cast to i8* instead. FIXME: is this actually
4553 | // true? A lot of other things in the fragile ABI would break...
4554 | llvm::Type *OrigBaseElemTy = Addr.getElementType(); |
4555 | |
4556 | // Do the GEP. |
4557 | CharUnits EltAlign = |
4558 | getArrayElementAlign(arrayAlign: Addr.getAlignment(), idx: Idx, eltSize: InterfaceSize); |
4559 | llvm::Value *EltPtr = |
4560 | emitArraySubscriptGEP(CGF&: *this, elemType: Int8Ty, ptr: Addr.emitRawPointer(CGF&: *this), |
4561 | indices: ScaledIdx, inbounds: false, signedIndices: SignedIndices, loc: E->getExprLoc()); |
4562 | Addr = Address(EltPtr, OrigBaseElemTy, EltAlign); |
4563 | } else if (const Expr *Array = isSimpleArrayDecayOperand(E: E->getBase())) { |
4564 | // If this is A[i] where A is an array, the frontend will have decayed the |
4565 | // base to be a ArrayToPointerDecay implicit cast. While correct, it is |
4566 | // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a |
4567 | // "gep x, i" here. Emit one "gep A, 0, i". |
4568 | assert(Array->getType()->isArrayType() && |
4569 | "Array to pointer decay must have array source type!" ); |
4570 | LValue ArrayLV; |
4571 | // For simple multidimensional array indexing, set the 'accessed' flag for |
4572 | // better bounds-checking of the base expression. |
4573 | if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: Array)) |
4574 | ArrayLV = EmitArraySubscriptExpr(E: ASE, /*Accessed*/ true); |
4575 | else |
4576 | ArrayLV = EmitLValue(E: Array); |
4577 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4578 | |
4579 | if (SanOpts.has(K: SanitizerKind::ArrayBounds)) |
4580 | EmitCountedByBoundsChecking(E: Array, Idx, Addr: ArrayLV.getAddress(), |
4581 | IdxTy: E->getIdx()->getType(), ArrayTy: Array->getType(), |
4582 | Accessed, /*FlexibleArray=*/true); |
4583 | |
4584 | // Propagate the alignment from the array itself to the result. |
4585 | QualType arrayType = Array->getType(); |
4586 | Addr = emitArraySubscriptGEP( |
4587 | CGF&: *this, addr: ArrayLV.getAddress(), indices: {CGM.getSize(numChars: CharUnits::Zero()), Idx}, |
4588 | eltType: E->getType(), inbounds: !getLangOpts().PointerOverflowDefined, signedIndices: SignedIndices, |
4589 | loc: E->getExprLoc(), arrayType: &arrayType, Base: E->getBase()); |
4590 | EltBaseInfo = ArrayLV.getBaseInfo(); |
4591 | if (!CGM.getCodeGenOpts().NewStructPathTBAA) { |
4592 | // Since CodeGenTBAA::getTypeInfoHelper only handles array types for |
4593 | // new struct path TBAA, we must use a plain access.
4594 | EltTBAAInfo = CGM.getTBAAInfoForSubobject(Base: ArrayLV, AccessType: E->getType()); |
4595 | } else if (ArrayLV.getTBAAInfo().isMayAlias()) { |
4596 | EltTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); |
4597 | } else if (ArrayLV.getTBAAInfo().isIncomplete()) { |
4598 | // The array element is complete, even if the array is not. |
4599 | EltTBAAInfo = CGM.getTBAAAccessInfo(AccessType: E->getType()); |
4600 | } else { |
4601 | // The TBAA access info from the array (base) lvalue is ordinary. We will |
4602 | // adapt it to create access info for the element. |
4603 | EltTBAAInfo = ArrayLV.getTBAAInfo(); |
4604 | |
4605 | // We retain the TBAA struct path (BaseType and Offset members) from the |
4606 | // array. In the TBAA representation, we map any array access to the |
4607 | // element at index 0, as the index is generally a runtime value. This |
4608 | // element has the same offset in the base type as the array itself. |
4609 | // If the array lvalue had no base type, there is no point trying to |
4610 | // generate one, since an array itself is not a valid base type. |
4611 | |
4612 | // We also retain the access type from the base lvalue, but the access |
4613 | // size must be updated to the size of an individual element. |
4614 | EltTBAAInfo.Size = |
4615 | getContext().getTypeSizeInChars(T: E->getType()).getQuantity(); |
4616 | } |
4617 | } else { |
4618 | // The base must be a pointer; emit it with an estimate of its alignment. |
4619 | Address BaseAddr = |
4620 | EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &EltBaseInfo, TBAAInfo: &EltTBAAInfo); |
4621 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4622 | QualType ptrType = E->getBase()->getType(); |
4623 | Addr = emitArraySubscriptGEP(CGF&: *this, addr: BaseAddr, indices: Idx, eltType: E->getType(), |
4624 | inbounds: !getLangOpts().PointerOverflowDefined, |
4625 | signedIndices: SignedIndices, loc: E->getExprLoc(), arrayType: &ptrType, |
4626 | Base: E->getBase()); |
4627 | |
4628 | if (SanOpts.has(K: SanitizerKind::ArrayBounds)) { |
4629 | StructFieldAccess Visitor; |
4630 | const Expr *Base = Visitor.Visit(S: E->getBase()); |
4631 | |
4632 | if (const auto *CE = dyn_cast_if_present<CastExpr>(Val: Base); |
4633 | CE && CE->getCastKind() == CK_LValueToRValue) |
4634 | EmitCountedByBoundsChecking(E: CE, Idx, Addr: Address::invalid(), |
4635 | IdxTy: E->getIdx()->getType(), ArrayTy: ptrType, Accessed, |
4636 | /*FlexibleArray=*/false); |
4637 | } |
4638 | } |
4639 | |
4640 | LValue LV = MakeAddrLValue(Addr, T: E->getType(), BaseInfo: EltBaseInfo, TBAAInfo: EltTBAAInfo); |
4641 | |
4642 | if (getLangOpts().ObjC && |
4643 | getLangOpts().getGC() != LangOptions::NonGC) { |
4644 | LV.setNonGC(!E->isOBJCGCCandidate(Ctx&: getContext())); |
4645 | setObjCGCLValueClass(Ctx: getContext(), E, LV); |
4646 | } |
4647 | return LV; |
4648 | } |
4649 | |
4650 | llvm::Value *CodeGenFunction::EmitMatrixIndexExpr(const Expr *E) { |
4651 | llvm::Value *Idx = EmitScalarExpr(E); |
4652 | if (Idx->getType() == IntPtrTy) |
4653 | return Idx; |
4654 | bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType(); |
4655 | return Builder.CreateIntCast(V: Idx, DestTy: IntPtrTy, isSigned: IsSigned); |
4656 | } |
4657 | |
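     | // Matrix values are laid out in column-major order, so element M[r][c] of an
     | // R x C matrix lives at flat index c * R + r; e.g. in a 4x4 matrix, M[1][2]
     | // maps to index 2 * 4 + 1 == 9.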
4658 | LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) { |
4659 | assert( |
4660 | !E->isIncomplete() && |
4661 | "incomplete matrix subscript expressions should be rejected during Sema" ); |
4662 | LValue Base = EmitLValue(E: E->getBase()); |
4663 | |
4664 | // Extend or truncate the index type to 32 or 64 bits if needed.
4665 | llvm::Value *RowIdx = EmitMatrixIndexExpr(E: E->getRowIdx()); |
4666 | llvm::Value *ColIdx = EmitMatrixIndexExpr(E: E->getColumnIdx()); |
4667 | |
4668 | llvm::Value *NumRows = Builder.getIntN( |
4669 | N: RowIdx->getType()->getScalarSizeInBits(), |
4670 | C: E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows()); |
4671 | llvm::Value *FinalIdx = |
4672 | Builder.CreateAdd(LHS: Builder.CreateMul(LHS: ColIdx, RHS: NumRows), RHS: RowIdx); |
4673 | return LValue::MakeMatrixElt( |
4674 | matAddress: MaybeConvertMatrixAddress(Addr: Base.getAddress(), CGF&: *this), Idx: FinalIdx, |
4675 | type: E->getBase()->getType(), BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo()); |
4676 | } |
4677 | |
4678 | static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, |
4679 | LValueBaseInfo &BaseInfo, |
4680 | TBAAAccessInfo &TBAAInfo, |
4681 | QualType BaseTy, QualType ElTy, |
4682 | bool IsLowerBound) { |
4683 | LValue BaseLVal; |
4684 | if (auto *ASE = dyn_cast<ArraySectionExpr>(Val: Base->IgnoreParenImpCasts())) { |
4685 | BaseLVal = CGF.EmitArraySectionExpr(E: ASE, IsLowerBound); |
4686 | if (BaseTy->isArrayType()) { |
4687 | Address Addr = BaseLVal.getAddress(); |
4688 | BaseInfo = BaseLVal.getBaseInfo(); |
4689 | |
4690 | // If the array type was an incomplete type, we need to make sure |
4691 | // the decay ends up being the right type. |
4692 | llvm::Type *NewTy = CGF.ConvertType(T: BaseTy); |
4693 | Addr = Addr.withElementType(ElemTy: NewTy); |
4694 | |
4695 | // Note that VLA pointers are always decayed, so we don't need to do |
4696 | // anything here. |
4697 | if (!BaseTy->isVariableArrayType()) { |
4698 | assert(isa<llvm::ArrayType>(Addr.getElementType()) && |
4699 | "Expected pointer to array" ); |
4700 | Addr = CGF.Builder.CreateConstArrayGEP(Addr, Index: 0, Name: "arraydecay" ); |
4701 | } |
4702 | |
4703 | return Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: ElTy)); |
4704 | } |
4705 | LValueBaseInfo TypeBaseInfo; |
4706 | TBAAAccessInfo TypeTBAAInfo; |
4707 | CharUnits Align = |
4708 | CGF.CGM.getNaturalTypeAlignment(T: ElTy, BaseInfo: &TypeBaseInfo, TBAAInfo: &TypeTBAAInfo); |
4709 | BaseInfo.mergeForCast(Info: TypeBaseInfo); |
4710 | TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(SourceInfo: TBAAInfo, TargetInfo: TypeTBAAInfo); |
4711 | return Address(CGF.Builder.CreateLoad(Addr: BaseLVal.getAddress()), |
4712 | CGF.ConvertTypeForMem(T: ElTy), Align); |
4713 | } |
4714 | return CGF.EmitPointerWithAlignment(E: Base, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
4715 | } |
4716 | |
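     | // An array section 'base[lower-bound : length]' designates the elements in
     | // [lower-bound, lower-bound + length). For example (sketch):
     | //
     | //   int a[10];
     | //   #pragma omp target map(a[2:4])   // maps a[2] through a[5]
     | //
     | // With IsLowerBound == false, this returns an lvalue for the last element of
     | // the section, i.e. index lower-bound + length - 1.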
4717 | LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E, |
4718 | bool IsLowerBound) { |
4719 | |
4720 | assert(!E->isOpenACCArraySection() && |
4721 | "OpenACC Array section codegen not implemented" ); |
4722 | |
4723 | QualType BaseTy = ArraySectionExpr::getBaseOriginalType(Base: E->getBase()); |
4724 | QualType ResultExprTy; |
4725 | if (auto *AT = getContext().getAsArrayType(T: BaseTy)) |
4726 | ResultExprTy = AT->getElementType(); |
4727 | else |
4728 | ResultExprTy = BaseTy->getPointeeType(); |
4729 | llvm::Value *Idx = nullptr; |
4730 | if (IsLowerBound || E->getColonLocFirst().isInvalid()) { |
4731 | // Requesting the lower bound or the upper bound, but without a provided
4732 | // length and without a ':' for the default length -> length = 1.
4733 | // Idx = LowerBound ?: 0; |
4734 | if (auto *LowerBound = E->getLowerBound()) { |
4735 | Idx = Builder.CreateIntCast( |
4736 | V: EmitScalarExpr(E: LowerBound), DestTy: IntPtrTy, |
4737 | isSigned: LowerBound->getType()->hasSignedIntegerRepresentation()); |
4738 | } else |
4739 | Idx = llvm::ConstantInt::getNullValue(Ty: IntPtrTy); |
4740 | } else { |
4741 | // Try to emit the length or lower bound as a constant. If this is possible,
4742 | // 1 is subtracted from the constant length or lower bound. Otherwise, emit
4743 | // LLVM IR computing (LB + Len) - 1.
4744 | auto &C = CGM.getContext(); |
4745 | auto *Length = E->getLength(); |
4746 | llvm::APSInt ConstLength; |
4747 | if (Length) { |
4748 | // Idx = LowerBound + Length - 1; |
4749 | if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(Ctx: C)) { |
4750 | ConstLength = CL->zextOrTrunc(width: PointerWidthInBits); |
4751 | Length = nullptr; |
4752 | } |
4753 | auto *LowerBound = E->getLowerBound(); |
4754 | llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false); |
4755 | if (LowerBound) { |
4756 | if (std::optional<llvm::APSInt> LB = |
4757 | LowerBound->getIntegerConstantExpr(Ctx: C)) { |
4758 | ConstLowerBound = LB->zextOrTrunc(width: PointerWidthInBits); |
4759 | LowerBound = nullptr; |
4760 | } |
4761 | } |
4762 | if (!Length) |
4763 | --ConstLength; |
4764 | else if (!LowerBound) |
4765 | --ConstLowerBound; |
4766 | |
4767 | if (Length || LowerBound) { |
4768 | auto *LowerBoundVal = |
4769 | LowerBound |
4770 | ? Builder.CreateIntCast( |
4771 | V: EmitScalarExpr(E: LowerBound), DestTy: IntPtrTy, |
4772 | isSigned: LowerBound->getType()->hasSignedIntegerRepresentation()) |
4773 | : llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLowerBound); |
4774 | auto *LengthVal = |
4775 | Length |
4776 | ? Builder.CreateIntCast( |
4777 | V: EmitScalarExpr(E: Length), DestTy: IntPtrTy, |
4778 | isSigned: Length->getType()->hasSignedIntegerRepresentation()) |
4779 | : llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLength); |
4780 | Idx = Builder.CreateAdd(LHS: LowerBoundVal, RHS: LengthVal, Name: "lb_add_len" , |
4781 | /*HasNUW=*/false, |
4782 | HasNSW: !getLangOpts().PointerOverflowDefined); |
4783 | if (Length && LowerBound) { |
4784 | Idx = Builder.CreateSub( |
4785 | LHS: Idx, RHS: llvm::ConstantInt::get(Ty: IntPtrTy, /*V=*/1), Name: "idx_sub_1" , |
4786 | /*HasNUW=*/false, HasNSW: !getLangOpts().PointerOverflowDefined); |
4787 | } |
4788 | } else |
4789 | Idx = llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLength + ConstLowerBound); |
4790 | } else { |
4791 | // Idx = ArraySize - 1; |
4792 | QualType ArrayTy = BaseTy->isPointerType() |
4793 | ? E->getBase()->IgnoreParenImpCasts()->getType() |
4794 | : BaseTy; |
4795 | if (auto *VAT = C.getAsVariableArrayType(T: ArrayTy)) { |
4796 | Length = VAT->getSizeExpr(); |
4797 | if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(Ctx: C)) { |
4798 | ConstLength = *L; |
4799 | Length = nullptr; |
4800 | } |
4801 | } else { |
4802 | auto *CAT = C.getAsConstantArrayType(T: ArrayTy); |
4803 | assert(CAT && "unexpected type for array initializer" ); |
4804 | ConstLength = CAT->getSize(); |
4805 | } |
4806 | if (Length) { |
4807 | auto *LengthVal = Builder.CreateIntCast( |
4808 | V: EmitScalarExpr(E: Length), DestTy: IntPtrTy, |
4809 | isSigned: Length->getType()->hasSignedIntegerRepresentation()); |
4810 | Idx = Builder.CreateSub( |
4811 | LHS: LengthVal, RHS: llvm::ConstantInt::get(Ty: IntPtrTy, /*V=*/1), Name: "len_sub_1" , |
4812 | /*HasNUW=*/false, HasNSW: !getLangOpts().PointerOverflowDefined); |
4813 | } else { |
4814 | ConstLength = ConstLength.zextOrTrunc(width: PointerWidthInBits); |
4815 | --ConstLength; |
4816 | Idx = llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLength); |
4817 | } |
4818 | } |
4819 | } |
4820 | assert(Idx); |
4821 | |
4822 | Address EltPtr = Address::invalid(); |
4823 | LValueBaseInfo BaseInfo; |
4824 | TBAAAccessInfo TBAAInfo; |
4825 | if (auto *VLA = getContext().getAsVariableArrayType(T: ResultExprTy)) { |
4826 | // The base must be a pointer, which is not an aggregate. Emit |
4827 | // it. It needs to be emitted first in case it's what captures |
4828 | // the VLA bounds. |
4829 | Address Base = |
4830 | emitOMPArraySectionBase(CGF&: *this, Base: E->getBase(), BaseInfo, TBAAInfo, |
4831 | BaseTy, ElTy: VLA->getElementType(), IsLowerBound); |
4832 | // The element count here is the total number of non-VLA elements. |
4833 | llvm::Value *NumElements = getVLASize(vla: VLA).NumElts; |
4834 | |
4835 | // Effectively, the multiply by the VLA size is part of the GEP. |
4836 | // GEP indexes are signed, and scaling an index isn't permitted to |
4837 | // signed-overflow, so we use the same semantics for our explicit |
4838 | // multiply. We suppress this if overflow is not undefined behavior. |
4839 | if (getLangOpts().PointerOverflowDefined) |
4840 | Idx = Builder.CreateMul(LHS: Idx, RHS: NumElements); |
4841 | else |
4842 | Idx = Builder.CreateNSWMul(LHS: Idx, RHS: NumElements); |
4843 | EltPtr = emitArraySubscriptGEP(CGF&: *this, addr: Base, indices: Idx, eltType: VLA->getElementType(), |
4844 | inbounds: !getLangOpts().PointerOverflowDefined, |
4845 | /*signedIndices=*/false, loc: E->getExprLoc()); |
4846 | } else if (const Expr *Array = isSimpleArrayDecayOperand(E: E->getBase())) { |
4847 | // If this is A[i] where A is an array, the frontend will have decayed the |
4848 | // base to be a ArrayToPointerDecay implicit cast. While correct, it is |
4849 | // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a |
4850 | // "gep x, i" here. Emit one "gep A, 0, i". |
4851 | assert(Array->getType()->isArrayType() && |
4852 | "Array to pointer decay must have array source type!" ); |
4853 | LValue ArrayLV; |
4854 | // For simple multidimensional array indexing, set the 'accessed' flag for |
4855 | // better bounds-checking of the base expression. |
4856 | if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: Array)) |
4857 | ArrayLV = EmitArraySubscriptExpr(E: ASE, /*Accessed*/ true); |
4858 | else |
4859 | ArrayLV = EmitLValue(E: Array); |
4860 | |
4861 | // Propagate the alignment from the array itself to the result. |
4862 | EltPtr = emitArraySubscriptGEP( |
4863 | CGF&: *this, addr: ArrayLV.getAddress(), indices: {CGM.getSize(numChars: CharUnits::Zero()), Idx}, |
4864 | eltType: ResultExprTy, inbounds: !getLangOpts().PointerOverflowDefined, |
4865 | /*signedIndices=*/false, loc: E->getExprLoc()); |
4866 | BaseInfo = ArrayLV.getBaseInfo(); |
4867 | TBAAInfo = CGM.getTBAAInfoForSubobject(Base: ArrayLV, AccessType: ResultExprTy); |
4868 | } else { |
4869 | Address Base = |
4870 | emitOMPArraySectionBase(CGF&: *this, Base: E->getBase(), BaseInfo, TBAAInfo, BaseTy, |
4871 | ElTy: ResultExprTy, IsLowerBound); |
4872 | EltPtr = emitArraySubscriptGEP(CGF&: *this, addr: Base, indices: Idx, eltType: ResultExprTy, |
4873 | inbounds: !getLangOpts().PointerOverflowDefined, |
4874 | /*signedIndices=*/false, loc: E->getExprLoc()); |
4875 | } |
4876 | |
4877 | return MakeAddrLValue(Addr: EltPtr, T: ResultExprTy, BaseInfo, TBAAInfo); |
4878 | } |
4879 | |
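     | // For example (sketch):
     | //
     | //   typedef float float4 __attribute__((ext_vector_type(4)));
     | //   float4 v;
     | //   v.xyz;       // simple vector base: encoded indices {0, 1, 2}
     | //   v.wzyx.yz;   // the base is itself an ext-vector element lvalue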
4880 | LValue CodeGenFunction:: |
4881 | EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { |
4882 | // Emit the base vector as an l-value. |
4883 | LValue Base; |
4884 | |
4885 | // ExtVectorElementExpr's base can be either a vector or a pointer to a vector.
4886 | if (E->isArrow()) { |
4887 | // If it is a pointer to a vector, emit the address and form an lvalue with |
4888 | // it. |
4889 | LValueBaseInfo BaseInfo; |
4890 | TBAAAccessInfo TBAAInfo; |
4891 | Address Ptr = EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
4892 | const auto *PT = E->getBase()->getType()->castAs<PointerType>(); |
4893 | Base = MakeAddrLValue(Addr: Ptr, T: PT->getPointeeType(), BaseInfo, TBAAInfo); |
4894 | Base.getQuals().removeObjCGCAttr(); |
4895 | } else if (E->getBase()->isGLValue()) { |
4896 | // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
4897 | // emit the base as an lvalue. |
4898 | assert(E->getBase()->getType()->isVectorType()); |
4899 | Base = EmitLValue(E: E->getBase()); |
4900 | } else { |
4901 | // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
4902 | assert(E->getBase()->getType()->isVectorType() && |
4903 | "Result must be a vector" ); |
4904 | llvm::Value *Vec = EmitScalarExpr(E: E->getBase()); |
4905 | |
4906 | // Store the vector to memory (because LValue wants an address). |
4907 | Address VecMem = CreateMemTemp(Ty: E->getBase()->getType()); |
4908 | // We need to zero-extend an HLSL boolean vector to store it back to memory.
4909 | QualType Ty = E->getBase()->getType(); |
4910 | llvm::Type *LTy = convertTypeForLoadStore(ASTTy: Ty, LLVMTy: Vec->getType()); |
4911 | if (LTy->getScalarSizeInBits() > Vec->getType()->getScalarSizeInBits()) |
4912 | Vec = Builder.CreateZExt(V: Vec, DestTy: LTy); |
4913 | Builder.CreateStore(Val: Vec, Addr: VecMem); |
4914 | Base = MakeAddrLValue(Addr: VecMem, T: Ty, Source: AlignmentSource::Decl); |
4915 | } |
4916 | |
4917 | QualType type = |
4918 | E->getType().withCVRQualifiers(CVR: Base.getQuals().getCVRQualifiers()); |
4919 | |
4920 | // Encode the element access list into a vector of unsigned indices. |
4921 | SmallVector<uint32_t, 4> Indices; |
4922 | E->getEncodedElementAccess(Elts&: Indices); |
4923 | |
4924 | if (Base.isSimple()) { |
4925 | llvm::Constant *CV = |
4926 | llvm::ConstantDataVector::get(Context&: getLLVMContext(), Elts: Indices); |
4927 | return LValue::MakeExtVectorElt(Addr: Base.getAddress(), Elts: CV, type, |
4928 | BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo()); |
4929 | } |
4930 | assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!" ); |
4931 | |
4932 | llvm::Constant *BaseElts = Base.getExtVectorElts(); |
4933 | SmallVector<llvm::Constant *, 4> CElts; |
4934 | |
4935 | for (unsigned Index : Indices) |
4936 | CElts.push_back(Elt: BaseElts->getAggregateElement(Elt: Index)); |
4937 | llvm::Constant *CV = llvm::ConstantVector::get(V: CElts); |
4938 | return LValue::MakeExtVectorElt(Addr: Base.getExtVectorAddress(), Elts: CV, type, |
4939 | BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo()); |
4940 | } |
4941 | |
4942 | bool CodeGenFunction::isUnderlyingBasePointerConstantNull(const Expr *E) { |
4943 | const Expr *UnderlyingBaseExpr = E->IgnoreParens(); |
4944 | while (auto *BaseMemberExpr = dyn_cast<MemberExpr>(Val: UnderlyingBaseExpr)) |
4945 | UnderlyingBaseExpr = BaseMemberExpr->getBase()->IgnoreParens(); |
4946 | return getContext().isSentinelNullExpr(E: UnderlyingBaseExpr); |
4947 | } |
4948 | |
4949 | LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { |
4950 | if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(CGF&: *this, ME: E)) { |
4951 | EmitIgnoredExpr(E: E->getBase()); |
4952 | return EmitDeclRefLValue(E: DRE); |
4953 | } |
4954 | |
4955 | Expr *BaseExpr = E->getBase(); |
4956 | // Check whether the underlying base pointer is a constant null.
4957 | // If so, we do not set the inbounds flag on the GEP, to avoid breaking
4958 | // some old-style offsetof idioms.
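     | // For example, a classic pre-standard offsetof emulation dereferences a
     | // constant null base (illustrative sketch; 'my_offsetof' is hypothetical):
     | //
     | //   #define my_offsetof(T, f) ((unsigned long)&(((T *)0)->f))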
4959 | bool IsInBounds = !getLangOpts().PointerOverflowDefined && |
4960 | !isUnderlyingBasePointerConstantNull(E: BaseExpr); |
4961 | // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. |
4962 | LValue BaseLV; |
4963 | if (E->isArrow()) { |
4964 | LValueBaseInfo BaseInfo; |
4965 | TBAAAccessInfo TBAAInfo; |
4966 | Address Addr = EmitPointerWithAlignment(E: BaseExpr, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
4967 | QualType PtrTy = BaseExpr->getType()->getPointeeType(); |
4968 | SanitizerSet SkippedChecks; |
4969 | bool IsBaseCXXThis = IsWrappedCXXThis(Obj: BaseExpr); |
4970 | if (IsBaseCXXThis) |
4971 | SkippedChecks.set(K: SanitizerKind::Alignment, Value: true); |
4972 | if (IsBaseCXXThis || isa<DeclRefExpr>(Val: BaseExpr)) |
4973 | SkippedChecks.set(K: SanitizerKind::Null, Value: true); |
4974 | EmitTypeCheck(TCK: TCK_MemberAccess, Loc: E->getExprLoc(), Addr, Type: PtrTy, |
4975 | /*Alignment=*/CharUnits::Zero(), SkippedChecks); |
4976 | BaseLV = MakeAddrLValue(Addr, T: PtrTy, BaseInfo, TBAAInfo); |
4977 | } else |
4978 | BaseLV = EmitCheckedLValue(E: BaseExpr, TCK: TCK_MemberAccess); |
4979 | |
4980 | NamedDecl *ND = E->getMemberDecl(); |
4981 | if (auto *Field = dyn_cast<FieldDecl>(Val: ND)) { |
4982 | LValue LV = EmitLValueForField(Base: BaseLV, Field, IsInBounds); |
4983 | setObjCGCLValueClass(Ctx: getContext(), E, LV); |
4984 | if (getLangOpts().OpenMP) { |
4985 | // If the member was explicitly marked as nontemporal, mark it as |
4986 | // nontemporal. If the base lvalue is marked as nontemporal, mark access |
4987 | // to children as nontemporal too. |
4988 | if ((IsWrappedCXXThis(Obj: BaseExpr) && |
4989 | CGM.getOpenMPRuntime().isNontemporalDecl(VD: Field)) || |
4990 | BaseLV.isNontemporal()) |
4991 | LV.setNontemporal(/*Value=*/true); |
4992 | } |
4993 | return LV; |
4994 | } |
4995 | |
4996 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) |
4997 | return EmitFunctionDeclLValue(CGF&: *this, E, GD: FD); |
4998 | |
4999 | llvm_unreachable("Unhandled member declaration!" ); |
5000 | } |
5001 | |
5002 | /// Given that we are currently emitting a lambda, emit an l-value for |
5003 | /// one of its members. |
5004 | /// |
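     | /// For example (sketch, C++23 deducing this):
     | ///   auto L = [x](this auto &&self) { return x; };
     | /// Here the explicit object parameter 'self' may refer to a class derived
     | /// from the closure type, so a derived-to-base adjustment may be needed
     | /// before addressing the capture.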
5005 | LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field, |
5006 | llvm::Value *ThisValue) { |
5007 | bool HasExplicitObjectParameter = false; |
5008 | const auto *MD = dyn_cast_if_present<CXXMethodDecl>(Val: CurCodeDecl); |
5009 | if (MD) { |
5010 | HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction(); |
5011 | assert(MD->getParent()->isLambda()); |
5012 | assert(MD->getParent() == Field->getParent()); |
5013 | } |
5014 | LValue LambdaLV; |
5015 | if (HasExplicitObjectParameter) { |
5016 | const VarDecl *D = cast<CXXMethodDecl>(Val: CurCodeDecl)->getParamDecl(i: 0); |
5017 | auto It = LocalDeclMap.find(Val: D); |
5018 | assert(It != LocalDeclMap.end() && "explicit parameter not loaded?" ); |
5019 | Address AddrOfExplicitObject = It->getSecond(); |
5020 | if (D->getType()->isReferenceType()) |
5021 | LambdaLV = EmitLoadOfReferenceLValue(RefAddr: AddrOfExplicitObject, RefTy: D->getType(), |
5022 | Source: AlignmentSource::Decl); |
5023 | else |
5024 | LambdaLV = MakeAddrLValue(Addr: AddrOfExplicitObject, |
5025 | T: D->getType().getNonReferenceType()); |
5026 | |
5027 | // Make sure we have an lvalue to the lambda itself and not a derived class. |
5028 | auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl(); |
5029 | auto *LambdaTy = cast<CXXRecordDecl>(Val: Field->getParent()); |
5030 | if (ThisTy != LambdaTy) { |
5031 | const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(Val: MD); |
5032 | Address Base = GetAddressOfBaseClass( |
5033 | Value: LambdaLV.getAddress(), Derived: ThisTy, PathBegin: BasePathArray.begin(), |
5034 | PathEnd: BasePathArray.end(), /*NullCheckValue=*/false, Loc: SourceLocation()); |
5035 | LambdaLV = MakeAddrLValue(Addr: Base, T: QualType{LambdaTy->getTypeForDecl(), 0}); |
5036 | } |
5037 | } else { |
5038 | QualType LambdaTagType = getContext().getTagDeclType(Decl: Field->getParent()); |
5039 | LambdaLV = MakeNaturalAlignAddrLValue(V: ThisValue, T: LambdaTagType); |
5040 | } |
5041 | return EmitLValueForField(Base: LambdaLV, Field); |
5042 | } |
5043 | |
5044 | LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) { |
5045 | return EmitLValueForLambdaField(Field, ThisValue: CXXABIThisValue); |
5046 | } |
5047 | |
5048 | /// Get the field index in the debug info. The debug info for a
5049 | /// structure/union ignores unnamed bit-fields.
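     | /// For example (hypothetical layout), given
     | ///   struct S { int a; int : 4; int b; };
     | /// 'b' has FieldIndex 2 in the AST but index 1 in the debug info.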
5050 | unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec, |
5051 | unsigned FieldIndex) { |
5052 | unsigned I = 0, Skipped = 0; |
5053 | |
5054 | for (auto *F : Rec->getDefinition()->fields()) { |
5055 | if (I == FieldIndex) |
5056 | break; |
5057 | if (F->isUnnamedBitField()) |
5058 | Skipped++; |
5059 | I++; |
5060 | } |
5061 | |
5062 | return FieldIndex - Skipped; |
5063 | } |
5064 | |
5065 | /// Get the address of a zero-sized field within a record. The resulting |
5066 | /// address doesn't necessarily have the right type. |
5067 | static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, |
5068 | const FieldDecl *Field, |
5069 | bool IsInBounds) { |
5070 | CharUnits Offset = CGF.getContext().toCharUnitsFromBits( |
5071 | BitSize: CGF.getContext().getFieldOffset(FD: Field)); |
5072 | if (Offset.isZero()) |
5073 | return Base; |
5074 | Base = Base.withElementType(ElemTy: CGF.Int8Ty); |
5075 | if (!IsInBounds) |
5076 | return CGF.Builder.CreateConstByteGEP(Addr: Base, Offset); |
5077 | return CGF.Builder.CreateConstInBoundsByteGEP(Addr: Base, Offset); |
5078 | } |
5079 | |
5080 | /// Drill down to the storage of a field without walking into |
5081 | /// reference types. |
5082 | /// |
5083 | /// The resulting address doesn't necessarily have the right type. |
5084 | static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, |
5085 | const FieldDecl *field, bool IsInBounds) { |
5086 | if (isEmptyFieldForLayout(Context: CGF.getContext(), FD: field)) |
5087 | return emitAddrOfZeroSizeField(CGF, Base: base, Field: field, IsInBounds); |
5088 | |
5089 | const RecordDecl *rec = field->getParent(); |
5090 | |
5091 | unsigned idx = |
5092 | CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(FD: field); |
5093 | |
5094 | if (!IsInBounds) |
5095 | return CGF.Builder.CreateConstGEP2_32(Addr: base, Idx0: 0, Idx1: idx, Name: field->getName()); |
5096 | |
5097 | return CGF.Builder.CreateStructGEP(Addr: base, Index: idx, Name: field->getName()); |
5098 | } |
5099 | |
5100 | static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, |
5101 | Address addr, const FieldDecl *field) { |
5102 | const RecordDecl *rec = field->getParent(); |
5103 | llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType( |
5104 | Ty: base.getType(), Loc: rec->getLocation()); |
5105 | |
5106 | unsigned idx = |
5107 | CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(FD: field); |
5108 | |
5109 | return CGF.Builder.CreatePreserveStructAccessIndex( |
5110 | Addr: addr, Index: idx, FieldIndex: CGF.getDebugInfoFIndex(Rec: rec, FieldIndex: field->getFieldIndex()), DbgInfo); |
5111 | } |
5112 | |
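     | // Returns true if Type is, or transitively contains (via bases or
     | // fields), a dynamic class. Illustrative case (sketch):
     | //
     | //   struct Poly { virtual ~Poly(); };
     | //   union U { Poly p; int i; };   // hasAnyVptr is true for U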
5113 | static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { |
5114 | const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl(); |
5115 | if (!RD) |
5116 | return false; |
5117 | |
5118 | if (RD->isDynamicClass()) |
5119 | return true; |
5120 | |
5121 | for (const auto &Base : RD->bases()) |
5122 | if (hasAnyVptr(Type: Base.getType(), Context)) |
5123 | return true; |
5124 | |
5125 | for (const FieldDecl *Field : RD->fields()) |
5126 | if (hasAnyVptr(Type: Field->getType(), Context)) |
5127 | return true; |
5128 | |
5129 | return false; |
5130 | } |
5131 | |
5132 | LValue CodeGenFunction::EmitLValueForField(LValue base, const FieldDecl *field, |
5133 | bool IsInBounds) { |
5134 | LValueBaseInfo BaseInfo = base.getBaseInfo(); |
5135 | |
5136 | if (field->isBitField()) { |
5137 | const CGRecordLayout &RL = |
5138 | CGM.getTypes().getCGRecordLayout(field->getParent()); |
5139 | const CGBitFieldInfo &Info = RL.getBitFieldInfo(FD: field); |
5140 | const bool UseVolatile = isAAPCS(TargetInfo: CGM.getTarget()) && |
5141 | CGM.getCodeGenOpts().AAPCSBitfieldWidth && |
5142 | Info.VolatileStorageSize != 0 && |
5143 | field->getType() |
5144 | .withCVRQualifiers(CVR: base.getVRQualifiers()) |
5145 | .isVolatileQualified(); |
5146 | Address Addr = base.getAddress(); |
5147 | unsigned Idx = RL.getLLVMFieldNo(FD: field); |
5148 | const RecordDecl *rec = field->getParent(); |
5149 | if (hasBPFPreserveStaticOffset(D: rec)) |
5150 | Addr = wrapWithBPFPreserveStaticOffset(CGF&: *this, Addr); |
5151 | if (!UseVolatile) { |
5152 | if (!IsInPreservedAIRegion && |
5153 | (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) { |
5154 | if (Idx != 0) { |
5155 | // For structs, we GEP to the field that the record layout suggests. |
5156 | if (!IsInBounds) |
5157 | Addr = Builder.CreateConstGEP2_32(Addr, Idx0: 0, Idx1: Idx, Name: field->getName()); |
5158 | else |
5159 | Addr = Builder.CreateStructGEP(Addr, Index: Idx, Name: field->getName()); |
5160 | } |
5161 | } else { |
5162 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType( |
5163 | Ty: getContext().getRecordType(Decl: rec), L: rec->getLocation()); |
5164 | Addr = Builder.CreatePreserveStructAccessIndex( |
5165 | Addr, Index: Idx, FieldIndex: getDebugInfoFIndex(Rec: rec, FieldIndex: field->getFieldIndex()), |
5166 | DbgInfo); |
5167 | } |
5168 | } |
5169 | const unsigned SS = |
5170 | UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; |
5171 | // Get the access type. |
5172 | llvm::Type *FieldIntTy = llvm::Type::getIntNTy(C&: getLLVMContext(), N: SS); |
5173 | Addr = Addr.withElementType(ElemTy: FieldIntTy); |
5174 | if (UseVolatile) { |
5175 | const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity(); |
5176 | if (VolatileOffset) |
5177 | Addr = Builder.CreateConstInBoundsGEP(Addr, Index: VolatileOffset); |
5178 | } |
5179 | |
5180 | QualType fieldType = |
5181 | field->getType().withCVRQualifiers(CVR: base.getVRQualifiers()); |
5182 | // TODO: Support TBAA for bit fields. |
5183 | LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); |
5184 | return LValue::MakeBitfield(Addr, Info, type: fieldType, BaseInfo: FieldBaseInfo, |
5185 | TBAAInfo: TBAAAccessInfo()); |
5186 | } |
5187 | |
5188 | // Fields of may-alias structures are may-alias themselves. |
5189 | // FIXME: this should get propagated down through anonymous structs |
5190 | // and unions. |
5191 | QualType FieldType = field->getType(); |
5192 | const RecordDecl *rec = field->getParent(); |
5193 | AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource(); |
5194 | LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(Source: BaseAlignSource)); |
5195 | TBAAAccessInfo FieldTBAAInfo; |
5196 | if (base.getTBAAInfo().isMayAlias() || |
5197 | rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) { |
5198 | FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); |
5199 | } else if (rec->isUnion()) { |
5200 | // TODO: Support TBAA for unions. |
5201 | FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); |
5202 | } else { |
5203 | // If no base type has been assigned for the base access, then try to
5204 | // generate one for this base lvalue.
5205 | FieldTBAAInfo = base.getTBAAInfo(); |
5206 | if (!FieldTBAAInfo.BaseType) { |
5207 | FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(QTy: base.getType()); |
5208 | assert(!FieldTBAAInfo.Offset && |
5209 | "Nonzero offset for an access with no base type!" ); |
5210 | } |
5211 | |
5212 | // Adjust offset to be relative to the base type. |
5213 | const ASTRecordLayout &Layout = |
5214 | getContext().getASTRecordLayout(D: field->getParent()); |
5215 | unsigned CharWidth = getContext().getCharWidth(); |
5216 | if (FieldTBAAInfo.BaseType) |
5217 | FieldTBAAInfo.Offset += |
5218 | Layout.getFieldOffset(FieldNo: field->getFieldIndex()) / CharWidth; |
5219 | |
5220 | // Update the final access type and size. |
5221 | FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(QTy: FieldType); |
5222 | FieldTBAAInfo.Size = |
5223 | getContext().getTypeSizeInChars(T: FieldType).getQuantity(); |
5224 | } |
5225 | |
5226 | Address addr = base.getAddress(); |
5227 | if (hasBPFPreserveStaticOffset(D: rec)) |
5228 | addr = wrapWithBPFPreserveStaticOffset(CGF&: *this, Addr&: addr); |
5229 | if (auto *ClassDef = dyn_cast<CXXRecordDecl>(Val: rec)) { |
5230 | if (CGM.getCodeGenOpts().StrictVTablePointers && |
5231 | ClassDef->isDynamicClass()) { |
5232 | // Getting to any field of a dynamic object requires stripping the dynamic
5233 | // information provided by invariant.group. This is because accessing
5234 | // fields may leak the real address of the dynamic object, which could
5235 | // result in a miscompilation when the leaked pointer is compared.
5236 | auto *stripped = |
5237 | Builder.CreateStripInvariantGroup(Ptr: addr.emitRawPointer(CGF&: *this)); |
5238 | addr = Address(stripped, addr.getElementType(), addr.getAlignment()); |
5239 | } |
5240 | } |
5241 | |
5242 | unsigned RecordCVR = base.getVRQualifiers(); |
5243 | if (rec->isUnion()) { |
5244 | // For unions, there is no pointer adjustment. |
5245 | if (CGM.getCodeGenOpts().StrictVTablePointers && |
5246 | hasAnyVptr(Type: FieldType, Context: getContext())) |
5247 | // Because unions can easily skip invariant.barriers, we need to add
5248 | // a barrier every time a CXXRecord field with a vptr is referenced.
5249 | addr = Builder.CreateLaunderInvariantGroup(Addr: addr); |
5250 | |
5251 | if (IsInPreservedAIRegion || |
5252 | (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) { |
5253 | // Remember the original union field index |
5254 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(Ty: base.getType(), |
5255 | Loc: rec->getLocation()); |
5256 | addr = |
5257 | Address(Builder.CreatePreserveUnionAccessIndex( |
5258 | Base: addr.emitRawPointer(CGF&: *this), |
5259 | FieldIndex: getDebugInfoFIndex(Rec: rec, FieldIndex: field->getFieldIndex()), DbgInfo), |
5260 | addr.getElementType(), addr.getAlignment()); |
5261 | } |
5262 | |
5263 | if (FieldType->isReferenceType()) |
5264 | addr = addr.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T: FieldType)); |
5265 | } else { |
5266 | if (!IsInPreservedAIRegion && |
5267 | (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) |
5268 | // For structs, we GEP to the field that the record layout suggests. |
5269 | addr = emitAddrOfFieldStorage(CGF&: *this, base: addr, field, IsInBounds); |
5270 | else |
5271 | // Remember the original struct field index |
5272 | addr = emitPreserveStructAccess(CGF&: *this, base, addr, field); |
5273 | } |
5274 | |
5275 | // If this is a reference field, load the reference right now. |
5276 | if (FieldType->isReferenceType()) { |
5277 | LValue RefLVal = |
5278 | MakeAddrLValue(Addr: addr, T: FieldType, BaseInfo: FieldBaseInfo, TBAAInfo: FieldTBAAInfo); |
5279 | if (RecordCVR & Qualifiers::Volatile) |
5280 | RefLVal.getQuals().addVolatile(); |
5281 | addr = EmitLoadOfReference(RefLVal, PointeeBaseInfo: &FieldBaseInfo, PointeeTBAAInfo: &FieldTBAAInfo); |
5282 | |
5283 | // Qualifiers on the struct don't apply to the referencee. |
5284 | RecordCVR = 0; |
5285 | FieldType = FieldType->getPointeeType(); |
5286 | } |
5287 | |
5288 | // Make sure that the address is pointing to the right type. This is critical |
5289 | // for both unions and structs. |
5290 | addr = addr.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T: FieldType)); |
5291 | |
5292 | if (field->hasAttr<AnnotateAttr>()) |
5293 | addr = EmitFieldAnnotations(D: field, V: addr); |
5294 | |
5295 | LValue LV = MakeAddrLValue(Addr: addr, T: FieldType, BaseInfo: FieldBaseInfo, TBAAInfo: FieldTBAAInfo); |
5296 | LV.getQuals().addCVRQualifiers(mask: RecordCVR); |
5297 | |
5298 | // __weak attribute on a field is ignored. |
5299 | if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) |
5300 | LV.getQuals().removeObjCGCAttr(); |
5301 | |
5302 | return LV; |
5303 | } |
5304 | |
5305 | LValue |
5306 | CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, |
5307 | const FieldDecl *Field) { |
5308 | QualType FieldType = Field->getType(); |
5309 | |
5310 | if (!FieldType->isReferenceType()) |
5311 | return EmitLValueForField(base: Base, field: Field); |
5312 | |
5313 | Address V = emitAddrOfFieldStorage( |
5314 | CGF&: *this, base: Base.getAddress(), field: Field, |
5315 | /*IsInBounds=*/!getLangOpts().PointerOverflowDefined); |
5316 | |
5317 | // Make sure that the address is pointing to the right type. |
5318 | llvm::Type *llvmType = ConvertTypeForMem(T: FieldType); |
5319 | V = V.withElementType(ElemTy: llvmType); |
5320 | |
5321 | // TODO: Generate TBAA information that describes this access as a structure |
5322 | // member access and not just an access to an object of the field's type. This |
5323 | // should be similar to what we do in EmitLValueForField(). |
5324 | LValueBaseInfo BaseInfo = Base.getBaseInfo(); |
5325 | AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource(); |
5326 | LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(Source: FieldAlignSource)); |
5327 | return MakeAddrLValue(Addr: V, T: FieldType, BaseInfo: FieldBaseInfo, |
5328 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base, AccessType: FieldType)); |
5329 | } |
5330 | |
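     | // For example, a block-scope compound literal in C (illustrative sketch):
     | //
     | //   struct Point { int x, y; };
     | //   struct Point *p = &(struct Point){1, 2};  // lifetime ends with the
     | //                                             // enclosing scope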
5331 | LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ |
5332 | if (E->isFileScope()) { |
5333 | ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); |
5334 | return MakeAddrLValue(Addr: GlobalPtr, T: E->getType(), Source: AlignmentSource::Decl); |
5335 | } |
5336 | if (E->getType()->isVariablyModifiedType()) |
5337 | // make sure to emit the VLA size. |
5338 | EmitVariablyModifiedType(Ty: E->getType()); |
5339 | |
5340 | Address DeclPtr = CreateMemTemp(Ty: E->getType(), Name: ".compoundliteral" ); |
5341 | const Expr *InitExpr = E->getInitializer(); |
5342 | LValue Result = MakeAddrLValue(Addr: DeclPtr, T: E->getType(), Source: AlignmentSource::Decl); |
5343 | |
5344 | EmitAnyExprToMem(E: InitExpr, Location: DeclPtr, Quals: E->getType().getQualifiers(), |
5345 | /*Init*/ IsInit: true); |
5346 | |
5347 | // Block-scope compound literals are destroyed at the end of the enclosing |
5348 | // scope in C. |
5349 | if (!getLangOpts().CPlusPlus) |
5350 | if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) |
5351 | pushLifetimeExtendedDestroy(kind: getCleanupKind(kind: DtorKind), addr: DeclPtr, |
5352 | type: E->getType(), destroyer: getDestroyer(destructionKind: DtorKind), |
5353 | useEHCleanupForArray: DtorKind & EHCleanup); |
5354 | |
5355 | return Result; |
5356 | } |
5357 | |
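     | // For example (sketch), a glvalue init list initializing a reference:
     | //
     | //   int x = 0;
     | //   int &r{x};   // the braced list is transparent; we emit 'x' directly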
5358 | LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { |
5359 | if (!E->isGLValue()) |
5360 | // Initializing an aggregate temporary in C++11: T{...}. |
5361 | return EmitAggExprToLValue(E); |
5362 | |
5363 | // An lvalue initializer list must be initializing a reference. |
5364 | assert(E->isTransparent() && "non-transparent glvalue init list" ); |
5365 | return EmitLValue(E: E->getInit(Init: 0)); |
5366 | } |
5367 | |
5368 | /// Emit the operand of a glvalue conditional operator. This is either a glvalue |
5369 | /// or a (possibly-parenthesized) throw-expression. If this is a throw, no |
5370 | /// LValue is returned and the current block has been terminated. |
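     | /// For example (sketch):
     | ///   (use_first ? obj : throw std::runtime_error("no lvalue")) = value;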
5371 | static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF, |
5372 | const Expr *Operand) { |
5373 | if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Val: Operand->IgnoreParens())) { |
5374 | CGF.EmitCXXThrowExpr(E: ThrowExpr, /*KeepInsertionPoint*/false); |
5375 | return std::nullopt; |
5376 | } |
5377 | |
5378 | return CGF.EmitLValue(E: Operand); |
5379 | } |
5380 | |
5381 | namespace { |
5382 | // Handle the case where the condition is a constant evaluatable simple integer, |
5383 | // which means we don't have to separately handle the true/false blocks. |
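     | // For example (sketch): in "true ? a : b", only 'a' needs to be emitted,
     | // provided 'b' contains no labels.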
5384 | std::optional<LValue> HandleConditionalOperatorLValueSimpleCase( |
5385 | CodeGenFunction &CGF, const AbstractConditionalOperator *E) { |
5386 | const Expr *condExpr = E->getCond(); |
5387 | bool CondExprBool; |
5388 | if (CGF.ConstantFoldsToSimpleInteger(Cond: condExpr, Result&: CondExprBool)) { |
5389 | const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr(); |
5390 | if (!CondExprBool) |
5391 | std::swap(a&: Live, b&: Dead); |
5392 | |
5393 | if (!CGF.ContainsLabel(S: Dead)) { |
5394 | // If the true case is live, we need to track its region. |
5395 | if (CondExprBool) |
5396 | CGF.incrementProfileCounter(S: E); |
5397 | CGF.markStmtMaybeUsed(S: Dead); |
5398 | // If the live operand is a throw expression, emit it and return an
5399 | // undefined lvalue because the result cannot be used.
5400 | if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Val: Live->IgnoreParens())) { |
5401 | CGF.EmitCXXThrowExpr(E: ThrowExpr); |
5402 | llvm::Type *ElemTy = CGF.ConvertType(T: Dead->getType()); |
5403 | llvm::Type *Ty = CGF.UnqualPtrTy; |
5404 | return CGF.MakeAddrLValue( |
5405 | Addr: Address(llvm::UndefValue::get(T: Ty), ElemTy, CharUnits::One()), |
5406 | T: Dead->getType()); |
5407 | } |
5408 | return CGF.EmitLValue(E: Live); |
5409 | } |
5410 | } |
5411 | return std::nullopt; |
5412 | } |
5413 | struct ConditionalInfo { |
5414 | llvm::BasicBlock *lhsBlock, *rhsBlock; |
5415 | std::optional<LValue> LHS, RHS; |
5416 | }; |
5417 | |
5418 | // Create and generate the 3 blocks for a conditional operator. |
5419 | // Leaves the 'current block' in the continuation basic block. |
5420 | template<typename FuncTy> |
5421 | ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF, |
5422 | const AbstractConditionalOperator *E, |
5423 | const FuncTy &BranchGenFunc) { |
5424 | ConditionalInfo Info{.lhsBlock: CGF.createBasicBlock(name: "cond.true" ), |
5425 | .rhsBlock: CGF.createBasicBlock(name: "cond.false" ), .LHS: std::nullopt, |
5426 | .RHS: std::nullopt}; |
5427 | llvm::BasicBlock *endBlock = CGF.createBasicBlock(name: "cond.end" ); |
5428 | |
5429 | CodeGenFunction::ConditionalEvaluation eval(CGF); |
5430 | CGF.EmitBranchOnBoolExpr(Cond: E->getCond(), TrueBlock: Info.lhsBlock, FalseBlock: Info.rhsBlock, |
5431 | TrueCount: CGF.getProfileCount(S: E)); |
5432 | |
5433 | // Any temporaries created here are conditional. |
5434 | CGF.EmitBlock(BB: Info.lhsBlock); |
5435 | CGF.incrementProfileCounter(S: E); |
5436 | eval.begin(CGF); |
5437 | Info.LHS = BranchGenFunc(CGF, E->getTrueExpr()); |
5438 | eval.end(CGF); |
5439 | Info.lhsBlock = CGF.Builder.GetInsertBlock(); |
5440 | |
5441 | if (Info.LHS) |
5442 | CGF.Builder.CreateBr(Dest: endBlock); |
5443 | |
5444 | // Any temporaries created here are conditional. |
5445 | CGF.EmitBlock(BB: Info.rhsBlock); |
5446 | eval.begin(CGF); |
5447 | Info.RHS = BranchGenFunc(CGF, E->getFalseExpr()); |
5448 | eval.end(CGF); |
5449 | Info.rhsBlock = CGF.Builder.GetInsertBlock(); |
5450 | CGF.EmitBlock(BB: endBlock); |
5451 | |
5452 | return Info; |
5453 | } |
5454 | } // namespace |
5455 | |
5456 | void CodeGenFunction::EmitIgnoredConditionalOperator( |
5457 | const AbstractConditionalOperator *E) { |
5458 | if (!E->isGLValue()) { |
5459 | // ?: here should be an aggregate. |
5460 | assert(hasAggregateEvaluationKind(E->getType()) && |
5461 | "Unexpected conditional operator!" ); |
5462 | return (void)EmitAggExprToLValue(E); |
5463 | } |
5464 | |
5465 | OpaqueValueMapping binding(*this, E); |
5466 | if (HandleConditionalOperatorLValueSimpleCase(CGF&: *this, E)) |
5467 | return; |
5468 | |
5469 | EmitConditionalBlocks(CGF&: *this, E, BranchGenFunc: [](CodeGenFunction &CGF, const Expr *E) { |
5470 | CGF.EmitIgnoredExpr(E); |
5471 | return LValue{}; |
5472 | }); |
5473 | } |
5474 | LValue CodeGenFunction::EmitConditionalOperatorLValue( |
5475 | const AbstractConditionalOperator *expr) { |
5476 | if (!expr->isGLValue()) { |
5477 | // ?: here should be an aggregate. |
5478 | assert(hasAggregateEvaluationKind(expr->getType()) && |
5479 | "Unexpected conditional operator!" ); |
5480 | return EmitAggExprToLValue(E: expr); |
5481 | } |
5482 | |
5483 | OpaqueValueMapping binding(*this, expr); |
5484 | if (std::optional<LValue> Res = |
5485 | HandleConditionalOperatorLValueSimpleCase(CGF&: *this, E: expr)) |
5486 | return *Res; |
5487 | |
5488 | ConditionalInfo Info = EmitConditionalBlocks( |
5489 | CGF&: *this, E: expr, BranchGenFunc: [](CodeGenFunction &CGF, const Expr *E) { |
5490 | return EmitLValueOrThrowExpression(CGF, Operand: E); |
5491 | }); |
5492 | |
5493 | if ((Info.LHS && !Info.LHS->isSimple()) || |
5494 | (Info.RHS && !Info.RHS->isSimple())) |
5495 | return EmitUnsupportedLValue(E: expr, Name: "conditional operator" ); |
5496 | |
5497 | if (Info.LHS && Info.RHS) { |
5498 | Address lhsAddr = Info.LHS->getAddress(); |
5499 | Address rhsAddr = Info.RHS->getAddress(); |
5500 | Address result = mergeAddressesInConditionalExpr( |
5501 | LHS: lhsAddr, RHS: rhsAddr, LHSBlock: Info.lhsBlock, RHSBlock: Info.rhsBlock, |
5502 | MergeBlock: Builder.GetInsertBlock(), MergedType: expr->getType()); |
5503 | AlignmentSource alignSource = |
5504 | std::max(a: Info.LHS->getBaseInfo().getAlignmentSource(), |
5505 | b: Info.RHS->getBaseInfo().getAlignmentSource()); |
5506 | TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator( |
5507 | InfoA: Info.LHS->getTBAAInfo(), InfoB: Info.RHS->getTBAAInfo()); |
5508 | return MakeAddrLValue(Addr: result, T: expr->getType(), BaseInfo: LValueBaseInfo(alignSource), |
5509 | TBAAInfo); |
5510 | } else { |
5511 | assert((Info.LHS || Info.RHS) && |
5512 | "both operands of glvalue conditional are throw-expressions?" ); |
5513 | return Info.LHS ? *Info.LHS : *Info.RHS; |
5514 | } |
5515 | } |
5516 | |
5517 | /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference |
5518 | /// type. If the cast is to a reference, we can have the usual lvalue result, |
5519 | /// otherwise if a cast is needed by the code generator in an lvalue context, |
5520 | /// then it must mean that we need the address of an aggregate in order to |
5521 | /// access one of its members. This can happen for all the reasons that casts |
5522 | /// are permitted with aggregate result, including noop aggregate casts, and |
5523 | /// cast from scalar to union. |
5524 | LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { |
5525 | switch (E->getCastKind()) { |
5526 | case CK_ToVoid: |
5527 | case CK_BitCast: |
5528 | case CK_LValueToRValueBitCast: |
5529 | case CK_ArrayToPointerDecay: |
5530 | case CK_FunctionToPointerDecay: |
5531 | case CK_NullToMemberPointer: |
5532 | case CK_NullToPointer: |
5533 | case CK_IntegralToPointer: |
5534 | case CK_PointerToIntegral: |
5535 | case CK_PointerToBoolean: |
5536 | case CK_IntegralCast: |
5537 | case CK_BooleanToSignedIntegral: |
5538 | case CK_IntegralToBoolean: |
5539 | case CK_IntegralToFloating: |
5540 | case CK_FloatingToIntegral: |
5541 | case CK_FloatingToBoolean: |
5542 | case CK_FloatingCast: |
5543 | case CK_FloatingRealToComplex: |
5544 | case CK_FloatingComplexToReal: |
5545 | case CK_FloatingComplexToBoolean: |
5546 | case CK_FloatingComplexCast: |
5547 | case CK_FloatingComplexToIntegralComplex: |
5548 | case CK_IntegralRealToComplex: |
5549 | case CK_IntegralComplexToReal: |
5550 | case CK_IntegralComplexToBoolean: |
5551 | case CK_IntegralComplexCast: |
5552 | case CK_IntegralComplexToFloatingComplex: |
5553 | case CK_DerivedToBaseMemberPointer: |
5554 | case CK_BaseToDerivedMemberPointer: |
5555 | case CK_MemberPointerToBoolean: |
5556 | case CK_ReinterpretMemberPointer: |
5557 | case CK_AnyPointerToBlockPointerCast: |
5558 | case CK_ARCProduceObject: |
5559 | case CK_ARCConsumeObject: |
5560 | case CK_ARCReclaimReturnedObject: |
5561 | case CK_ARCExtendBlockObject: |
5562 | case CK_CopyAndAutoreleaseBlockObject: |
5563 | case CK_IntToOCLSampler: |
5564 | case CK_FloatingToFixedPoint: |
5565 | case CK_FixedPointToFloating: |
5566 | case CK_FixedPointCast: |
5567 | case CK_FixedPointToBoolean: |
5568 | case CK_FixedPointToIntegral: |
5569 | case CK_IntegralToFixedPoint: |
5570 | case CK_MatrixCast: |
5571 | case CK_HLSLVectorTruncation: |
5572 | case CK_HLSLArrayRValue: |
5573 | case CK_HLSLElementwiseCast: |
5574 | case CK_HLSLAggregateSplatCast: |
5575 | return EmitUnsupportedLValue(E, Name: "unexpected cast lvalue" ); |
5576 | |
5577 | case CK_Dependent: |
5578 | llvm_unreachable("dependent cast kind in IR gen!" ); |
5579 | |
5580 | case CK_BuiltinFnToFnPtr: |
5581 | llvm_unreachable("builtin functions are handled elsewhere" ); |
5582 | |
5583 | // These are never l-values; just use the aggregate emission code. |
5584 | case CK_NonAtomicToAtomic: |
5585 | case CK_AtomicToNonAtomic: |
5586 | return EmitAggExprToLValue(E); |
5587 | |
5588 | case CK_Dynamic: { |
5589 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5590 | Address V = LV.getAddress(); |
5591 | const auto *DCE = cast<CXXDynamicCastExpr>(Val: E); |
5592 | return MakeNaturalAlignRawAddrLValue(V: EmitDynamicCast(V, DCE), T: E->getType()); |
5593 | } |
5594 | |
5595 | case CK_ConstructorConversion: |
5596 | case CK_UserDefinedConversion: |
5597 | case CK_CPointerToObjCPointerCast: |
5598 | case CK_BlockPointerToObjCPointerCast: |
5599 | case CK_LValueToRValue: |
5600 | return EmitLValue(E: E->getSubExpr()); |
5601 | |
5602 | case CK_NoOp: { |
5603 | // CK_NoOp can model a qualification conversion, which can remove an array |
5604 | // bound and change the IR type. |
5605 | // FIXME: Once pointee types are removed from IR, remove this. |
5606 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5607 | // Propagate the volatile qualifier to the LValue, if it exists in E.
5608 | if (E->changesVolatileQualification()) |
5609 | LV.getQuals() = E->getType().getQualifiers(); |
5610 | if (LV.isSimple()) { |
5611 | Address V = LV.getAddress(); |
5612 | if (V.isValid()) { |
5613 | llvm::Type *T = ConvertTypeForMem(T: E->getType()); |
5614 | if (V.getElementType() != T) |
5615 | LV.setAddress(V.withElementType(ElemTy: T)); |
5616 | } |
5617 | } |
5618 | return LV; |
5619 | } |
5620 | |
5621 | case CK_UncheckedDerivedToBase: |
5622 | case CK_DerivedToBase: { |
5623 | const auto *DerivedClassTy = |
5624 | E->getSubExpr()->getType()->castAs<RecordType>(); |
5625 | auto *DerivedClassDecl = cast<CXXRecordDecl>(Val: DerivedClassTy->getDecl()); |
5626 | |
5627 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5628 | Address This = LV.getAddress(); |
5629 | |
5630 | // Perform the derived-to-base conversion |
5631 | Address Base = GetAddressOfBaseClass( |
5632 | Value: This, Derived: DerivedClassDecl, PathBegin: E->path_begin(), PathEnd: E->path_end(), |
5633 | /*NullCheckValue=*/false, Loc: E->getExprLoc()); |
5634 | |
5635 | // TODO: Support accesses to members of base classes in TBAA. For now, we |
5636 | // conservatively pretend that the complete object is of the base class |
5637 | // type. |
5638 | return MakeAddrLValue(Addr: Base, T: E->getType(), BaseInfo: LV.getBaseInfo(), |
5639 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType())); |
5640 | } |
5641 | case CK_ToUnion: |
5642 | return EmitAggExprToLValue(E); |
5643 | case CK_BaseToDerived: { |
5644 | const auto *DerivedClassTy = E->getType()->castAs<RecordType>(); |
5645 | auto *DerivedClassDecl = cast<CXXRecordDecl>(Val: DerivedClassTy->getDecl()); |
5646 | |
5647 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5648 | |
5649 | // Perform the base-to-derived conversion |
5650 | Address Derived = GetAddressOfDerivedClass( |
5651 | Value: LV.getAddress(), Derived: DerivedClassDecl, PathBegin: E->path_begin(), PathEnd: E->path_end(), |
5652 | /*NullCheckValue=*/false); |
5653 | |
5654 | // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is |
5655 | // performed and the object is not of the derived type. |
5656 | if (sanitizePerformTypeCheck()) |
5657 | EmitTypeCheck(TCK: TCK_DowncastReference, Loc: E->getExprLoc(), Addr: Derived, |
5658 | Type: E->getType()); |
5659 | |
5660 | if (SanOpts.has(K: SanitizerKind::CFIDerivedCast)) |
5661 | EmitVTablePtrCheckForCast(T: E->getType(), Derived, |
5662 | /*MayBeNull=*/false, TCK: CFITCK_DerivedCast, |
5663 | Loc: E->getBeginLoc()); |
5664 | |
5665 | return MakeAddrLValue(Addr: Derived, T: E->getType(), BaseInfo: LV.getBaseInfo(), |
5666 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType())); |
5667 | } |
5668 | case CK_LValueBitCast: { |
5669 | // This must be a reinterpret_cast (or C-style equivalent).
5670 | const auto *CE = cast<ExplicitCastExpr>(Val: E); |
5671 | |
5672 | CGM.EmitExplicitCastExprType(E: CE, CGF: this); |
5673 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5674 | Address V = LV.getAddress().withElementType( |
5675 | ElemTy: ConvertTypeForMem(T: CE->getTypeAsWritten()->getPointeeType())); |
5676 | |
5677 | if (SanOpts.has(K: SanitizerKind::CFIUnrelatedCast)) |
5678 | EmitVTablePtrCheckForCast(T: E->getType(), Derived: V, |
5679 | /*MayBeNull=*/false, TCK: CFITCK_UnrelatedCast, |
5680 | Loc: E->getBeginLoc()); |
5681 | |
5682 | return MakeAddrLValue(Addr: V, T: E->getType(), BaseInfo: LV.getBaseInfo(), |
5683 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType())); |
5684 | } |
5685 | case CK_AddressSpaceConversion: { |
5686 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5687 | QualType DestTy = getContext().getPointerType(T: E->getType()); |
5688 | llvm::Value *V = getTargetHooks().performAddrSpaceCast( |
5689 | CGF&: *this, V: LV.getPointer(CGF&: *this), |
5690 | SrcAddr: E->getSubExpr()->getType().getAddressSpace(), DestTy: ConvertType(T: DestTy)); |
5691 | return MakeAddrLValue(Addr: Address(V, ConvertTypeForMem(T: E->getType()), |
5692 | LV.getAddress().getAlignment()), |
5693 | T: E->getType(), BaseInfo: LV.getBaseInfo(), TBAAInfo: LV.getTBAAInfo()); |
5694 | } |
5695 | case CK_ObjCObjectLValueCast: { |
5696 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5697 | Address V = LV.getAddress().withElementType(ElemTy: ConvertType(T: E->getType())); |
5698 | return MakeAddrLValue(Addr: V, T: E->getType(), BaseInfo: LV.getBaseInfo(), |
5699 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType())); |
5700 | } |
5701 | case CK_ZeroToOCLOpaqueType: |
5702 | llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid" ); |
5703 | |
5704 | case CK_VectorSplat: { |
5705 | // LValue results of vector splats are only supported in HLSL. |
5706 | if (!getLangOpts().HLSL) |
5707 | return EmitUnsupportedLValue(E, Name: "unexpected cast lvalue" ); |
5708 | return EmitLValue(E: E->getSubExpr()); |
5709 | } |
5710 | } |
5711 | |
5712 | llvm_unreachable("Unhandled lvalue cast kind?" ); |
5713 | } |
5714 | |
5715 | LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { |
5716 | assert(OpaqueValueMappingData::shouldBindAsLValue(e)); |
5717 | return getOrCreateOpaqueLValueMapping(e); |
5718 | } |
5719 | |
5720 | std::pair<LValue, LValue> |
5721 | CodeGenFunction::EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty) { |
5722 | // Emit the cast temporary through an opaque value.
5723 | LValue BaseLV = EmitLValue(E: E->getArgLValue()); |
5724 | OpaqueValueMappingData::bind(CGF&: *this, ov: E->getOpaqueArgLValue(), lv: BaseLV); |
5725 | |
5726 | QualType ExprTy = E->getType(); |
5727 | Address OutTemp = CreateIRTemp(Ty: ExprTy); |
5728 | LValue TempLV = MakeAddrLValue(Addr: OutTemp, T: ExprTy); |
5729 | |
5730 | if (E->isInOut()) |
5731 | EmitInitializationToLValue(E: E->getCastedTemporary()->getSourceExpr(), |
5732 | LV: TempLV); |
5733 | |
5734 | OpaqueValueMappingData::bind(CGF&: *this, ov: E->getCastedTemporary(), lv: TempLV); |
5735 | return std::make_pair(x&: BaseLV, y&: TempLV); |
5736 | } |
5737 | |
5738 | LValue CodeGenFunction::EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, |
5739 | CallArgList &Args, QualType Ty) { |
5740 | |
5741 | auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty); |
5742 | |
5743 | llvm::Value *Addr = TempLV.getAddress().getBasePointer(); |
5744 | llvm::Type *ElTy = ConvertTypeForMem(T: TempLV.getType()); |
5745 | |
5746 | llvm::TypeSize Sz = CGM.getDataLayout().getTypeAllocSize(Ty: ElTy); |
5747 | |
5748 | llvm::Value *LifetimeSize = EmitLifetimeStart(Size: Sz, Addr); |
5749 | |
5750 | Address TmpAddr(Addr, ElTy, TempLV.getAlignment()); |
5751 | Args.addWriteback(srcLV: BaseLV, temporary: TmpAddr, toUse: nullptr, writebackExpr: E->getWritebackCast(), |
5752 | lifetimeSz: LifetimeSize); |
5753 | Args.add(rvalue: RValue::get(Addr: TmpAddr, CGF&: *this), type: Ty); |
5754 | return TempLV; |
5755 | } |
5756 | |
5757 | LValue |
5758 | CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) { |
5759 | assert(OpaqueValueMapping::shouldBindAsLValue(e)); |
5760 | |
5761 | llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator |
5762 | it = OpaqueLValues.find(Val: e); |
5763 | |
5764 | if (it != OpaqueLValues.end()) |
5765 | return it->second; |
5766 | |
5767 | assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted" ); |
5768 | return EmitLValue(E: e->getSourceExpr()); |
5769 | } |
5770 | |
5771 | RValue |
5772 | CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) { |
5773 | assert(!OpaqueValueMapping::shouldBindAsLValue(e)); |
5774 | |
5775 | llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator |
5776 | it = OpaqueRValues.find(Val: e); |
5777 | |
5778 | if (it != OpaqueRValues.end()) |
5779 | return it->second; |
5780 | |
5781 | assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted" ); |
5782 | return EmitAnyExpr(E: e->getSourceExpr()); |
5783 | } |
5784 | |
5785 | bool CodeGenFunction::isOpaqueValueEmitted(const OpaqueValueExpr *E) { |
5786 | if (OpaqueValueMapping::shouldBindAsLValue(expr: E)) |
5787 | return OpaqueLValues.contains(Val: E); |
5788 | return OpaqueRValues.contains(Val: E); |
5789 | } |
5790 | |
5791 | RValue CodeGenFunction::EmitRValueForField(LValue LV, |
5792 | const FieldDecl *FD, |
5793 | SourceLocation Loc) { |
5794 | QualType FT = FD->getType(); |
5795 | LValue FieldLV = EmitLValueForField(base: LV, field: FD); |
5796 | switch (getEvaluationKind(T: FT)) { |
5797 | case TEK_Complex: |
5798 | return RValue::getComplex(C: EmitLoadOfComplex(src: FieldLV, loc: Loc)); |
5799 | case TEK_Aggregate: |
5800 | return FieldLV.asAggregateRValue(); |
5801 | case TEK_Scalar: |
5802 | // This routine is used to load fields one-by-one to perform a copy, so |
5803 | // don't load reference fields. |
5804 | if (FD->getType()->isReferenceType()) |
5805 | return RValue::get(V: FieldLV.getPointer(CGF&: *this)); |
5806 | // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a |
5807 | // primitive load. |
5808 | if (FieldLV.isBitField()) |
5809 | return EmitLoadOfLValue(LV: FieldLV, Loc); |
5810 | return RValue::get(V: EmitLoadOfScalar(lvalue: FieldLV, Loc)); |
5811 | } |
5812 | llvm_unreachable("bad evaluation kind" ); |
5813 | } |
5814 | |
5815 | //===--------------------------------------------------------------------===// |
5816 | // Expression Emission |
5817 | //===--------------------------------------------------------------------===// |
5818 | |
5819 | RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, |
5820 | ReturnValueSlot ReturnValue, |
5821 | llvm::CallBase **CallOrInvoke) { |
5822 | llvm::CallBase *CallOrInvokeStorage; |
5823 | if (!CallOrInvoke) { |
5824 | CallOrInvoke = &CallOrInvokeStorage; |
5825 | } |
5826 | |
5827 | auto AddCoroElideSafeOnExit = llvm::make_scope_exit(F: [&] { |
5828 | if (E->isCoroElideSafe()) { |
5829 | auto *I = *CallOrInvoke; |
5830 | if (I) |
5831 | I->addFnAttr(Kind: llvm::Attribute::CoroElideSafe); |
5832 | } |
5833 | }); |
5834 | |
5835 | // Builtins never have block type. |
5836 | if (E->getCallee()->getType()->isBlockPointerType()) |
5837 | return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke); |
5838 | |
5839 | if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Val: E)) |
5840 | return EmitCXXMemberCallExpr(E: CE, ReturnValue, CallOrInvoke); |
5841 | |
5842 | if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(Val: E)) |
5843 | return EmitCUDAKernelCallExpr(E: CE, ReturnValue, CallOrInvoke); |
5844 | |
5845 | // A CXXOperatorCallExpr is created even for explicit object methods, but
5846 | // these should be treated like static function calls.
5847 | if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(Val: E)) |
5848 | if (const auto *MD = |
5849 | dyn_cast_if_present<CXXMethodDecl>(Val: CE->getCalleeDecl()); |
5850 | MD && MD->isImplicitObjectMemberFunction()) |
5851 | return EmitCXXOperatorMemberCallExpr(E: CE, MD, ReturnValue, CallOrInvoke); |
5852 | |
5853 | CGCallee callee = EmitCallee(E: E->getCallee()); |
5854 | |
5855 | if (callee.isBuiltin()) { |
5856 | return EmitBuiltinExpr(GD: callee.getBuiltinDecl(), BuiltinID: callee.getBuiltinID(), |
5857 | E, ReturnValue); |
5858 | } |
5859 | |
5860 | if (callee.isPseudoDestructor()) { |
5861 | return EmitCXXPseudoDestructorExpr(E: callee.getPseudoDestructorExpr()); |
5862 | } |
5863 | |
5864 | return EmitCall(FnType: E->getCallee()->getType(), Callee: callee, E, ReturnValue, |
5865 | /*Chain=*/nullptr, CallOrInvoke); |
5866 | } |
5867 | |
5868 | /// Emit a CallExpr without considering whether it might be a subclass. |
5869 | RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E, |
5870 | ReturnValueSlot ReturnValue, |
5871 | llvm::CallBase **CallOrInvoke) { |
5872 | CGCallee Callee = EmitCallee(E: E->getCallee()); |
5873 | return EmitCall(FnType: E->getCallee()->getType(), Callee, E, ReturnValue, |
5874 | /*Chain=*/nullptr, CallOrInvoke); |
5875 | } |
5876 | |
5877 | // Detect the unusual situation where an inline version is shadowed by a |
5878 | // non-inline version. In that case we should pick the external one |
5879 | // everywhere. That's GCC behavior too. |
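     | // A typical inline builtin declaration looks like (illustrative sketch):
     | //
     | //   extern inline __attribute__((gnu_inline, always_inline)) void *
     | //   memcpy(void *dst, const void *src, unsigned long n) { /* custom */ }
     | //
     | // If a later non-inline declaration shadows it, calls should resolve to
     | // the external definition instead.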
5880 | static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) { |
5881 | for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl()) |
5882 | if (!PD->isInlineBuiltinDeclaration()) |
5883 | return false; |
5884 | return true; |
5885 | } |
5886 | |
5887 | static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) { |
5888 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
5889 | |
5890 | if (auto builtinID = FD->getBuiltinID()) { |
5891 | std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str(); |
5892 | std::string NoBuiltins = "no-builtins" ; |
5893 | |
5894 | StringRef Ident = CGF.CGM.getMangledName(GD); |
5895 | std::string FDInlineName = (Ident + ".inline" ).str(); |
5896 | |
5897 | bool IsPredefinedLibFunction = |
5898 | CGF.getContext().BuiltinInfo.isPredefinedLibFunction(ID: builtinID); |
5899 | bool HasAttributeNoBuiltin = |
5900 | CGF.CurFn->getAttributes().hasFnAttr(Kind: NoBuiltinFD) || |
5901 | CGF.CurFn->getAttributes().hasFnAttr(Kind: NoBuiltins); |
5902 | |
5903 | // When directly calling an inline builtin, call it through its mangled
5904 | // name to make it clear it's not the actual builtin.
5905 | if (CGF.CurFn->getName() != FDInlineName && |
5906 | OnlyHasInlineBuiltinDeclaration(FD)) { |
5907 | llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD); |
5908 | llvm::Function *Fn = llvm::cast<llvm::Function>(Val: CalleePtr); |
5909 | llvm::Module *M = Fn->getParent(); |
5910 | llvm::Function *Clone = M->getFunction(Name: FDInlineName); |
5911 | if (!Clone) { |
5912 | Clone = llvm::Function::Create(Ty: Fn->getFunctionType(), |
5913 | Linkage: llvm::GlobalValue::InternalLinkage, |
5914 | AddrSpace: Fn->getAddressSpace(), N: FDInlineName, M); |
5915 | Clone->addFnAttr(Kind: llvm::Attribute::AlwaysInline); |
5916 | } |
5917 | return CGCallee::forDirect(functionPtr: Clone, abstractInfo: GD); |
5918 | } |
5919 | |
5920 | // Replaceable builtins provide their own implementation of a builtin. If we
5921 | // are in an inline builtin implementation, avoid trivial infinite
5922 | // recursion. Honor __attribute__((no_builtin("foo"))) or
5923 | // __attribute__((no_builtin)) on the current function unless foo is not a
5924 | // predefined library function, in which case we must generate the builtin
5925 | // no matter what.
5926 | else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin) |
5927 | return CGCallee::forBuiltin(builtinID, builtinDecl: FD); |
5928 | } |
5929 | |
5930 | llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD); |
5931 | if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice && |
5932 | FD->hasAttr<CUDAGlobalAttr>()) |
5933 | CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub( |
5934 | Handle: cast<llvm::GlobalValue>(Val: CalleePtr->stripPointerCasts())); |
5935 | |
5936 | return CGCallee::forDirect(functionPtr: CalleePtr, abstractInfo: GD); |
5937 | } |
5938 | |
5939 | static GlobalDecl getGlobalDeclForDirectCall(const FunctionDecl *FD) { |
5940 | if (DeviceKernelAttr::isOpenCLSpelling(A: FD->getAttr<DeviceKernelAttr>())) |
5941 | return GlobalDecl(FD, KernelReferenceKind::Stub); |
5942 | return GlobalDecl(FD); |
5943 | } |
5944 | |
5945 | CGCallee CodeGenFunction::EmitCallee(const Expr *E) { |
5946 | E = E->IgnoreParens(); |
5947 | |
5948 | // Look through function-to-pointer decay. |
5949 | if (auto ICE = dyn_cast<ImplicitCastExpr>(Val: E)) { |
5950 | if (ICE->getCastKind() == CK_FunctionToPointerDecay || |
5951 | ICE->getCastKind() == CK_BuiltinFnToFnPtr) { |
5952 | return EmitCallee(E: ICE->getSubExpr()); |
5953 | } |
5954 | |
5955 | // Try to remember the original __ptrauth qualifier for loads of |
5956 | // function pointers. |
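     | // For example (sketch, assuming arm64e-style pointer authentication; the
     | // key name comes from <ptrauth.h>):
     | //   void (*fp)(void) __ptrauth(ptrauth_key_function_pointer, 0, 42);
     | //   fp();   // the load of 'fp' keeps its original signing schema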
5957 | if (ICE->getCastKind() == CK_LValueToRValue) { |
5958 | const Expr *SubExpr = ICE->getSubExpr(); |
5959 | if (const auto *PtrType = SubExpr->getType()->getAs<PointerType>()) { |
5960 | std::pair<llvm::Value *, CGPointerAuthInfo> Result = |
5961 | EmitOrigPointerRValue(E); |
5962 | |
5963 | QualType FunctionType = PtrType->getPointeeType(); |
5964 | assert(FunctionType->isFunctionType()); |
5965 | |
5966 | GlobalDecl GD; |
5967 | if (const auto *VD = |
5968 | dyn_cast_or_null<VarDecl>(Val: E->getReferencedDeclOfCallee())) { |
5969 | GD = GlobalDecl(VD); |
5970 | } |
5971 | CGCalleeInfo CalleeInfo(FunctionType->getAs<FunctionProtoType>(), GD); |
5972 | CGCallee Callee(CalleeInfo, Result.first, Result.second); |
5973 | return Callee; |
5974 | } |
5975 | } |
5976 | |
5977 | // Resolve direct calls. |
5978 | } else if (auto DRE = dyn_cast<DeclRefExpr>(Val: E)) { |
5979 | if (auto FD = dyn_cast<FunctionDecl>(Val: DRE->getDecl())) { |
5980 | return EmitDirectCallee(CGF&: *this, GD: getGlobalDeclForDirectCall(FD)); |
5981 | } |
5982 | } else if (auto ME = dyn_cast<MemberExpr>(Val: E)) { |
5983 | if (auto FD = dyn_cast<FunctionDecl>(Val: ME->getMemberDecl())) { |
5984 | EmitIgnoredExpr(E: ME->getBase()); |
5985 | return EmitDirectCallee(CGF&: *this, GD: FD); |
5986 | } |
5987 | |
5988 | // Look through template substitutions. |
5989 | } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(Val: E)) { |
5990 | return EmitCallee(E: NTTP->getReplacement()); |
5991 | |
5992 | // Treat pseudo-destructor calls differently. |
5993 | } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(Val: E)) { |
5994 | return CGCallee::forPseudoDestructor(E: PDE); |
5995 | } |
5996 | |
5997 | // Otherwise, we have an indirect reference. |
5998 | llvm::Value *calleePtr; |
5999 | QualType functionType; |
6000 | if (auto ptrType = E->getType()->getAs<PointerType>()) { |
6001 | calleePtr = EmitScalarExpr(E); |
6002 | functionType = ptrType->getPointeeType(); |
6003 | } else { |
6004 | functionType = E->getType(); |
6005 | calleePtr = EmitLValue(E, IsKnownNonNull: KnownNonNull).getPointer(CGF&: *this); |
6006 | } |
6007 | assert(functionType->isFunctionType()); |
6008 | |
6009 | GlobalDecl GD; |
6010 | if (const auto *VD = |
6011 | dyn_cast_or_null<VarDecl>(Val: E->getReferencedDeclOfCallee())) |
6012 | GD = GlobalDecl(VD); |
6013 | |
6014 | CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD); |
6015 | CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(T: functionType); |
6016 | CGCallee callee(calleeInfo, calleePtr, pointerAuth); |
6017 | return callee; |
6018 | } |
6019 | |
6020 | LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { |
6021 | // Comma expressions just emit their LHS then their RHS as an l-value. |
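     | // For example (sketch): in "(f(), x) = 3", f() is emitted only for its
     | // side effects and the l-value of 'x' is the result.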
6022 | if (E->getOpcode() == BO_Comma) { |
6023 | EmitIgnoredExpr(E: E->getLHS()); |
6024 | EnsureInsertPoint(); |
6025 | return EmitLValue(E: E->getRHS()); |
6026 | } |
6027 | |
6028 | if (E->getOpcode() == BO_PtrMemD || |
6029 | E->getOpcode() == BO_PtrMemI) |
6030 | return EmitPointerToDataMemberBinaryExpr(E); |
6031 | |
6032 | assert(E->getOpcode() == BO_Assign && "unexpected binary l-value" ); |
6033 | |
6034 | // Create a Key Instructions source location atom group that covers both |
6035 | // LHS and RHS expressions. Nested RHS expressions may get subsequently |
6036 | // separately grouped (1 below): |
6037 | // |
6038 | // 1. `a = b = c` -> Two atoms. |
6039 | // 2. `x = new(1)` -> One atom (for both addr store and value store). |
6040 | // 3. Complex and agg assignment -> One atom. |
6041 | ApplyAtomGroup Grp(getDebugInfo()); |
6042 | |
6043 | // Note that in all of these cases, __block variables need the RHS |
6044 | // evaluated first just in case the variable gets moved by the RHS. |
6045 | |
6046 | switch (getEvaluationKind(T: E->getType())) { |
6047 | case TEK_Scalar: { |
6048 | if (PointerAuthQualifier PtrAuth = |
6049 | E->getLHS()->getType().getPointerAuth()) { |
6050 | LValue LV = EmitCheckedLValue(E: E->getLHS(), TCK: TCK_Store); |
6051 | LValue CopiedLV = LV; |
6052 | CopiedLV.getQuals().removePointerAuth(); |
6053 | llvm::Value *RV = |
6054 | EmitPointerAuthQualify(Qualifier: PtrAuth, PointerExpr: E->getRHS(), StorageAddress: CopiedLV.getAddress()); |
6055 | EmitNullabilityCheck(LHS: CopiedLV, RHS: RV, Loc: E->getExprLoc()); |
6056 | EmitStoreThroughLValue(Src: RValue::get(V: RV), Dst: CopiedLV); |
6057 | return LV; |
6058 | } |
6059 | |
6060 | switch (E->getLHS()->getType().getObjCLifetime()) { |
6061 | case Qualifiers::OCL_Strong: |
6062 | return EmitARCStoreStrong(e: E, /*ignored*/ false).first; |
6063 | |
6064 | case Qualifiers::OCL_Autoreleasing: |
6065 | return EmitARCStoreAutoreleasing(e: E).first; |
6066 | |
6067 | // No reason to do any of these differently. |
6068 | case Qualifiers::OCL_None: |
6069 | case Qualifiers::OCL_ExplicitNone: |
6070 | case Qualifiers::OCL_Weak: |
6071 | break; |
6072 | } |
6073 | |
6074 | // TODO: Can we de-duplicate this code with the corresponding code in |
6075 | // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works? |
6076 | RValue RV; |
6077 | llvm::Value *Previous = nullptr; |
6078 | QualType SrcType = E->getRHS()->getType(); |
6079 | // Check if the LHS is a bitfield. If the RHS contains an implicit cast
6080 | // expression, we want to extract that value and potentially (if the
6081 | // bitfield sanitizer is enabled) use it to check for an implicit conversion.
6082 | if (E->getLHS()->refersToBitField()) { |
6083 | llvm::Value *RHS = |
6084 | EmitWithOriginalRHSBitfieldAssignment(E, Previous: &Previous, SrcType: &SrcType); |
6085 | RV = RValue::get(V: RHS); |
6086 | } else |
6087 | RV = EmitAnyExpr(E: E->getRHS()); |
6088 | |
6089 | LValue LV = EmitCheckedLValue(E: E->getLHS(), TCK: TCK_Store); |
6090 | |
6091 | if (RV.isScalar()) |
6092 | EmitNullabilityCheck(LHS: LV, RHS: RV.getScalarVal(), Loc: E->getExprLoc()); |
6093 | |
6094 | if (LV.isBitField()) { |
6095 | llvm::Value *Result = nullptr; |
6096 | // If bitfield sanitizers are enabled we want to use the result |
6097 | // to check whether a truncation or sign change has occurred. |
6098 | if (SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion)) |
6099 | EmitStoreThroughBitfieldLValue(Src: RV, Dst: LV, Result: &Result); |
6100 | else |
6101 | EmitStoreThroughBitfieldLValue(Src: RV, Dst: LV); |
6102 | |
6103 | // If the expression contained an implicit conversion, make sure |
6104 | // to use the value before the scalar conversion. |
6105 | llvm::Value *Src = Previous ? Previous : RV.getScalarVal(); |
6106 | QualType DstType = E->getLHS()->getType(); |
6107 | EmitBitfieldConversionCheck(Src, SrcType, Dst: Result, DstType, |
6108 | Info: LV.getBitFieldInfo(), Loc: E->getExprLoc()); |
6109 | } else |
6110 | EmitStoreThroughLValue(Src: RV, Dst: LV); |
6111 | |
6112 | if (getLangOpts().OpenMP) |
6113 | CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF&: *this, |
6114 | LHS: E->getLHS()); |
6115 | return LV; |
6116 | } |
6117 | |
6118 | case TEK_Complex: |
6119 | return EmitComplexAssignmentLValue(E); |
6120 | |
6121 | case TEK_Aggregate: |
6122 | // If the language is HLSL and the LHS is a constant array, then we are
6123 | // performing a copy assignment and must call a special function, because
6124 | // EmitAggExprToLValue emits to a temporary LValue.
6125 | if (getLangOpts().HLSL && E->getLHS()->getType()->isConstantArrayType()) |
6126 | return EmitHLSLArrayAssignLValue(E); |
6127 | |
6128 | return EmitAggExprToLValue(E); |
6129 | } |
6130 | llvm_unreachable("bad evaluation kind" ); |
6131 | } |
6132 | |
6133 | // This function implements trivial copy assignment for HLSL's |
6134 | // assignable constant arrays. |
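     | //
     | // For example (HLSL sketch):
     | //   int A[4];
     | //   int B[4];
     | //   A = B;   // elementwise copy; the arrays do not decay to pointers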
6135 | LValue CodeGenFunction::EmitHLSLArrayAssignLValue(const BinaryOperator *E) { |
6136 | // Don't emit an LValue for the RHS because it might not be an LValue.
6137 | LValue LHS = EmitLValue(E: E->getLHS()); |
6138 | // In C the RHS of an assignment operator is an RValue.
6139 | // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
6140 | // EmitInitializationToLValue to emit an RValue into an LValue.
6141 | EmitInitializationToLValue(E: E->getRHS(), LV: LHS); |
6142 | return LHS; |
6143 | } |
6144 | |
6145 | LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E, |
6146 | llvm::CallBase **CallOrInvoke) { |
6147 | RValue RV = EmitCallExpr(E, ReturnValue: ReturnValueSlot(), CallOrInvoke); |
6148 | |
6149 | if (!RV.isScalar()) |
6150 | return MakeAddrLValue(Addr: RV.getAggregateAddress(), T: E->getType(), |
6151 | Source: AlignmentSource::Decl); |
6152 | |
6153 | assert(E->getCallReturnType(getContext())->isReferenceType() && |
6154 | "Can't have a scalar return unless the return type is a " |
6155 | "reference type!" ); |
6156 | |
6157 | return MakeNaturalAlignPointeeAddrLValue(V: RV.getScalarVal(), T: E->getType()); |
6158 | } |
6159 | |
6160 | LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { |
6161 | // FIXME: This shouldn't require another copy. |
6162 | return EmitAggExprToLValue(E); |
6163 | } |
6164 | |
6165 | LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { |
6166 | assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() |
6167 | && "binding l-value to type which needs a temporary" ); |
6168 | AggValueSlot Slot = CreateAggTemp(T: E->getType()); |
6169 | EmitCXXConstructExpr(E, Dest: Slot); |
6170 | return MakeAddrLValue(Addr: Slot.getAddress(), T: E->getType(), Source: AlignmentSource::Decl); |
6171 | } |
6172 | |
6173 | LValue |
6174 | CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { |
6175 | return MakeNaturalAlignRawAddrLValue(V: EmitCXXTypeidExpr(E), T: E->getType()); |
6176 | } |
6177 | |
6178 | Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) { |
6179 | return CGM.GetAddrOfMSGuidDecl(GD: E->getGuidDecl()) |
6180 | .withElementType(ElemTy: ConvertType(T: E->getType())); |
6181 | } |
6182 | |
6183 | LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) { |
6184 | return MakeAddrLValue(Addr: EmitCXXUuidofExpr(E), T: E->getType(), |
6185 | Source: AlignmentSource::Decl); |
6186 | } |
6187 | |
6188 | LValue |
6189 | CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { |
  AggValueSlot Slot = CreateAggTemp(T: E->getType(), Name: "temp.lvalue");
6191 | Slot.setExternallyDestructed(); |
6192 | EmitAggExpr(E: E->getSubExpr(), AS: Slot); |
6193 | EmitCXXTemporary(Temporary: E->getTemporary(), TempType: E->getType(), Ptr: Slot.getAddress()); |
6194 | return MakeAddrLValue(Addr: Slot.getAddress(), T: E->getType(), Source: AlignmentSource::Decl); |
6195 | } |
6196 | |
6197 | LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { |
6198 | RValue RV = EmitObjCMessageExpr(E); |
6199 | |
6200 | if (!RV.isScalar()) |
6201 | return MakeAddrLValue(Addr: RV.getAggregateAddress(), T: E->getType(), |
6202 | Source: AlignmentSource::Decl); |
6203 | |
6204 | assert(E->getMethodDecl()->getReturnType()->isReferenceType() && |
6205 | "Can't have a scalar return unless the return type is a " |
6206 | "reference type!" ); |
6207 | |
6208 | return MakeNaturalAlignPointeeAddrLValue(V: RV.getScalarVal(), T: E->getType()); |
6209 | } |
6210 | |
6211 | LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { |
6212 | Address V = |
6213 | CGM.getObjCRuntime().GetAddrOfSelector(CGF&: *this, Sel: E->getSelector()); |
6214 | return MakeAddrLValue(Addr: V, T: E->getType(), Source: AlignmentSource::Decl); |
6215 | } |
6216 | |
6217 | llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, |
6218 | const ObjCIvarDecl *Ivar) { |
6219 | return CGM.getObjCRuntime().EmitIvarOffset(CGF&: *this, Interface, Ivar); |
6220 | } |
6221 | |
6222 | llvm::Value * |
6223 | CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, |
6224 | const ObjCIvarDecl *Ivar) { |
6225 | llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar); |
6226 | QualType PointerDiffType = getContext().getPointerDiffType(); |
6227 | return Builder.CreateZExtOrTrunc(V: OffsetValue, |
6228 | DestTy: getTypes().ConvertType(T: PointerDiffType)); |
6229 | } |
6230 | |
6231 | LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, |
6232 | llvm::Value *BaseValue, |
6233 | const ObjCIvarDecl *Ivar, |
6234 | unsigned CVRQualifiers) { |
6235 | return CGM.getObjCRuntime().EmitObjCValueForIvar(CGF&: *this, ObjectTy, BaseValue, |
6236 | Ivar, CVRQualifiers); |
6237 | } |
6238 | |
6239 | LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { |
6240 | // FIXME: A lot of the code below could be shared with EmitMemberExpr. |
6241 | llvm::Value *BaseValue = nullptr; |
6242 | const Expr *BaseExpr = E->getBase(); |
6243 | Qualifiers BaseQuals; |
6244 | QualType ObjectTy; |
6245 | if (E->isArrow()) { |
6246 | BaseValue = EmitScalarExpr(E: BaseExpr); |
6247 | ObjectTy = BaseExpr->getType()->getPointeeType(); |
6248 | BaseQuals = ObjectTy.getQualifiers(); |
6249 | } else { |
6250 | LValue BaseLV = EmitLValue(E: BaseExpr); |
6251 | BaseValue = BaseLV.getPointer(CGF&: *this); |
6252 | ObjectTy = BaseExpr->getType(); |
6253 | BaseQuals = ObjectTy.getQualifiers(); |
6254 | } |
6255 | |
6256 | LValue LV = |
6257 | EmitLValueForIvar(ObjectTy, BaseValue, Ivar: E->getDecl(), |
6258 | CVRQualifiers: BaseQuals.getCVRQualifiers()); |
6259 | setObjCGCLValueClass(Ctx: getContext(), E, LV); |
6260 | return LV; |
6261 | } |
6262 | |
6263 | LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { |
  // Can only get an l-value for a statement expression returning an
  // aggregate type.
6265 | RValue RV = EmitAnyExprToTemp(E); |
6266 | return MakeAddrLValue(Addr: RV.getAggregateAddress(), T: E->getType(), |
6267 | Source: AlignmentSource::Decl); |
6268 | } |
6269 | |
6270 | RValue CodeGenFunction::EmitCall(QualType CalleeType, |
6271 | const CGCallee &OrigCallee, const CallExpr *E, |
6272 | ReturnValueSlot ReturnValue, |
6273 | llvm::Value *Chain, |
6274 | llvm::CallBase **CallOrInvoke, |
6275 | CGFunctionInfo const **ResolvedFnInfo) { |
6276 | // Get the actual function type. The callee type will always be a pointer to |
6277 | // function type or a block pointer type. |
6278 | assert(CalleeType->isFunctionPointerType() && |
6279 | "Call must have function pointer type!" ); |
6280 | |
6281 | const Decl *TargetDecl = |
6282 | OrigCallee.getAbstractInfo().getCalleeDecl().getDecl(); |
6283 | |
6284 | assert((!isa_and_present<FunctionDecl>(TargetDecl) || |
6285 | !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) && |
6286 | "trying to emit a call to an immediate function" ); |
6287 | |
6288 | CalleeType = getContext().getCanonicalType(T: CalleeType); |
6289 | |
6290 | auto PointeeType = cast<PointerType>(Val&: CalleeType)->getPointeeType(); |
6291 | |
6292 | CGCallee Callee = OrigCallee; |
6293 | |
6294 | if (SanOpts.has(K: SanitizerKind::Function) && |
6295 | (!TargetDecl || !isa<FunctionDecl>(Val: TargetDecl)) && |
6296 | !isa<FunctionNoProtoType>(Val: PointeeType)) { |
6297 | if (llvm::Constant *PrefixSig = |
6298 | CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) { |
6299 | auto CheckOrdinal = SanitizerKind::SO_Function; |
6300 | auto CheckHandler = SanitizerHandler::FunctionTypeMismatch; |
6301 | SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler); |
6302 | auto *TypeHash = getUBSanFunctionTypeHash(T: PointeeType); |
6303 | |
6304 | llvm::Type *PrefixSigType = PrefixSig->getType(); |
6305 | llvm::StructType *PrefixStructTy = llvm::StructType::get( |
6306 | Context&: CGM.getLLVMContext(), Elements: {PrefixSigType, Int32Ty}, /*isPacked=*/true); |
6307 | |
6308 | llvm::Value *CalleePtr = Callee.getFunctionPointer(); |
6309 | if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) { |
6310 | // Use raw pointer since we are using the callee pointer as data here. |
6311 | Address Addr = |
6312 | Address(CalleePtr, CalleePtr->getType(), |
6313 | CharUnits::fromQuantity( |
6314 | Quantity: CalleePtr->getPointerAlignment(DL: CGM.getDataLayout())), |
6315 | Callee.getPointerAuthInfo(), nullptr); |
6316 | CalleePtr = Addr.emitRawPointer(CGF&: *this); |
6317 | } |
6318 | |
6319 | // On 32-bit Arm, the low bit of a function pointer indicates whether |
6320 | // it's using the Arm or Thumb instruction set. The actual first |
6321 | // instruction lives at the same address either way, so we must clear |
6322 | // that low bit before using the function address to find the prefix |
6323 | // structure. |
6324 | // |
6325 | // This applies to both Arm and Thumb target triples, because |
6326 | // either one could be used in an interworking context where it |
6327 | // might be passed function pointers of both types. |
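      // For example, a Thumb function whose first instruction is at 0x8000
      // is referred to through the pointer value 0x8001; masking with ~1
      // below recovers the real address 0x8000.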
6328 | llvm::Value *AlignedCalleePtr; |
6329 | if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) { |
6330 | llvm::Value *CalleeAddress = |
6331 | Builder.CreatePtrToInt(V: CalleePtr, DestTy: IntPtrTy); |
6332 | llvm::Value *Mask = llvm::ConstantInt::get(Ty: IntPtrTy, V: ~1); |
6333 | llvm::Value *AlignedCalleeAddress = |
6334 | Builder.CreateAnd(LHS: CalleeAddress, RHS: Mask); |
6335 | AlignedCalleePtr = |
6336 | Builder.CreateIntToPtr(V: AlignedCalleeAddress, DestTy: CalleePtr->getType()); |
6337 | } else { |
6338 | AlignedCalleePtr = CalleePtr; |
6339 | } |
6340 | |
6341 | llvm::Value *CalleePrefixStruct = AlignedCalleePtr; |
6342 | llvm::Value *CalleeSigPtr = |
6343 | Builder.CreateConstGEP2_32(Ty: PrefixStructTy, Ptr: CalleePrefixStruct, Idx0: -1, Idx1: 0); |
6344 | llvm::Value *CalleeSig = |
6345 | Builder.CreateAlignedLoad(Ty: PrefixSigType, Addr: CalleeSigPtr, Align: getIntAlign()); |
6346 | llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(LHS: CalleeSig, RHS: PrefixSig); |
6347 | |
      llvm::BasicBlock *Cont = createBasicBlock(name: "cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock(name: "typecheck");
6350 | Builder.CreateCondBr(Cond: CalleeSigMatch, True: TypeCheck, False: Cont); |
6351 | |
6352 | EmitBlock(BB: TypeCheck); |
6353 | llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad( |
6354 | Ty: Int32Ty, |
6355 | Addr: Builder.CreateConstGEP2_32(Ty: PrefixStructTy, Ptr: CalleePrefixStruct, Idx0: -1, Idx1: 1), |
6356 | Align: getPointerAlign()); |
6357 | llvm::Value *CalleeTypeHashMatch = |
6358 | Builder.CreateICmpEQ(LHS: CalleeTypeHash, RHS: TypeHash); |
6359 | llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc: E->getBeginLoc()), |
6360 | EmitCheckTypeDescriptor(T: CalleeType)}; |
6361 | EmitCheck(Checked: std::make_pair(x&: CalleeTypeHashMatch, y&: CheckOrdinal), CheckHandler, |
6362 | StaticArgs: StaticData, DynamicArgs: {CalleePtr}); |
6363 | |
6364 | Builder.CreateBr(Dest: Cont); |
6365 | EmitBlock(BB: Cont); |
6366 | } |
6367 | } |
6368 | |
6369 | const auto *FnType = cast<FunctionType>(Val&: PointeeType); |
6370 | |
6371 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(Val: TargetDecl); |
6372 | FD && DeviceKernelAttr::isOpenCLSpelling(A: FD->getAttr<DeviceKernelAttr>())) |
6373 | CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FnType); |
6374 | |
6375 | bool CFIUnchecked = |
6376 | CalleeType->hasPointeeToToCFIUncheckedCalleeFunctionType(); |
6377 | |
6378 | // If we are checking indirect calls and this call is indirect, check that the |
6379 | // function pointer is a member of the bit set for the function type. |
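  // Conceptually, llvm.type.test(ptr, type-id) returns true iff ptr is a
  // member of the address set associated with the type metadata; the
  // LowerTypeTests pass later lowers this to an efficient bitset lookup.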
6380 | if (SanOpts.has(K: SanitizerKind::CFIICall) && |
6381 | (!TargetDecl || !isa<FunctionDecl>(Val: TargetDecl)) && !CFIUnchecked) { |
6382 | auto CheckOrdinal = SanitizerKind::SO_CFIICall; |
6383 | auto CheckHandler = SanitizerHandler::CFICheckFail; |
6384 | SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler); |
6385 | EmitSanitizerStatReport(SSK: llvm::SanStat_CFI_ICall); |
6386 | |
6387 | llvm::Metadata *MD; |
6388 | if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers) |
6389 | MD = CGM.CreateMetadataIdentifierGeneralized(T: QualType(FnType, 0)); |
6390 | else |
6391 | MD = CGM.CreateMetadataIdentifierForType(T: QualType(FnType, 0)); |
6392 | |
6393 | llvm::Value *TypeId = llvm::MetadataAsValue::get(Context&: getLLVMContext(), MD); |
6394 | |
6395 | llvm::Value *CalleePtr = Callee.getFunctionPointer(); |
6396 | llvm::Value *TypeTest = Builder.CreateCall( |
6397 | Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_test), Args: {CalleePtr, TypeId}); |
6398 | |
6399 | auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD); |
6400 | llvm::Constant *StaticData[] = { |
6401 | llvm::ConstantInt::get(Ty: Int8Ty, V: CFITCK_ICall), |
6402 | EmitCheckSourceLocation(Loc: E->getBeginLoc()), |
6403 | EmitCheckTypeDescriptor(T: QualType(FnType, 0)), |
6404 | }; |
6405 | if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) { |
6406 | EmitCfiSlowPathCheck(Ordinal: CheckOrdinal, Cond: TypeTest, TypeId: CrossDsoTypeId, Ptr: CalleePtr, |
6407 | StaticArgs: StaticData); |
6408 | } else { |
6409 | EmitCheck(Checked: std::make_pair(x&: TypeTest, y&: CheckOrdinal), CheckHandler, |
6410 | StaticArgs: StaticData, DynamicArgs: {CalleePtr, llvm::UndefValue::get(T: IntPtrTy)}); |
6411 | } |
6412 | } |
6413 | |
6414 | CallArgList Args; |
6415 | if (Chain) |
6416 | Args.add(rvalue: RValue::get(V: Chain), type: CGM.getContext().VoidPtrTy); |
6417 | |
6418 | // C++17 requires that we evaluate arguments to a call using assignment syntax |
6419 | // right-to-left, and that we evaluate arguments to certain other operators |
6420 | // left-to-right. Note that we allow this to override the order dictated by |
6421 | // the calling convention on the MS ABI, which means that parameter |
6422 | // destruction order is not necessarily reverse construction order. |
6423 | // FIXME: Revisit this based on C++ committee response to unimplementability. |
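  // For example, in 'a = f()' written with an overloaded operator=, C++17
  // requires f() to be evaluated before a (right-to-left), whereas in
  // 'a << b' the operands are evaluated left-to-right.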
6424 | EvaluationOrder Order = EvaluationOrder::Default; |
6425 | bool StaticOperator = false; |
6426 | if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(Val: E)) { |
6427 | if (OCE->isAssignmentOp()) |
6428 | Order = EvaluationOrder::ForceRightToLeft; |
6429 | else { |
6430 | switch (OCE->getOperator()) { |
6431 | case OO_LessLess: |
6432 | case OO_GreaterGreater: |
6433 | case OO_AmpAmp: |
6434 | case OO_PipePipe: |
6435 | case OO_Comma: |
6436 | case OO_ArrowStar: |
6437 | Order = EvaluationOrder::ForceLeftToRight; |
6438 | break; |
6439 | default: |
6440 | break; |
6441 | } |
6442 | } |
6443 | |
6444 | if (const auto *MD = |
6445 | dyn_cast_if_present<CXXMethodDecl>(Val: OCE->getCalleeDecl()); |
6446 | MD && MD->isStatic()) |
6447 | StaticOperator = true; |
6448 | } |
6449 | |
6450 | auto Arguments = E->arguments(); |
6451 | if (StaticOperator) { |
6452 | // If we're calling a static operator, we need to emit the object argument |
6453 | // and ignore it. |
6454 | EmitIgnoredExpr(E: E->getArg(Arg: 0)); |
6455 | Arguments = drop_begin(RangeOrContainer&: Arguments, N: 1); |
6456 | } |
6457 | EmitCallArgs(Args, Prototype: dyn_cast<FunctionProtoType>(Val: FnType), ArgRange: Arguments, |
6458 | AC: E->getDirectCallee(), /*ParamsToSkip=*/0, Order); |
6459 | |
6460 | const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall( |
6461 | Args, Ty: FnType, /*ChainCall=*/Chain); |
6462 | |
6463 | if (ResolvedFnInfo) |
6464 | *ResolvedFnInfo = &FnInfo; |
6465 | |
  // In HIP, a function pointer contains the kernel handle when it is used in
  // a triple-chevron launch. The kernel stub needs to be loaded from the
  // kernel handle and used as the callee.
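  // For example, given 'auto FP = kern;', launching 'FP<<<grid, block>>>(...)'
  // on the host uses FP's value as the kernel handle, not as the device stub.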
6469 | if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice && |
6470 | isa<CUDAKernelCallExpr>(Val: E) && |
6471 | (!TargetDecl || !isa<FunctionDecl>(Val: TargetDecl))) { |
6472 | llvm::Value *Handle = Callee.getFunctionPointer(); |
6473 | auto *Stub = Builder.CreateLoad( |
6474 | Addr: Address(Handle, Handle->getType(), CGM.getPointerAlign())); |
6475 | Callee.setFunctionPointer(Stub); |
6476 | } |
6477 | llvm::CallBase *LocalCallOrInvoke = nullptr; |
6478 | RValue Call = EmitCall(CallInfo: FnInfo, Callee, ReturnValue, Args, CallOrInvoke: &LocalCallOrInvoke, |
6479 | IsMustTail: E == MustTailCall, Loc: E->getExprLoc()); |
6480 | |
  // Generate the function declaration's DISubprogram so that it can be used
  // in debug info about call sites.
6483 | if (CGDebugInfo *DI = getDebugInfo()) { |
6484 | if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(Val: TargetDecl)) { |
6485 | FunctionArgList Args; |
6486 | QualType ResTy = BuildFunctionArgList(GD: CalleeDecl, Args); |
6487 | DI->EmitFuncDeclForCallSite(CallOrInvoke: LocalCallOrInvoke, |
6488 | CalleeType: DI->getFunctionType(FD: CalleeDecl, RetTy: ResTy, Args), |
6489 | CalleeDecl); |
6490 | } |
6491 | } |
6492 | if (CallOrInvoke) |
6493 | *CallOrInvoke = LocalCallOrInvoke; |
6494 | |
6495 | return Call; |
6496 | } |
6497 | |
6498 | LValue CodeGenFunction:: |
6499 | EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { |
6500 | Address BaseAddr = Address::invalid(); |
6501 | if (E->getOpcode() == BO_PtrMemI) { |
6502 | BaseAddr = EmitPointerWithAlignment(E: E->getLHS()); |
6503 | } else { |
6504 | BaseAddr = EmitLValue(E: E->getLHS()).getAddress(); |
6505 | } |
6506 | |
6507 | llvm::Value *OffsetV = EmitScalarExpr(E: E->getRHS()); |
6508 | const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>(); |
6509 | |
6510 | LValueBaseInfo BaseInfo; |
6511 | TBAAAccessInfo TBAAInfo; |
6512 | bool IsInBounds = !getLangOpts().PointerOverflowDefined && |
6513 | !isUnderlyingBasePointerConstantNull(E: E->getLHS()); |
6514 | Address MemberAddr = EmitCXXMemberDataPointerAddress( |
6515 | E, base: BaseAddr, memberPtr: OffsetV, memberPtrType: MPT, IsInBounds, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
6516 | |
6517 | return MakeAddrLValue(Addr: MemberAddr, T: MPT->getPointeeType(), BaseInfo, TBAAInfo); |
6518 | } |
6519 | |
6520 | /// Given the address of a temporary variable, produce an r-value of |
6521 | /// its type. |
6522 | RValue CodeGenFunction::convertTempToRValue(Address addr, |
6523 | QualType type, |
6524 | SourceLocation loc) { |
6525 | LValue lvalue = MakeAddrLValue(Addr: addr, T: type, Source: AlignmentSource::Decl); |
6526 | switch (getEvaluationKind(T: type)) { |
6527 | case TEK_Complex: |
6528 | return RValue::getComplex(C: EmitLoadOfComplex(src: lvalue, loc)); |
6529 | case TEK_Aggregate: |
6530 | return lvalue.asAggregateRValue(); |
6531 | case TEK_Scalar: |
6532 | return RValue::get(V: EmitLoadOfScalar(lvalue, Loc: loc)); |
6533 | } |
6534 | llvm_unreachable("bad evaluation kind" ); |
6535 | } |
6536 | |
6537 | void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) { |
6538 | assert(Val->getType()->isFPOrFPVectorTy()); |
6539 | if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val)) |
6540 | return; |
6541 | |
6542 | llvm::MDBuilder MDHelper(getLLVMContext()); |
6543 | llvm::MDNode *Node = MDHelper.createFPMath(Accuracy); |
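  // The node records the maximum allowed error in ULPs, e.g.
  // '!{float 2.500000e+00}', and is attached as !fpmath metadata below.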
6544 | |
6545 | cast<llvm::Instruction>(Val)->setMetadata(KindID: llvm::LLVMContext::MD_fpmath, Node); |
6546 | } |
6547 | |
6548 | void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) { |
6549 | llvm::Type *EltTy = Val->getType()->getScalarType(); |
6550 | if (!EltTy->isFloatTy()) |
6551 | return; |
6552 | |
6553 | if ((getLangOpts().OpenCL && |
6554 | !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) || |
6555 | (getLangOpts().HIP && getLangOpts().CUDAIsDevice && |
6556 | !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) { |
    // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp
6558 | // |
6559 | // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt |
6560 | // build option allows an application to specify that single precision |
6561 | // floating-point divide (x/y and 1/x) and sqrt used in the program |
6562 | // source are correctly rounded. |
6563 | // |
6564 | // TODO: CUDA has a prec-sqrt flag |
6565 | SetFPAccuracy(Val, Accuracy: 3.0f); |
6566 | } |
6567 | } |
6568 | |
6569 | void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) { |
6570 | llvm::Type *EltTy = Val->getType()->getScalarType(); |
6571 | if (!EltTy->isFloatTy()) |
6572 | return; |
6573 | |
6574 | if ((getLangOpts().OpenCL && |
6575 | !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) || |
6576 | (getLangOpts().HIP && getLangOpts().CUDAIsDevice && |
6577 | !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) { |
6578 | // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp |
6579 | // |
6580 | // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt |
6581 | // build option allows an application to specify that single precision |
6582 | // floating-point divide (x/y and 1/x) and sqrt used in the program |
6583 | // source are correctly rounded. |
6584 | // |
6585 | // TODO: CUDA has a prec-div flag |
6586 | SetFPAccuracy(Val, Accuracy: 2.5f); |
6587 | } |
6588 | } |
6589 | |
6590 | namespace { |
6591 | struct LValueOrRValue { |
6592 | LValue LV; |
6593 | RValue RV; |
6594 | }; |
6595 | } |
6596 | |
6597 | static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, |
6598 | const PseudoObjectExpr *E, |
6599 | bool forLValue, |
6600 | AggValueSlot slot) { |
6601 | SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; |
6602 | |
6603 | // Find the result expression, if any. |
6604 | const Expr *resultExpr = E->getResultExpr(); |
6605 | LValueOrRValue result; |
6606 | |
6607 | for (PseudoObjectExpr::const_semantics_iterator |
6608 | i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { |
6609 | const Expr *semantic = *i; |
6610 | |
6611 | // If this semantic expression is an opaque value, bind it |
6612 | // to the result of its source expression. |
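    // For example, in the Objective-C expression 'obj.prop += 1', the base
    // 'obj' is represented by an opaque value so that it is evaluated exactly
    // once and shared between the getter and setter semantic expressions.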
6613 | if (const auto *ov = dyn_cast<OpaqueValueExpr>(Val: semantic)) { |
6614 | // Skip unique OVEs. |
6615 | if (ov->isUnique()) { |
6616 | assert(ov != resultExpr && |
6617 | "A unique OVE cannot be used as the result expression" ); |
6618 | continue; |
6619 | } |
6620 | |
6621 | // If this is the result expression, we may need to evaluate |
6622 | // directly into the slot. |
6623 | typedef CodeGenFunction::OpaqueValueMappingData OVMA; |
6624 | OVMA opaqueData; |
6625 | if (ov == resultExpr && ov->isPRValue() && !forLValue && |
6626 | CodeGenFunction::hasAggregateEvaluationKind(T: ov->getType())) { |
6627 | CGF.EmitAggExpr(E: ov->getSourceExpr(), AS: slot); |
6628 | LValue LV = CGF.MakeAddrLValue(Addr: slot.getAddress(), T: ov->getType(), |
6629 | Source: AlignmentSource::Decl); |
6630 | opaqueData = OVMA::bind(CGF, ov, lv: LV); |
6631 | result.RV = slot.asRValue(); |
6632 | |
6633 | // Otherwise, emit as normal. |
6634 | } else { |
6635 | opaqueData = OVMA::bind(CGF, ov, e: ov->getSourceExpr()); |
6636 | |
6637 | // If this is the result, also evaluate the result now. |
6638 | if (ov == resultExpr) { |
6639 | if (forLValue) |
6640 | result.LV = CGF.EmitLValue(E: ov); |
6641 | else |
6642 | result.RV = CGF.EmitAnyExpr(E: ov, aggSlot: slot); |
6643 | } |
6644 | } |
6645 | |
6646 | opaques.push_back(Elt: opaqueData); |
6647 | |
6648 | // Otherwise, if the expression is the result, evaluate it |
6649 | // and remember the result. |
6650 | } else if (semantic == resultExpr) { |
6651 | if (forLValue) |
6652 | result.LV = CGF.EmitLValue(E: semantic); |
6653 | else |
6654 | result.RV = CGF.EmitAnyExpr(E: semantic, aggSlot: slot); |
6655 | |
6656 | // Otherwise, evaluate the expression in an ignored context. |
6657 | } else { |
6658 | CGF.EmitIgnoredExpr(E: semantic); |
6659 | } |
6660 | } |
6661 | |
6662 | // Unbind all the opaques now. |
6663 | for (CodeGenFunction::OpaqueValueMappingData &opaque : opaques) |
6664 | opaque.unbind(CGF); |
6665 | |
6666 | return result; |
6667 | } |
6668 | |
6669 | RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E, |
6670 | AggValueSlot slot) { |
6671 | return emitPseudoObjectExpr(CGF&: *this, E, forLValue: false, slot).RV; |
6672 | } |
6673 | |
6674 | LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) { |
6675 | return emitPseudoObjectExpr(CGF&: *this, E, forLValue: true, slot: AggValueSlot::ignored()).LV; |
6676 | } |
6677 | |
6678 | void CodeGenFunction::FlattenAccessAndType( |
6679 | Address Addr, QualType AddrType, |
6680 | SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList, |
6681 | SmallVectorImpl<QualType> &FlatTypes) { |
  // WorkList holds the types still to be processed, each paired with the
  // index list used to address that type's field within Addr in a GEP.
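  // For example, 'struct { int A; float B[2]; }' flattens to the accesses
  // { A, B[0], B[1] } with flat types { int, float, float }.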
6684 | llvm::SmallVector<std::pair<QualType, llvm::SmallVector<llvm::Value *, 4>>, |
6685 | 16> |
6686 | WorkList; |
6687 | llvm::IntegerType *IdxTy = llvm::IntegerType::get(C&: getLLVMContext(), NumBits: 32); |
  // Addr should be a pointer, so the leading index 0 'dereferences' it.
6689 | WorkList.push_back(Elt: {AddrType, {llvm::ConstantInt::get(Ty: IdxTy, V: 0)}}); |
6690 | |
6691 | while (!WorkList.empty()) { |
6692 | auto [T, IdxList] = WorkList.pop_back_val(); |
6693 | T = T.getCanonicalType().getUnqualifiedType(); |
6694 | assert(!isa<MatrixType>(T) && "Matrix types not yet supported in HLSL" ); |
6695 | if (const auto *CAT = dyn_cast<ConstantArrayType>(Val&: T)) { |
6696 | uint64_t Size = CAT->getZExtSize(); |
6697 | for (int64_t I = Size - 1; I > -1; I--) { |
6698 | llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList; |
6699 | IdxListCopy.push_back(Elt: llvm::ConstantInt::get(Ty: IdxTy, V: I)); |
6700 | WorkList.emplace_back(Args: CAT->getElementType(), Args&: IdxListCopy); |
6701 | } |
6702 | } else if (const auto *RT = dyn_cast<RecordType>(Val&: T)) { |
6703 | const RecordDecl *Record = RT->getDecl(); |
6704 | assert(!Record->isUnion() && "Union types not supported in flat cast." ); |
6705 | |
6706 | const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Val: Record); |
6707 | |
6708 | llvm::SmallVector<QualType, 16> FieldTypes; |
6709 | if (CXXD && CXXD->isStandardLayout()) |
6710 | Record = CXXD->getStandardLayoutBaseWithFields(); |
6711 | |
      // Deal with potential base classes.
6713 | if (CXXD && !CXXD->isStandardLayout()) { |
6714 | for (auto &Base : CXXD->bases()) |
6715 | FieldTypes.push_back(Elt: Base.getType()); |
6716 | } |
6717 | |
6718 | for (auto *FD : Record->fields()) |
6719 | FieldTypes.push_back(Elt: FD->getType()); |
6720 | |
6721 | for (int64_t I = FieldTypes.size() - 1; I > -1; I--) { |
6722 | llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList; |
6723 | IdxListCopy.push_back(Elt: llvm::ConstantInt::get(Ty: IdxTy, V: I)); |
6724 | WorkList.insert(I: WorkList.end(), Elt: {FieldTypes[I], IdxListCopy}); |
6725 | } |
6726 | } else if (const auto *VT = dyn_cast<VectorType>(Val&: T)) { |
6727 | llvm::Type *LLVMT = ConvertTypeForMem(T); |
6728 | CharUnits Align = getContext().getTypeAlignInChars(T); |
6729 | Address GEP = |
          Builder.CreateInBoundsGEP(Addr, IdxList, ElementType: LLVMT, Align, Name: "vector.gep");
6731 | for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) { |
6732 | llvm::Value *Idx = llvm::ConstantInt::get(Ty: IdxTy, V: I); |
        // GEP into individual vector elements is not recommended, so record
        // the element index and combine the GEP with an extract/insert later.
6735 | AccessList.emplace_back(Args&: GEP, Args&: Idx); |
6736 | FlatTypes.push_back(Elt: VT->getElementType()); |
6737 | } |
6738 | } else { |
      // A scalar/builtin type.
6740 | llvm::Type *LLVMT = ConvertTypeForMem(T); |
6741 | CharUnits Align = getContext().getTypeAlignInChars(T); |
6742 | Address GEP = |
          Builder.CreateInBoundsGEP(Addr, IdxList, ElementType: LLVMT, Align, Name: "gep");
6744 | AccessList.emplace_back(Args&: GEP, Args: nullptr); |
6745 | FlatTypes.push_back(Elt: T); |
6746 | } |
6747 | } |
6748 | } |
6749 | |