//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Passes/OptimizationLevel.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <optional>
#include <string>

using namespace clang;
using namespace CodeGen;

// Experiment to make sanitizers easier to debug.
static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
    "ubsan-unique-traps", llvm::cl::Optional,
    llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."));

// TODO: Introduce frontend options to enable this per sanitizer, similar to
// `fsanitize-trap`.
static llvm::cl::opt<bool> ClSanitizeGuardChecks(
    "ubsan-guard-checks", llvm::cl::Optional,
    llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
RawAddress
CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return RawAddress(Alloca, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is casted to default address space if necessary.
RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize,
                                             RawAddress *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(getPostAllocaInsertPoint());
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return RawAddress(V, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  llvm::AllocaInst *Alloca;
  if (ArraySize)
    Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
  else
    Alloca = new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                                  ArraySize, Name, &*AllocaInsertPt);
  if (Allocas) {
    Allocas->Add(Alloca);
  }
  return Alloca;
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                         const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

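/// CreateIRTemp - Create a temporary alloca for a value of the given AST
/// type, using the type's ABI alignment and its default IR lowering
/// (ConvertType rather than ConvertTypeForMem).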
RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

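/// CreateMemTemp - Create a temporary alloca suitable for holding the
/// in-memory representation of a value of the given AST type (using
/// ConvertTypeForMem), aligned to the type's ABI alignment.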
RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                          RawAddress *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                          const Twine &Name,
                                          RawAddress *Alloca) {
  RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                       /*ArraySize=*/nullptr, Alloca);

  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
                     KnownNonNull);
  }
  return Result;
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     CharUnits Align,
                                                     const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isPRValue())
    return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // if this is a bitfield-resulting conditional operator, we can special case
  // emit this. The normal 'EmitLValue' version of this is particularly
  // difficult to codegen for, since creating a single "LValue" for two
  // different sized arguments here is not particularly doable.
  if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
          E->IgnoreParenNoopCasts(getContext()))) {
    if (CondOp->getObjectKind() == OK_BitField)
      return EmitIgnoredConditionalOperator(CondOp);
  }

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

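/// Push the cleanups needed for a materialized temporary: ARC release
/// operations for lifetime-qualified temporaries and destructor calls for
/// class-type temporaries, dispatching on the temporary's storage duration.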
static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise = isa_and_nonnull<VarDecl>(VD) &&
                         VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::FunctionCallee CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}

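/// Create the storage a MaterializeTemporaryExpr binds to: a stack temporary
/// for automatic and full-expression storage durations (promoted to a
/// private constant global when the initializer is constant), or a global
/// temporary for static and thread storage durations.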
static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
                                           const MaterializeTemporaryExpr *M,
                                           const Expr *Inner,
                                           RawAddress *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record try to promote it into a
    // constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        Ty.isConstantStorage(CGF.getContext(), true, false))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
            llvm::GlobalValue::NotThreadLocal,
            CGF.getContext().getTargetAddressSpace(AS));
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getAsAlign());
        llvm::Constant *C = GV;
        if (AS != LangAS::Default)
          C = TCG.performAddrSpaceCast(
              CGF.CGM, GV, AS, LangAS::Default,
              GV->getValueType()->getPointerTo(
                  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
        // FIXME: Should we put the new global into a COMDAT?
        return RawAddress(C, GV->getValueType(), alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().starts_with("aapcs");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    RawAddress Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      llvm::Type *Ty = ConvertTypeForMem(E->getType());
      Object = Object.withElementType(Ty);

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  RawAddress Alloca = Address::invalid();
  RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
    Object = Object.withElementType(TemporaryType);
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks. However when inside an "await.suspend"
      // block, we should always avoid conditional cleanup because it creates
      // boolean marker that lives across await_suspend, which can destroy coro
      // frame.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          ((!SanOpts.has(SanitizerKind::HWAddress) &&
            !SanOpts.has(SanitizerKind::Memory) &&
            !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
           inSuspendBlock())) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress();
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

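/// Hash-mix helper for the -fsanitize=vptr cache: folds \p Ptr into the
/// accumulator \p Acc with a multiply/xor-shift step (the multiplier is the
/// splitmix64 finalizer constant).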
static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
                                llvm::Value *Ptr) {
  llvm::Value *A0 =
      Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
  llvm::Value *A1 =
      Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
  return Builder.CreateXor(Acc, A1);
}

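/// Return true if a null pointer is a legal value for the given kind of type
/// check; pointer downcasts, upcasts, and dynamic operations accept null and
/// simply propagate it.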
bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

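/// Return true if a -fsanitize=vptr check is required: the type must be a
/// dynamic class with a definition, and the access must be one for which the
/// vptr is meaningful (member access or call, downcast, upcast to a virtual
/// base, or a dynamic operation).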
bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

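/// Whether any of the sanitizers honored by EmitTypeCheck (null, alignment,
/// object-size, vptr) are enabled, i.e. whether EmitTypeCheck may emit code.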
bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) ||
         SanOpts.has(SanitizerKind::Alignment) ||
         SanOpts.has(SanitizerKind::ObjectSize) ||
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
    if (ArraySize)
      Size = Builder.CreateMul(Size, ArraySize);

    // Degenerate case: new X[0] does not need an objectsize check.
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
    if (!ConstantSize || !ConstantSize->isNullValue()) {
      // The glvalue must refer to a large enough storage region.
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
      //        to check this.
      // FIXME: Get object address space
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
      llvm::Value *Min = Builder.getFalse();
      llvm::Value *NullIsUnknown = Builder.getFalse();
      llvm::Value *Dynamic = Builder.getFalse();
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
          Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
    }
  }

  llvm::MaybeAlign AlignVal;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getAsMaybeAlign();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
                                             /*ForPointeeType=*/true)
                     .getAsMaybeAlign();

    // The glvalue must be suitably aligned.
    if (AlignVal && *AlignVal > llvm::Align(1) &&
        (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a deterministic hash of the mangled name of the type.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Contained in NoSanitizeList based on the mangled type.
    if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
                                                           Out.str())) {
      // Load the vptr, and mix it with TypeHash.
      llvm::Value *TypeHash =
          llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));

      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
      llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
                                          Ty->getAsCXXRecordDecl(),
                                          VTableAuthMode::UnsafeUbsanStrip);
      VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);

      llvm::Value *Hash =
          emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal = Builder.CreateAlignedLoad(
          IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
          getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

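/// If \p E is a DeclRefExpr naming a parameter that carries the
/// pass_object_size attribute, load the implicitly passed size and convert it
/// to a count of \p EltTy elements. Returns null if the attribute is absent
/// or only provides a lower bound.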
llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}

/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          LangOptions::StrictFlexArraysLevelKind
                                              StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
                                                     StrictFlexArraysLevel)) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());

      if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

namespace {

/// \p StructAccessBase returns the base \p Expr of a field access. It returns
/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
///
///     p in p->a.b.c
///
/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
/// looking for:
///
///     struct s {
///       struct s *ptr;
///       int count;
///       char array[] __attribute__((counted_by(count)));
///     };
///
/// If we have an expression like \p p->ptr->array[index], we want the
/// \p MemberExpr for \p p->ptr instead of \p p.
class StructAccessBase
    : public ConstStmtVisitor<StructAccessBase, const Expr *> {
  const RecordDecl *ExpectedRD;

  bool IsExpectedRecordDecl(const Expr *E) const {
    QualType Ty = E->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    return ExpectedRD == Ty->getAsRecordDecl();
  }

public:
  StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // NOTE: If we build C++ support for counted_by, then we'll have to handle
  // horrors like this:
  //
  //     struct S {
  //       int x, y;
  //       int blah[] __attribute__((counted_by(x)));
  //     } s;
  //
  //     int foo(int index, int val) {
  //       int (S::*IHatePMDs)[] = &S::blah;
  //       (s.*IHatePMDs)[index] = val;
  //     }

  const Expr *Visit(const Expr *E) {
    return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
  }

  const Expr *VisitStmt(const Stmt *S) { return nullptr; }

  // These are the types we expect to return (in order of most to least
  // likely):
  //
  //   1. DeclRefExpr - This is the expression for the base of the structure.
  //      It's exactly what we want to build an access to the \p counted_by
  //      field.
  //   2. MemberExpr - This is the expression that has the same \p RecordDecl
  //      as the flexible array member's lexical enclosing \p RecordDecl. This
  //      allows us to catch things like: "p->p->array"
  //   3. CompoundLiteralExpr - This is for people who create something
  //      heretical like (struct foo has a flexible array member):
  //
  //        (struct foo){ 1, 2 }.blah[idx];
  const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (IsExpectedRecordDecl(E) && E->isArrow())
      return E;
    const Expr *Res = Visit(E->getBase());
    return !Res && IsExpectedRecordDecl(E) ? E : Res;
  }
  const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitCallExpr(const CallExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (IsExpectedRecordDecl(E))
      return E;
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      return IsExpectedRecordDecl(E) ? E : nullptr;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
};

} // end anonymous namespace

using RecIndicesTy =
    SmallVector<std::pair<const RecordDecl *, llvm::Value *>, 8>;

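/// Recursively walk the fields of \p RD, descending into nested record types,
/// collecting the GEP indices that lead from \p RD to \p Field (innermost
/// index first; callers apply them in reverse). Returns true if \p Field was
/// found.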
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *Field,
                                 RecIndicesTy &Indices) {
  const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
  int64_t FieldNo = -1;
  for (const FieldDecl *FD : RD->fields()) {
    if (!Layout.containsFieldDecl(FD))
      // This could happen if the field has a struct type that's empty. I don't
      // know why either.
      continue;

    FieldNo = Layout.getLLVMFieldNo(FD);
    if (FD == Field) {
      Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
      return true;
    }

    QualType Ty = FD->getType();
    if (Ty->isRecordType()) {
      if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
        if (RD->isUnion())
          FieldNo = 0;
        Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
        return true;
      }
    }
  }

  return false;
}

/// This method is typically called in contexts where we can't generate
/// side-effects, like in __builtin_dynamic_object_size. When finding
/// expressions, only choose those that have either already been emitted or can
/// be loaded without side-effects.
///
/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
///   within the top-level struct.
/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();

  // Find the base struct expr (i.e. p in p->a.b.c.d).
  const Expr *StructBase = StructAccessBase(RD).Visit(Base);
  if (!StructBase || StructBase->HasSideEffects(getContext()))
    return nullptr;

  llvm::Value *Res = nullptr;
  if (StructBase->getType()->isPointerType()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
    Res = Addr.emitRawPointer(*this);
  } else if (StructBase->isLValue()) {
    LValue LV = EmitLValue(StructBase);
    Address Addr = LV.getAddress();
    Res = Addr.emitRawPointer(*this);
  } else {
    return nullptr;
  }

  llvm::Value *Zero = Builder.getInt32(0);
  RecIndicesTy Indices;

  getGEPIndicesToField(*this, RD, CountDecl, Indices);

  for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
    Res = Builder.CreateInBoundsGEP(
        ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res,
        {Zero, I->second}, "..counted_by.gep");

  return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
                                   getIntAlign(), "..counted_by.load");
}

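/// Given a flexible array member declaration, return the FieldDecl named by
/// its counted_by attribute, looking through indirect (anonymous struct)
/// fields; returns null when the attribute is absent.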
const FieldDecl *CodeGenFunction::FindCountedByField(const FieldDecl *FD) {
  if (!FD)
    return nullptr;

  const auto *CAT = FD->getType()->getAs<CountAttributedType>();
  if (!CAT)
    return nullptr;

  const auto *CountDRE = cast<DeclRefExpr>(CAT->getCountExpr());
  const auto *CountDecl = CountDRE->getDecl();
  if (const auto *IFD = dyn_cast<IndirectFieldDecl>(CountDecl))
    CountDecl = IFD->getAnonField();

  return dyn_cast<FieldDecl>(CountDecl);
}

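/// Emit a -fsanitize=array-bounds check for an access to \p Base at
/// \p Index: derive the array bound from the base expression and defer to
/// EmitBoundsCheckImpl, which emits nothing when no bound can be determined.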
void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      getLangOpts().getStrictFlexArraysLevel();
  QualType IndexedType;
  llvm::Value *Bound =
      getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);

  EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
}

void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
                                          llvm::Value *Index,
                                          QualType IndexType,
                                          QualType IndexedType, bool Accessed) {
  if (!Bound)
    return;

  SanitizerScope SanScope(this);

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}

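/// Emit a pre- or post-increment/decrement of a _Complex lvalue. Only the
/// real part is adjusted; the imaginary part is stored back unchanged.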
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                              E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

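/// Record the type of an explicit cast for IRGen: bind any VLA size
/// expressions occurring in the cast type and describe the type to debug
/// info.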
void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

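/// Compute the address of a pointer-typed expression along with the best
/// alignment, lvalue base, and TBAA information available for its pointee,
/// looking through casts, unary '&', and addressof-style builtin calls.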
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
                                        TBAAAccessInfo *TBAAInfo,
                                        KnownNonNull_t IsKnownNonNull,
                                        CodeGenFunction &CGF) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = CGF.EmitPointerWithAlignment(
            CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo =
                CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr.setAlignment(Align);
          }
        }

        if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
                                          /*MayBeNull=*/true,
                                          CodeGenFunction::CFITCK_UnrelatedCast,
                                          CE->getBeginLoc());
        }

        llvm::Type *ElemTy =
            CGF.ConvertTypeForMem(E->getType()->getPointeeType());
        Addr = Addr.withElementType(ElemTy);
        if (CE->getCastKind() == CK_AddressSpaceConversion)
          Addr = CGF.Builder.CreateAddrSpaceCast(
              Addr, CGF.ConvertType(E->getType()), ElemTy);
        return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
                                            CE->getType());
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
      Address Addr = CGF.EmitPointerWithAlignment(
          CE->getSubExpr(), BaseInfo, nullptr,
          (KnownNonNull_t)(IsKnownNonNull ||
                           CE->getCastKind() == CK_UncheckedDerivedToBase));
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return CGF.GetAddressOfBaseClass(
          Addr, Derived, CE->path_begin(), CE->path_end(),
          CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
  }

  // std::addressof and variants.
  if (auto *Call = dyn_cast<CallExpr>(E)) {
    switch (Call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  return CGF.makeNaturalAddressForPointer(
      CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),
      /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
}

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
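///
/// An illustrative sketch (not a case lifted from a test): given
///   int i; char *p = (char *)&i;
/// the subexpression '&i' carries the declared alignment of 'i', so the
/// bitcast to 'char *' can still report an alignment of 4 rather than the
/// conservative 1 implied by the destination type alone.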
Address CodeGenFunction::EmitPointerWithAlignment(
    const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
    KnownNonNull_t IsKnownNonNull) {
  Address Addr =
      ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
  if (IsKnownNonNull && !Addr.isKnownNonNull())
    Addr.setKnownNonNull();
  return Addr;
}

llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
  llvm::Value *V = RV.getScalarVal();
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *ElTy = ConvertType(E->getType());
  llvm::Type *Ty = UnqualPtrTy;
  return MakeAddrLValue(
      Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}

bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
    SanitizerSet SkippedChecks;
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
      if (IsBaseCXXThis)
        SkippedChecks.set(SanitizerKind::Alignment, true);
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
        SkippedChecks.set(SanitizerKind::Null, true);
    }
    EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
  }
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E,
                                   KnownNonNull_t IsKnownNonNull) {
  LValue LV = EmitLValueHelper(E, IsKnownNonNull);
  if (IsKnownNonNull && !LV.isKnownNonNull())
    LV.setKnownNonNull();
  return LV;
}

static QualType getConstantExprReferredType(const FullExpr *E,
                                            const ASTContext &Ctx) {
  const Expr *SE = E->getSubExpr()->IgnoreImplicit();
  if (isa<OpaqueValueExpr>(SE))
    return SE->getType();
  return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
}

LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
                                         KnownNonNull_t IsKnownNonNull) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
                      IsKnownNonNull);
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = getConstantExprReferredType(CE, getContext());
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
                      IsKnownNonNull);
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      Address Addr = LV.getAddress();
      llvm::Value *V = Addr.getBasePointer();
      Scope.ForceCleanup({&V});
      Addr.replaceBasePointer(V);
      return LValue::MakeAddr(Addr, LV.getType(), getContext(),
                              LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::ArraySectionExprClass:
    return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::CXXThisExprClass:
    return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
                      IsKnownNonNull);
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  case Expr::PackIndexingExprClass:
    return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
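///
/// A minimal illustration (hypothetical source, for exposition only):
///   void f() {
///     const int n = 4;
///     auto g = [] { return n; }; // no capture needed: reading 'n' is not an
///                                // odr-use, so we can emit the constant 4
///                                // directly here.
///   }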
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as load from the host
  // global variable as compile time constant, since the host variable is not
  // accessible on device. The DRE of the captured reference variable has to be
  // loaded from captures.
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
      refExpr->refersToEnclosingVariableOrCapture()) {
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (MD && MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
                                               result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}

CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}

llvm::Value *CodeGenFunction::emitScalarConstant(
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}

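/// Does \p Ty logically have a boolean representation: a plain 'bool', an
/// enum whose underlying integer type is boolean, or an atomic of either?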
static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

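// Compute the half-open range [Min, End) of values that \p Ty may take, for
// use in 'range' metadata and in EmitScalarRangeCheck below. Only booleans
// and, under strict enums, non-fixed C++ enumerations yield a range; for
// anything else this returns false.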
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums, bool IsBool) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    ED->getValueRange(End, Min);
  }
  return true;
}

llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
                       hasBooleanRepresentation(Ty)))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

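/// Emit a -fsanitize=bool / -fsanitize=enum check that \p Value lies in the
/// representable range of \p Ty. Returns true when the check applies to this
/// load; the caller then avoids attaching 'range' metadata so the optimizer
/// cannot fold the check away.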
bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                                           SourceLocation Loc) {
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
  if (!HasBoolCheck && !HasEnumCheck)
    return false;

  bool IsBool = hasBooleanRepresentation(Ty) ||
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
  bool NeedsBoolCheck = HasBoolCheck && IsBool;
  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
  if (!NeedsBoolCheck && !NeedsEnumCheck)
    return false;

  // Single-bit booleans don't need to be checked. Special-case this to avoid
  // a bit width mismatch when handling bitfield values. This is handled by
  // EmitFromMemory for the non-bitfield case.
  if (IsBool &&
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
    return false;

  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
    return true;

  auto &Ctx = getLLVMContext();
  SanitizerScope SanScope(this);
  llvm::Value *Check;
  --End;
  if (!Min) {
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
  } else {
    llvm::Value *Upper =
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
    llvm::Value *Lower =
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
    Check = Builder.CreateAnd(Upper, Lower);
  }
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
                                  EmitCheckTypeDescriptor(Ty)};
  SanitizerMask Kind =
      NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
            StaticArgs, EmitCheckValue(Value));
  return true;
}

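/// Load a scalar of type \p Ty from \p Addr and convert it from its in-memory
/// representation to its primary IR type (see EmitFromMemory), handling the
/// boolean-vector, vec3, atomic, and nontemporal special cases below.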
llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty,
                                               SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               TBAAAccessInfo TBAAInfo,
                                               bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (ClangVecTy->isExtVectorBoolType()) {
      llvm::Type *ValTy = ConvertType(Ty);
      unsigned ValNumElems =
          cast<llvm::FixedVectorType>(ValTy)->getNumElements();
      // Load the `iP` storage object (P is the padded vector size).
      auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
      const auto *RawIntTy = RawIntV->getType();
      assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
      // Bitcast iP --> <P x i1>.
      auto *PaddedVecTy = llvm::FixedVectorType::get(
          Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
      llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
      // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
      V = emitBoolVecConversion(V, ValNumElems, "extractvec");

      return EmitFromMemory(V, Ty);
    }

    // Handle vectors of size 3 like size 4 for better performance.
    const llvm::Type *EltTy = Addr.getElementType();
    const auto *VTy = cast<llvm::FixedVectorType>(EltTy);

    if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {

      llvm::VectorType *vec4Ty =
          llvm::FixedVectorType::get(VTy->getElementType(), 4);
      Address Cast = Addr.withElementType(vec4Ty);
      // Now load value.
      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
  }

  Addr =
      Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
    // In order to prevent the optimizer from throwing away the check, don't
    // attach range metadata to the load.
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
      Load->setMetadata(llvm::LLVMContext::MD_noundef,
                        llvm::MDNode::get(getLLVMContext(), std::nullopt));
    }

  return EmitFromMemory(Load, Ty);
}

/// Converts a scalar value from its primary IR type (as returned
/// by ConvertType) to its load/store type (as returned by
/// convertTypeForLoadStore).
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
    bool Signed = Ty->isSignedIntegerOrEnumerationType();
    return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
  }

  if (Ty->isExtVectorBoolType()) {
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
    // Expand to the memory bit width.
    unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
    // <N x i1> --> <P x i1>.
    Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
    // <P x i1> --> iP.
    Value = Builder.CreateBitCast(Value, StoreTy);
  }

  return Value;
}

/// Converts a scalar value from its load/store type (as returned
/// by convertTypeForLoadStore) to its primary IR type (as returned
/// by ConvertType).
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  if (Ty->isExtVectorBoolType()) {
    const auto *RawIntTy = Value->getType();
    // Bitcast iP --> <P x i1>.
    auto *PaddedVecTy = llvm::FixedVectorType::get(
        Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
    auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
    // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
    llvm::Type *ValTy = ConvertType(Ty);
    unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
    return emitBoolVecConversion(V, ValNumElems, "extractvec");
  }

  if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
    llvm::Type *ResTy = ConvertType(Ty);
    return Builder.CreateTrunc(Value, ResTy, "loadedv");
  }

  return Value;
}

// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to an array (the memory type of MatrixType).
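//
// For example (illustrative): a 'float __attribute__((matrix_type(2, 2)))'
// value is laid out in memory as [4 x float] but manipulated as <4 x float>,
// so loads and stores may need this element-type rewrite in either direction.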
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
                                            CodeGenFunction &CGF,
                                            bool IsVector = true) {
  auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
  if (ArrayTy && IsVector) {
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    return Addr.withElementType(VectorTy);
  }
  auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
  if (VectorTy && !IsVector) {
    auto *ArrayTy = llvm::ArrayType::get(
        VectorTy->getElementType(),
        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());

    return Addr.withElementType(ArrayTy);
  }

  return Addr;
}

// Emit a store of a matrix LValue. This may require casting the original
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
                                    bool isInit, CodeGenFunction &CGF) {
  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
                                           value->getType()->isVectorTy());
  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
                        lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
                        lvalue.isNontemporal());
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                                        bool Volatile, QualType Ty,
                                        LValueBaseInfo BaseInfo,
                                        TBAAAccessInfo TBAAInfo,
                                        bool isInit, bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  llvm::Type *SrcTy = Value->getType();
  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
    if (!CGM.getCodeGenOpts().PreserveVec3Type) {
      // Handle vec3 special.
      if (VecTy && !ClangVecTy->isExtVectorBoolType() &&
          cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
        // Our source is a vec3, do a shuffle vector to make it a vec4.
        Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
                                            "extractVec");
        SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
      }
      if (Addr.getElementType() != SrcTy) {
        Addr = Addr.withElementType(SrcTy);
      }
    }
  }

  Value = EmitToMemory(Value, Ty);

  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() ||
      (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node =
        llvm::MDNode::get(Store->getContext(),
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  if (lvalue.getType()->isConstantMatrixType()) {
    EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
    return;
  }

  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getType(), lvalue.getBaseInfo(),
                    lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}

// Emit a load of an LValue of matrix type. This may require casting the
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
                                     CodeGenFunction &CGF) {
  assert(LV.getType()->isConstantMatrixType());
  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
  LV.setAddress(Addr);
  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}

RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot,
                                           SourceLocation Loc) {
  QualType Ty = LV.getType();
  switch (getEvaluationKind(Ty)) {
  case TEK_Scalar:
    return EmitLoadOfLValue(LV, Loc);
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
  case TEK_Aggregate:
    EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
    return Slot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    Address AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    // In MRC mode, we do a load+autorelease.
    if (!getLangOpts().ObjCAutoRefCount) {
      return RValue::get(EmitARCLoadWeak(LV.getAddress()));
    }

    // In ARC mode, we load retained and then consume the value.
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    if (LV.getType()->isConstantMatrixType())
      return EmitLoadOfMatrixLValue(LV, Loc, *this);

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
                                              LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt()) {
    return EmitLoadOfExtVectorElementLValue(LV);
  }

  // Global Register variables always invoke intrinsics.
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  if (LV.isMatrixElt()) {
    llvm::Value *Idx = LV.getMatrixIdx();
    if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
      const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
      llvm::MatrixBuilder MB(Builder);
      MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
    }
    llvm::LoadInst *Load =
        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
  }

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV, Loc);
}

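// A worked example of the extraction below (illustrative numbers only): for
//   struct { unsigned pad : 2; int x : 3; } s;
// loading 's.x' from a 32-bit storage unit uses Offset = 2 and Size = 3, so
// HighBits = 32 - 2 - 3 = 27: 'shl 27' moves the field's sign bit up to bit
// 31, and 'ashr 29' sign-extends the field back down into the low 3 bits.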
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 SourceLocation Loc) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  Address Ptr = LV.getBitFieldAddress();
  llvm::Value *Val =
      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");

  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
    unsigned HighBits = StorageSize - Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Offset + HighBits)
      Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
  } else {
    if (Offset)
      Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
      Val = Builder.CreateAnd(
          Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
  EmitScalarRangeCheck(Val, LV.getType(), Loc);
  return RValue::get(Val);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
                                        LV.isVolatileQualified());

  // HLSL allows treating scalars as one-element vectors. Converting the scalar
  // IR value to a vector here allows the rest of codegen to behave as normal.
  if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
    llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
    llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
    Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
  }

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<int, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(getAccessedFieldNo(i, Elts));

  Vec = Builder.CreateShuffleVector(Vec, Mask);
  return RValue::get(Vec);
}

/// Generates lvalue for partial ext_vector access.
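///
/// For instance (a sketch using ext_vector_type syntax): given
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   float4 V;
/// an lvalue access such as 'V.y' resolves to the address of element 1 of the
/// vector; the GEP below points at the first element named by the accessor.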
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
  Address VectorAddress = LV.getExtVectorAddress();
  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);

  Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  Address VectorBasePtrPlusIx =
      Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
                                     "vector.elt");

  return VectorBasePtrPlusIx;
}

/// Loads of global named registers are always calls to intrinsics.
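///
/// For example (a sketch, not taken from this file): with
///   register unsigned long current_sp asm("sp");
/// a read of 'current_sp' lowers to a call such as
///   %0 = call i64 @llvm.read_register.i64(metadata !"sp")
/// rather than to a load from memory.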
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
  llvm::Value *Call = Builder.CreateCall(
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
  if (OrigTy->isPointerTy())
    Call = Builder.CreateIntToPtr(Call, OrigTy);
  return RValue::get(Call);
}

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
                                            Dst.isVolatileQualified());
      auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
      if (IRStoreTy) {
        auto *IRVecTy = llvm::FixedVectorType::get(
            Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
        Vec = Builder.CreateBitCast(Vec, IRVecTy);
        // iN --> <N x i1>.
      }
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      if (IRStoreTy) {
        // <N x i1> --> <iN>.
        Vec = Builder.CreateBitCast(Vec, IRStoreTy);
      }
      Builder.CreateStore(Vec, Dst.getVectorAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    if (Dst.isGlobalReg())
      return EmitStoreThroughGlobalRegLValue(Src, Dst);

    if (Dst.isMatrixElt()) {
      llvm::Value *Idx = Dst.getMatrixIdx();
      if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
        const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
        llvm::MatrixBuilder MB(Builder);
        MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
      }
      llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
      llvm::Value *Vec =
          Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
      Builder.CreateStore(Vec, Dst.getMatrixAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      if (isInit) {
        Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
        break;
      }
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      if (isInit)
        // Initialize and then skip the primitive store.
        EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
      else
        EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(),
                         /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    Address LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    Address LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = IntPtrTy;
      Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
      llvm::Value *RHS = dst.emitRawPointer(*this);
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
                                                ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    } else {
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    }
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

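/// Store into a bit-field. When other fields share the storage unit, this is
/// a read-modify-write: load the unit, mask the (truncated, shifted) source
/// value into place, and store the unit back. If \p Result is non-null, it
/// receives the new value of the field, extended back to its declared type.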
2488 | void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, |
2489 | llvm::Value **Result) { |
2490 | const CGBitFieldInfo &Info = Dst.getBitFieldInfo(); |
2491 | llvm::Type *ResLTy = convertTypeForLoadStore(ASTTy: Dst.getType()); |
2492 | Address Ptr = Dst.getBitFieldAddress(); |
2493 | |
2494 | // Get the source value, truncated to the width of the bit-field. |
2495 | llvm::Value *SrcVal = Src.getScalarVal(); |
2496 | |
2497 | // Cast the source to the storage type and shift it into place. |
2498 | SrcVal = Builder.CreateIntCast(V: SrcVal, DestTy: Ptr.getElementType(), |
2499 | /*isSigned=*/false); |
2500 | llvm::Value *MaskedVal = SrcVal; |
2501 | |
2502 | const bool UseVolatile = |
2503 | CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() && |
2504 | Info.VolatileStorageSize != 0 && isAAPCS(TargetInfo: CGM.getTarget()); |
2505 | const unsigned StorageSize = |
2506 | UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; |
2507 | const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; |
2508 | // See if there are other bits in the bitfield's storage we'll need to load |
2509 | // and mask together with source before storing. |
2510 | if (StorageSize != Info.Size) { |
2511 | assert(StorageSize > Info.Size && "Invalid bitfield size." ); |
2512 | llvm::Value *Val = |
2513 | Builder.CreateLoad(Addr: Ptr, IsVolatile: Dst.isVolatileQualified(), Name: "bf.load" ); |
2514 | |
2515 | // Mask the source value as needed. |
2516 | if (!hasBooleanRepresentation(Ty: Dst.getType())) |
2517 | SrcVal = Builder.CreateAnd( |
2518 | LHS: SrcVal, RHS: llvm::APInt::getLowBitsSet(numBits: StorageSize, loBitsSet: Info.Size), |
2519 | Name: "bf.value" ); |
2520 | MaskedVal = SrcVal; |
2521 | if (Offset) |
2522 | SrcVal = Builder.CreateShl(LHS: SrcVal, RHS: Offset, Name: "bf.shl" ); |
2523 | |
2524 | // Mask out the original value. |
2525 | Val = Builder.CreateAnd( |
2526 | LHS: Val, RHS: ~llvm::APInt::getBitsSet(numBits: StorageSize, loBit: Offset, hiBit: Offset + Info.Size), |
2527 | Name: "bf.clear" ); |
2528 | |
2529 | // Or together the unchanged values and the source value. |
2530 | SrcVal = Builder.CreateOr(LHS: Val, RHS: SrcVal, Name: "bf.set" ); |
2531 | } else { |
2532 | assert(Offset == 0); |
// According to the AAPCS:
2534 | // When a volatile bit-field is written, and its container does not overlap |
2535 | // with any non-bit-field member, its container must be read exactly once |
2536 | // and written exactly once using the access width appropriate to the type |
2537 | // of the container. The two accesses are not atomic. |
2538 | if (Dst.isVolatileQualified() && isAAPCS(TargetInfo: CGM.getTarget()) && |
2539 | CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) |
2540 | Builder.CreateLoad(Addr: Ptr, IsVolatile: true, Name: "bf.load" ); |
2541 | } |
2542 | |
2543 | // Write the new value back out. |
2544 | Builder.CreateStore(Val: SrcVal, Addr: Ptr, IsVolatile: Dst.isVolatileQualified()); |
2545 | |
2546 | // Return the new value of the bit-field, if requested. |
2547 | if (Result) { |
2548 | llvm::Value *ResultVal = MaskedVal; |
2549 | |
2550 | // Sign extend the value if needed. |
2551 | if (Info.IsSigned) { |
2552 | assert(Info.Size <= StorageSize); |
2553 | unsigned HighBits = StorageSize - Info.Size; |
2554 | if (HighBits) { |
2555 | ResultVal = Builder.CreateShl(LHS: ResultVal, RHS: HighBits, Name: "bf.result.shl" ); |
2556 | ResultVal = Builder.CreateAShr(LHS: ResultVal, RHS: HighBits, Name: "bf.result.ashr" ); |
2557 | } |
2558 | } |
2559 | |
2560 | ResultVal = Builder.CreateIntCast(V: ResultVal, DestTy: ResLTy, isSigned: Info.IsSigned, |
2561 | Name: "bf.result.cast" ); |
2562 | *Result = EmitFromMemory(Value: ResultVal, Ty: Dst.getType()); |
2563 | } |
2564 | } |
2565 | |
2566 | void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, |
2567 | LValue Dst) { |
// HLSL allows storing to scalar values through ExtVector component LValues.
// To support this, we need to handle the case where the destination address
// is a scalar.
2571 | Address DstAddr = Dst.getExtVectorAddress(); |
2572 | if (!DstAddr.getElementType()->isVectorTy()) { |
2573 | assert(!Dst.getType()->isVectorType() && |
2574 | "this should only occur for non-vector l-values" ); |
2575 | Builder.CreateStore(Val: Src.getScalarVal(), Addr: DstAddr, IsVolatile: Dst.isVolatileQualified()); |
2576 | return; |
2577 | } |
2578 | |
2579 | // This access turns into a read/modify/write of the vector. Load the input |
2580 | // value now. |
2581 | llvm::Value *Vec = Builder.CreateLoad(Addr: DstAddr, IsVolatile: Dst.isVolatileQualified()); |
2582 | const llvm::Constant *Elts = Dst.getExtVectorElts(); |
2583 | |
2584 | llvm::Value *SrcVal = Src.getScalarVal(); |
2585 | |
2586 | if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) { |
2587 | unsigned NumSrcElts = VTy->getNumElements(); |
2588 | unsigned NumDstElts = |
2589 | cast<llvm::FixedVectorType>(Val: Vec->getType())->getNumElements(); |
2590 | if (NumDstElts == NumSrcElts) { |
// If the source and destination have the same number of elements, use a
// shuffle vector to reorder the source elements into the positions in
// which they will be stored.
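// Illustrative example (hypothetical OpenCL-style code): for
// 'float2 V, W; ... V.yx = W;' the accessed fields are {1, 0}, so the loop
// below sets Mask[1] = 0 and Mask[0] = 1, giving Mask = {1, 0}, which
// permutes W into the order in which it will be stored.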
2594 | SmallVector<int, 4> Mask(NumDstElts); |
2595 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2596 | Mask[getAccessedFieldNo(Idx: i, Elts)] = i; |
2597 | |
2598 | Vec = Builder.CreateShuffleVector(V: SrcVal, Mask); |
2599 | } else if (NumDstElts > NumSrcElts) { |
// Extend the source vector to the destination's length, then shuffle it
// into the destination.
2602 | // FIXME: since we're shuffling with undef, can we just use the indices |
2603 | // into that? This could be simpler. |
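// Illustrative example (hypothetical code 'float4 V; float2 W; V.xy = W;'):
// ExtMask becomes {0, 1, -1, -1} to widen W to four lanes; the identity
// mask starts as {0, 1, 2, 3} and is rewritten below to {4, 5, 2, 3},
// taking lanes 0-1 from the widened source and keeping lanes 2-3 of the
// old value.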
2604 | SmallVector<int, 4> ExtMask; |
2605 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2606 | ExtMask.push_back(Elt: i); |
2607 | ExtMask.resize(N: NumDstElts, NV: -1); |
2608 | llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(V: SrcVal, Mask: ExtMask); |
// Build the identity mask for the destination vector.
2610 | SmallVector<int, 4> Mask; |
2611 | for (unsigned i = 0; i != NumDstElts; ++i) |
2612 | Mask.push_back(Elt: i); |
2613 | |
2614 | // When the vector size is odd and .odd or .hi is used, the last element |
2615 | // of the Elts constant array will be one past the size of the vector. |
2616 | // Ignore the last element here, if it is greater than the mask size. |
2617 | if (getAccessedFieldNo(Idx: NumSrcElts - 1, Elts) == Mask.size()) |
2618 | NumSrcElts--; |
2619 | |
// Rewrite the mask entries for the lanes that receive source elements.
2621 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2622 | Mask[getAccessedFieldNo(Idx: i, Elts)] = i + NumDstElts; |
2623 | Vec = Builder.CreateShuffleVector(V1: Vec, V2: ExtSrcVal, Mask); |
2624 | } else { |
2625 | // We should never shorten the vector |
2626 | llvm_unreachable("unexpected shorten vector length" ); |
2627 | } |
2628 | } else { |
// If the Src is a scalar (not a vector) and the target is a vector, it
// must be updating exactly one element.
2631 | unsigned InIdx = getAccessedFieldNo(Idx: 0, Elts); |
2632 | llvm::Value *Elt = llvm::ConstantInt::get(Ty: SizeTy, V: InIdx); |
2633 | Vec = Builder.CreateInsertElement(Vec, NewElt: SrcVal, Idx: Elt); |
2634 | } |
2635 | |
2636 | Builder.CreateStore(Val: Vec, Addr: Dst.getExtVectorAddress(), |
2637 | IsVolatile: Dst.isVolatileQualified()); |
2638 | } |
2639 | |
/// Stores to global named registers are always calls to intrinsics.
2641 | void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) { |
2642 | assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) && |
2643 | "Bad type for register variable" ); |
2644 | llvm::MDNode *RegName = cast<llvm::MDNode>( |
2645 | Val: cast<llvm::MetadataAsValue>(Val: Dst.getGlobalReg())->getMetadata()); |
2646 | assert(RegName && "Register LValue is not metadata" ); |
2647 | |
// We accept only integer and pointer types.
2649 | llvm::Type *OrigTy = CGM.getTypes().ConvertType(T: Dst.getType()); |
2650 | llvm::Type *Ty = OrigTy; |
2651 | if (OrigTy->isPointerTy()) |
2652 | Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); |
2653 | llvm::Type *Types[] = { Ty }; |
2654 | |
2655 | llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::write_register, Tys: Types); |
2656 | llvm::Value *Value = Src.getScalarVal(); |
2657 | if (OrigTy->isPointerTy()) |
2658 | Value = Builder.CreatePtrToInt(V: Value, DestTy: Ty); |
2659 | Builder.CreateCall( |
2660 | Callee: F, Args: {llvm::MetadataAsValue::get(Context&: Ty->getContext(), MD: RegName), Value}); |
2661 | } |
2662 | |
// setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
// generating the write-barrier API. It is currently a global, an ivar,
// or neither.
2666 | static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, |
2667 | LValue &LV, |
2668 | bool IsMemberAccess=false) { |
2669 | if (Ctx.getLangOpts().getGC() == LangOptions::NonGC) |
2670 | return; |
2671 | |
2672 | if (isa<ObjCIvarRefExpr>(Val: E)) { |
2673 | QualType ExpTy = E->getType(); |
2674 | if (IsMemberAccess && ExpTy->isPointerType()) { |
// If the ivar is a structure pointer, assigning to a field of the
// pointed-to struct follows gcc's behavior and conservatively makes
// this a non-ivar write-barrier.
2678 | ExpTy = ExpTy->castAs<PointerType>()->getPointeeType(); |
2679 | if (ExpTy->isRecordType()) { |
2680 | LV.setObjCIvar(false); |
2681 | return; |
2682 | } |
2683 | } |
2684 | LV.setObjCIvar(true); |
2685 | auto *Exp = cast<ObjCIvarRefExpr>(Val: const_cast<Expr *>(E)); |
2686 | LV.setBaseIvarExp(Exp->getBase()); |
2687 | LV.setObjCArray(E->getType()->isArrayType()); |
2688 | return; |
2689 | } |
2690 | |
2691 | if (const auto *Exp = dyn_cast<DeclRefExpr>(Val: E)) { |
2692 | if (const auto *VD = dyn_cast<VarDecl>(Val: Exp->getDecl())) { |
2693 | if (VD->hasGlobalStorage()) { |
2694 | LV.setGlobalObjCRef(true); |
2695 | LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None); |
2696 | } |
2697 | } |
2698 | LV.setObjCArray(E->getType()->isArrayType()); |
2699 | return; |
2700 | } |
2701 | |
2702 | if (const auto *Exp = dyn_cast<UnaryOperator>(Val: E)) { |
2703 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2704 | return; |
2705 | } |
2706 | |
2707 | if (const auto *Exp = dyn_cast<ParenExpr>(Val: E)) { |
2708 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2709 | if (LV.isObjCIvar()) { |
// If the cast is to a structure pointer, follow gcc's behavior and make
// it a non-ivar write-barrier.
2712 | QualType ExpTy = E->getType(); |
2713 | if (ExpTy->isPointerType()) |
2714 | ExpTy = ExpTy->castAs<PointerType>()->getPointeeType(); |
2715 | if (ExpTy->isRecordType()) |
2716 | LV.setObjCIvar(false); |
2717 | } |
2718 | return; |
2719 | } |
2720 | |
2721 | if (const auto *Exp = dyn_cast<GenericSelectionExpr>(Val: E)) { |
2722 | setObjCGCLValueClass(Ctx, E: Exp->getResultExpr(), LV); |
2723 | return; |
2724 | } |
2725 | |
2726 | if (const auto *Exp = dyn_cast<ImplicitCastExpr>(Val: E)) { |
2727 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2728 | return; |
2729 | } |
2730 | |
2731 | if (const auto *Exp = dyn_cast<CStyleCastExpr>(Val: E)) { |
2732 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2733 | return; |
2734 | } |
2735 | |
2736 | if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(Val: E)) { |
2737 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2738 | return; |
2739 | } |
2740 | |
2741 | if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(Val: E)) { |
2742 | setObjCGCLValueClass(Ctx, E: Exp->getBase(), LV); |
2743 | if (LV.isObjCIvar() && !LV.isObjCArray()) |
// Using array syntax to assign to what an ivar points to is not the
// same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2746 | LV.setObjCIvar(false); |
2747 | else if (LV.isGlobalObjCRef() && !LV.isObjCArray()) |
// Using array syntax to assign to what a global points to is not the
// same as assigning to the global itself. {id *G;} G[i] = 0;
2750 | LV.setGlobalObjCRef(false); |
2751 | return; |
2752 | } |
2753 | |
2754 | if (const auto *Exp = dyn_cast<MemberExpr>(Val: E)) { |
2755 | setObjCGCLValueClass(Ctx, E: Exp->getBase(), LV, IsMemberAccess: true); |
// We don't know whether the member is an ivar, but this flag is consulted
// only in the context of LV.isObjCIvar().
2758 | LV.setObjCArray(E->getType()->isArrayType()); |
2759 | return; |
2760 | } |
2761 | } |
2762 | |
2763 | static LValue EmitThreadPrivateVarDeclLValue( |
2764 | CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, |
2765 | llvm::Type *RealVarTy, SourceLocation Loc) { |
2766 | if (CGF.CGM.getLangOpts().OpenMPIRBuilder) |
2767 | Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate( |
2768 | CGF, VD, VDAddr: Addr, Loc); |
2769 | else |
2770 | Addr = |
2771 | CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, VDAddr: Addr, Loc); |
2772 | |
2773 | Addr = Addr.withElementType(ElemTy: RealVarTy); |
2774 | return CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
2775 | } |
2776 | |
2777 | static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, |
2778 | const VarDecl *VD, QualType T) { |
2779 | std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res = |
2780 | OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD); |
// Return an invalid address if the variable is MT_To (or MT_Enter starting
// with OpenMP 5.2) and unified memory is not enabled. For all other cases
// (MT_Link, or MT_To/MT_Enter with unified memory), return a valid address.
2784 | if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
2785 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
2786 | !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) |
2787 | return Address::invalid(); |
2788 | assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) || |
2789 | ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
2790 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
2791 | CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) && |
2792 | "Expected link clause OR to clause with unified memory enabled." ); |
2793 | QualType PtrTy = CGF.getContext().getPointerType(T: VD->getType()); |
2794 | Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD); |
2795 | return CGF.EmitLoadOfPointer(Ptr: Addr, PtrTy: PtrTy->castAs<PointerType>()); |
2796 | } |
2797 | |
2798 | Address |
2799 | CodeGenFunction::EmitLoadOfReference(LValue RefLVal, |
2800 | LValueBaseInfo *PointeeBaseInfo, |
2801 | TBAAAccessInfo *PointeeTBAAInfo) { |
2802 | llvm::LoadInst *Load = |
2803 | Builder.CreateLoad(Addr: RefLVal.getAddress(), IsVolatile: RefLVal.isVolatile()); |
2804 | CGM.DecorateInstructionWithTBAA(Inst: Load, TBAAInfo: RefLVal.getTBAAInfo()); |
2805 | return makeNaturalAddressForPointer(Ptr: Load, T: RefLVal.getType()->getPointeeType(), |
2806 | Alignment: CharUnits(), /*ForPointeeType=*/true, |
2807 | BaseInfo: PointeeBaseInfo, TBAAInfo: PointeeTBAAInfo); |
2808 | } |
2809 | |
2810 | LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) { |
2811 | LValueBaseInfo PointeeBaseInfo; |
2812 | TBAAAccessInfo PointeeTBAAInfo; |
2813 | Address PointeeAddr = EmitLoadOfReference(RefLVal, PointeeBaseInfo: &PointeeBaseInfo, |
2814 | PointeeTBAAInfo: &PointeeTBAAInfo); |
2815 | return MakeAddrLValue(Addr: PointeeAddr, T: RefLVal.getType()->getPointeeType(), |
2816 | BaseInfo: PointeeBaseInfo, TBAAInfo: PointeeTBAAInfo); |
2817 | } |
2818 | |
2819 | Address CodeGenFunction::EmitLoadOfPointer(Address Ptr, |
2820 | const PointerType *PtrTy, |
2821 | LValueBaseInfo *BaseInfo, |
2822 | TBAAAccessInfo *TBAAInfo) { |
2823 | llvm::Value *Addr = Builder.CreateLoad(Addr: Ptr); |
2824 | return makeNaturalAddressForPointer(Ptr: Addr, T: PtrTy->getPointeeType(), |
2825 | Alignment: CharUnits(), /*ForPointeeType=*/true, |
2826 | BaseInfo, TBAAInfo); |
2827 | } |
2828 | |
2829 | LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr, |
2830 | const PointerType *PtrTy) { |
2831 | LValueBaseInfo BaseInfo; |
2832 | TBAAAccessInfo TBAAInfo; |
2833 | Address Addr = EmitLoadOfPointer(Ptr: PtrAddr, PtrTy, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
2834 | return MakeAddrLValue(Addr, T: PtrTy->getPointeeType(), BaseInfo, TBAAInfo); |
2835 | } |
2836 | |
2837 | static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, |
2838 | const Expr *E, const VarDecl *VD) { |
2839 | QualType T = E->getType(); |
2840 | |
2841 | // If it's thread_local, emit a call to its wrapper function instead. |
2842 | if (VD->getTLSKind() == VarDecl::TLS_Dynamic && |
2843 | CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD)) |
2844 | return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, LValType: T); |
2845 | // Check if the variable is marked as declare target with link clause in |
2846 | // device codegen. |
2847 | if (CGF.getLangOpts().OpenMPIsTargetDevice) { |
2848 | Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T); |
2849 | if (Addr.isValid()) |
2850 | return CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
2851 | } |
2852 | |
2853 | llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(D: VD); |
2854 | |
2855 | if (VD->getTLSKind() != VarDecl::TLS_None) |
2856 | V = CGF.Builder.CreateThreadLocalAddress(Ptr: V); |
2857 | |
2858 | llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(T: VD->getType()); |
2859 | CharUnits Alignment = CGF.getContext().getDeclAlign(D: VD); |
2860 | Address Addr(V, RealVarTy, Alignment); |
2861 | // Emit reference to the private copy of the variable if it is an OpenMP |
2862 | // threadprivate variable. |
2863 | if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd && |
2864 | VD->hasAttr<OMPThreadPrivateDeclAttr>()) { |
2865 | return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy, |
2866 | Loc: E->getExprLoc()); |
2867 | } |
2868 | LValue LV = VD->getType()->isReferenceType() ? |
2869 | CGF.EmitLoadOfReferenceLValue(RefAddr: Addr, RefTy: VD->getType(), |
2870 | Source: AlignmentSource::Decl) : |
2871 | CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
2872 | setObjCGCLValueClass(Ctx: CGF.getContext(), E, LV); |
2873 | return LV; |
2874 | } |
2875 | |
2876 | llvm::Constant *CodeGenModule::getRawFunctionPointer(GlobalDecl GD, |
2877 | llvm::Type *Ty) { |
2878 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
2879 | if (FD->hasAttr<WeakRefAttr>()) { |
2880 | ConstantAddress aliasee = GetWeakRefReference(VD: FD); |
2881 | return aliasee.getPointer(); |
2882 | } |
2883 | |
2884 | llvm::Constant *V = GetAddrOfFunction(GD, Ty); |
2885 | return V; |
2886 | } |
2887 | |
2888 | static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, |
2889 | GlobalDecl GD) { |
2890 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
2891 | llvm::Constant *V = CGF.CGM.getFunctionPointer(GD); |
2892 | CharUnits Alignment = CGF.getContext().getDeclAlign(D: FD); |
2893 | return CGF.MakeAddrLValue(V, T: E->getType(), Alignment, |
2894 | Source: AlignmentSource::Decl); |
2895 | } |
2896 | |
2897 | static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, |
2898 | llvm::Value *ThisValue) { |
2899 | |
2900 | return CGF.EmitLValueForLambdaField(Field: FD, ThisValue); |
2901 | } |
2902 | |
/// Named registers are represented by named metadata holding the register
/// name, which is passed as an argument to the @llvm.read_register and
/// @llvm.write_register intrinsics.
/// So far, only the name is passed down, but other options such as the
/// register type, allocation type, or even optimization options could be
/// passed down via the metadata node.
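/// As an illustrative sketch (hypothetical source), for
///   register unsigned long SP asm("sp");
/// this produces named metadata '!llvm.named.register.sp = !{!0}' with
/// '!0 = !{!"sp"}', and accesses to SP become calls to @llvm.read_register
/// and @llvm.write_register that take that metadata node as an argument.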
2909 | static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) { |
2910 | SmallString<64> Name("llvm.named.register." ); |
2911 | AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>(); |
2912 | assert(Asm->getLabel().size() < 64-Name.size() && |
2913 | "Register name too big" ); |
2914 | Name.append(RHS: Asm->getLabel()); |
2915 | llvm::NamedMDNode *M = |
2916 | CGM.getModule().getOrInsertNamedMetadata(Name); |
2917 | if (M->getNumOperands() == 0) { |
2918 | llvm::MDString *Str = llvm::MDString::get(Context&: CGM.getLLVMContext(), |
2919 | Str: Asm->getLabel()); |
2920 | llvm::Metadata *Ops[] = {Str}; |
2921 | M->addOperand(M: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: Ops)); |
2922 | } |
2923 | |
2924 | CharUnits Alignment = CGM.getContext().getDeclAlign(D: VD); |
2925 | |
2926 | llvm::Value *Ptr = |
2927 | llvm::MetadataAsValue::get(Context&: CGM.getLLVMContext(), MD: M->getOperand(i: 0)); |
2928 | return LValue::MakeGlobalReg(V: Ptr, alignment: Alignment, type: VD->getType()); |
2929 | } |
2930 | |
2931 | /// Determine whether we can emit a reference to \p VD from the current |
2932 | /// context, despite not necessarily having seen an odr-use of the variable in |
2933 | /// this context. |
2934 | static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, |
2935 | const DeclRefExpr *E, |
2936 | const VarDecl *VD) { |
2937 | // For a variable declared in an enclosing scope, do not emit a spurious |
2938 | // reference even if we have a capture, as that will emit an unwarranted |
2939 | // reference to our capture state, and will likely generate worse code than |
2940 | // emitting a local copy. |
2941 | if (E->refersToEnclosingVariableOrCapture()) |
2942 | return false; |
2943 | |
2944 | // For a local declaration declared in this function, we can always reference |
2945 | // it even if we don't have an odr-use. |
2946 | if (VD->hasLocalStorage()) { |
2947 | return VD->getDeclContext() == |
2948 | dyn_cast_or_null<DeclContext>(Val: CGF.CurCodeDecl); |
2949 | } |
2950 | |
2951 | // For a global declaration, we can emit a reference to it if we know |
2952 | // for sure that we are able to emit a definition of it. |
2953 | VD = VD->getDefinition(C&: CGF.getContext()); |
2954 | if (!VD) |
2955 | return false; |
2956 | |
2957 | // Don't emit a spurious reference if it might be to a variable that only |
2958 | // exists on a different device / target. |
2959 | // FIXME: This is unnecessarily broad. Check whether this would actually be a |
2960 | // cross-target reference. |
2961 | if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA || |
2962 | CGF.getLangOpts().OpenCL) { |
2963 | return false; |
2964 | } |
2965 | |
2966 | // We can emit a spurious reference only if the linkage implies that we'll |
2967 | // be emitting a non-interposable symbol that will be retained until link |
2968 | // time. |
2969 | switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) { |
2970 | case llvm::GlobalValue::ExternalLinkage: |
2971 | case llvm::GlobalValue::LinkOnceODRLinkage: |
2972 | case llvm::GlobalValue::WeakODRLinkage: |
2973 | case llvm::GlobalValue::InternalLinkage: |
2974 | case llvm::GlobalValue::PrivateLinkage: |
2975 | return true; |
2976 | default: |
2977 | return false; |
2978 | } |
2979 | } |
2980 | |
2981 | LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { |
2982 | const NamedDecl *ND = E->getDecl(); |
2983 | QualType T = E->getType(); |
2984 | |
2985 | assert(E->isNonOdrUse() != NOUR_Unevaluated && |
2986 | "should not emit an unevaluated operand" ); |
2987 | |
2988 | if (const auto *VD = dyn_cast<VarDecl>(Val: ND)) { |
// Global named registers are accessed via intrinsics only.
2990 | if (VD->getStorageClass() == SC_Register && |
2991 | VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl()) |
2992 | return EmitGlobalNamedRegister(VD, CGM); |
2993 | |
2994 | // If this DeclRefExpr does not constitute an odr-use of the variable, |
2995 | // we're not permitted to emit a reference to it in general, and it might |
2996 | // not be captured if capture would be necessary for a use. Emit the |
2997 | // constant value directly instead. |
2998 | if (E->isNonOdrUse() == NOUR_Constant && |
2999 | (VD->getType()->isReferenceType() || |
3000 | !canEmitSpuriousReferenceToVariable(CGF&: *this, E, VD))) { |
3001 | VD->getAnyInitializer(D&: VD); |
3002 | llvm::Constant *Val = ConstantEmitter(*this).emitAbstract( |
3003 | loc: E->getLocation(), value: *VD->evaluateValue(), T: VD->getType()); |
3004 | assert(Val && "failed to emit constant expression" ); |
3005 | |
3006 | Address Addr = Address::invalid(); |
3007 | if (!VD->getType()->isReferenceType()) { |
3008 | // Spill the constant value to a global. |
3009 | Addr = CGM.createUnnamedGlobalFrom(D: *VD, Constant: Val, |
3010 | Align: getContext().getDeclAlign(D: VD)); |
3011 | llvm::Type *VarTy = getTypes().ConvertTypeForMem(T: VD->getType()); |
3012 | auto *PTy = llvm::PointerType::get( |
3013 | ElementType: VarTy, AddressSpace: getTypes().getTargetAddressSpace(T: VD->getType())); |
3014 | Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty: PTy, ElementTy: VarTy); |
3015 | } else { |
3016 | // Should we be using the alignment of the constant pointer we emitted? |
3017 | CharUnits Alignment = |
3018 | CGM.getNaturalTypeAlignment(T: E->getType(), |
3019 | /* BaseInfo= */ nullptr, |
3020 | /* TBAAInfo= */ nullptr, |
3021 | /* forPointeeType= */ true); |
3022 | Addr = makeNaturalAddressForPointer(Ptr: Val, T, Alignment); |
3023 | } |
3024 | return MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
3025 | } |
3026 | |
3027 | // FIXME: Handle other kinds of non-odr-use DeclRefExprs. |
3028 | |
3029 | // Check for captured variables. |
3030 | if (E->refersToEnclosingVariableOrCapture()) { |
3031 | VD = VD->getCanonicalDecl(); |
3032 | if (auto *FD = LambdaCaptureFields.lookup(Val: VD)) |
3033 | return EmitCapturedFieldLValue(CGF&: *this, FD, ThisValue: CXXABIThisValue); |
3034 | if (CapturedStmtInfo) { |
3035 | auto I = LocalDeclMap.find(Val: VD); |
3036 | if (I != LocalDeclMap.end()) { |
3037 | LValue CapLVal; |
3038 | if (VD->getType()->isReferenceType()) |
3039 | CapLVal = EmitLoadOfReferenceLValue(RefAddr: I->second, RefTy: VD->getType(), |
3040 | Source: AlignmentSource::Decl); |
3041 | else |
3042 | CapLVal = MakeAddrLValue(Addr: I->second, T); |
3043 | // Mark lvalue as nontemporal if the variable is marked as nontemporal |
3044 | // in simd context. |
3045 | if (getLangOpts().OpenMP && |
3046 | CGM.getOpenMPRuntime().isNontemporalDecl(VD)) |
3047 | CapLVal.setNontemporal(/*Value=*/true); |
3048 | return CapLVal; |
3049 | } |
3050 | LValue CapLVal = |
3051 | EmitCapturedFieldLValue(CGF&: *this, FD: CapturedStmtInfo->lookup(VD), |
3052 | ThisValue: CapturedStmtInfo->getContextValue()); |
3053 | Address LValueAddress = CapLVal.getAddress(); |
3054 | CapLVal = MakeAddrLValue(Addr: Address(LValueAddress.emitRawPointer(CGF&: *this), |
3055 | LValueAddress.getElementType(), |
3056 | getContext().getDeclAlign(D: VD)), |
3057 | T: CapLVal.getType(), |
3058 | BaseInfo: LValueBaseInfo(AlignmentSource::Decl), |
3059 | TBAAInfo: CapLVal.getTBAAInfo()); |
3060 | // Mark lvalue as nontemporal if the variable is marked as nontemporal |
3061 | // in simd context. |
3062 | if (getLangOpts().OpenMP && |
3063 | CGM.getOpenMPRuntime().isNontemporalDecl(VD)) |
3064 | CapLVal.setNontemporal(/*Value=*/true); |
3065 | return CapLVal; |
3066 | } |
3067 | |
3068 | assert(isa<BlockDecl>(CurCodeDecl)); |
3069 | Address addr = GetAddrOfBlockDecl(var: VD); |
3070 | return MakeAddrLValue(Addr: addr, T, Source: AlignmentSource::Decl); |
3071 | } |
3072 | } |
3073 | |
3074 | // FIXME: We should be able to assert this for FunctionDecls as well! |
3075 | // FIXME: We should be able to assert this for all DeclRefExprs, not just |
3076 | // those with a valid source location. |
3077 | assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() || |
3078 | !E->getLocation().isValid()) && |
3079 | "Should not use decl without marking it used!" ); |
3080 | |
3081 | if (ND->hasAttr<WeakRefAttr>()) { |
3082 | const auto *VD = cast<ValueDecl>(Val: ND); |
3083 | ConstantAddress Aliasee = CGM.GetWeakRefReference(VD); |
3084 | return MakeAddrLValue(Addr: Aliasee, T, Source: AlignmentSource::Decl); |
3085 | } |
3086 | |
3087 | if (const auto *VD = dyn_cast<VarDecl>(Val: ND)) { |
3088 | // Check if this is a global variable. |
3089 | if (VD->hasLinkage() || VD->isStaticDataMember()) |
3090 | return EmitGlobalVarDeclLValue(CGF&: *this, E, VD); |
3091 | |
3092 | Address addr = Address::invalid(); |
3093 | |
3094 | // The variable should generally be present in the local decl map. |
3095 | auto iter = LocalDeclMap.find(Val: VD); |
3096 | if (iter != LocalDeclMap.end()) { |
3097 | addr = iter->second; |
3098 | |
// Otherwise, it might be a static local we haven't emitted yet for
// some reason; most likely because it's in an outer function.
3101 | } else if (VD->isStaticLocal()) { |
3102 | llvm::Constant *var = CGM.getOrCreateStaticVarDecl( |
3103 | D: *VD, Linkage: CGM.getLLVMLinkageVarDefinition(VD)); |
3104 | addr = Address( |
3105 | var, ConvertTypeForMem(T: VD->getType()), getContext().getDeclAlign(D: VD)); |
3106 | |
3107 | // No other cases for now. |
3108 | } else { |
3109 | llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?" ); |
3110 | } |
3111 | |
3112 | // Handle threadlocal function locals. |
3113 | if (VD->getTLSKind() != VarDecl::TLS_None) |
3114 | addr = addr.withPointer( |
3115 | NewPointer: Builder.CreateThreadLocalAddress(Ptr: addr.getBasePointer()), |
3116 | IsKnownNonNull: NotKnownNonNull); |
3117 | |
3118 | // Check for OpenMP threadprivate variables. |
3119 | if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && |
3120 | VD->hasAttr<OMPThreadPrivateDeclAttr>()) { |
3121 | return EmitThreadPrivateVarDeclLValue( |
3122 | CGF&: *this, VD, T, Addr: addr, RealVarTy: getTypes().ConvertTypeForMem(T: VD->getType()), |
3123 | Loc: E->getExprLoc()); |
3124 | } |
3125 | |
3126 | // Drill into block byref variables. |
3127 | bool isBlockByref = VD->isEscapingByref(); |
3128 | if (isBlockByref) { |
3129 | addr = emitBlockByrefAddress(baseAddr: addr, V: VD); |
3130 | } |
3131 | |
3132 | // Drill into reference types. |
3133 | LValue LV = VD->getType()->isReferenceType() ? |
3134 | EmitLoadOfReferenceLValue(RefAddr: addr, RefTy: VD->getType(), Source: AlignmentSource::Decl) : |
3135 | MakeAddrLValue(Addr: addr, T, Source: AlignmentSource::Decl); |
3136 | |
3137 | bool isLocalStorage = VD->hasLocalStorage(); |
3138 | |
3139 | bool NonGCable = isLocalStorage && |
3140 | !VD->getType()->isReferenceType() && |
3141 | !isBlockByref; |
3142 | if (NonGCable) { |
3143 | LV.getQuals().removeObjCGCAttr(); |
3144 | LV.setNonGC(true); |
3145 | } |
3146 | |
3147 | bool isImpreciseLifetime = |
3148 | (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>()); |
3149 | if (isImpreciseLifetime) |
3150 | LV.setARCPreciseLifetime(ARCImpreciseLifetime); |
3151 | setObjCGCLValueClass(Ctx: getContext(), E, LV); |
3152 | return LV; |
3153 | } |
3154 | |
3155 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) |
3156 | return EmitFunctionDeclLValue(CGF&: *this, E, GD: FD); |
3157 | |
3158 | // FIXME: While we're emitting a binding from an enclosing scope, all other |
3159 | // DeclRefExprs we see should be implicitly treated as if they also refer to |
3160 | // an enclosing scope. |
3161 | if (const auto *BD = dyn_cast<BindingDecl>(Val: ND)) { |
3162 | if (E->refersToEnclosingVariableOrCapture()) { |
3163 | auto *FD = LambdaCaptureFields.lookup(Val: BD); |
3164 | return EmitCapturedFieldLValue(CGF&: *this, FD, ThisValue: CXXABIThisValue); |
3165 | } |
3166 | return EmitLValue(E: BD->getBinding()); |
3167 | } |
3168 | |
3169 | // We can form DeclRefExprs naming GUID declarations when reconstituting |
3170 | // non-type template parameters into expressions. |
3171 | if (const auto *GD = dyn_cast<MSGuidDecl>(Val: ND)) |
3172 | return MakeAddrLValue(Addr: CGM.GetAddrOfMSGuidDecl(GD), T, |
3173 | Source: AlignmentSource::Decl); |
3174 | |
3175 | if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(Val: ND)) { |
3176 | auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO); |
3177 | auto AS = getLangASFromTargetAS(TargetAS: ATPO.getAddressSpace()); |
3178 | |
3179 | if (AS != T.getAddressSpace()) { |
3180 | auto TargetAS = getContext().getTargetAddressSpace(AS: T.getAddressSpace()); |
3181 | auto PtrTy = ATPO.getElementType()->getPointerTo(AddrSpace: TargetAS); |
3182 | auto ASC = getTargetHooks().performAddrSpaceCast( |
3183 | CGM, V: ATPO.getPointer(), SrcAddr: AS, DestAddr: T.getAddressSpace(), DestTy: PtrTy); |
3184 | ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment()); |
3185 | } |
3186 | |
3187 | return MakeAddrLValue(Addr: ATPO, T, Source: AlignmentSource::Decl); |
3188 | } |
3189 | |
3190 | llvm_unreachable("Unhandled DeclRefExpr" ); |
3191 | } |
3192 | |
3193 | LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { |
3194 | // __extension__ doesn't affect lvalue-ness. |
3195 | if (E->getOpcode() == UO_Extension) |
3196 | return EmitLValue(E: E->getSubExpr()); |
3197 | |
3198 | QualType ExprTy = getContext().getCanonicalType(T: E->getSubExpr()->getType()); |
3199 | switch (E->getOpcode()) { |
3200 | default: llvm_unreachable("Unknown unary operator lvalue!" ); |
3201 | case UO_Deref: { |
3202 | QualType T = E->getSubExpr()->getType()->getPointeeType(); |
3203 | assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type" ); |
3204 | |
3205 | LValueBaseInfo BaseInfo; |
3206 | TBAAAccessInfo TBAAInfo; |
3207 | Address Addr = EmitPointerWithAlignment(E: E->getSubExpr(), BaseInfo: &BaseInfo, |
3208 | TBAAInfo: &TBAAInfo); |
3209 | LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo); |
3210 | LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); |
3211 | |
// We should not generate a __weak write barrier for an indirect reference
// through a pointer to object, as in: void foo (__weak id *param); *param = 0;
// But we do continue to generate a __strong write barrier for an indirect
// write into a pointer to object.
3216 | if (getLangOpts().ObjC && |
3217 | getLangOpts().getGC() != LangOptions::NonGC && |
3218 | LV.isObjCWeak()) |
3219 | LV.setNonGC(!E->isOBJCGCCandidate(Ctx&: getContext())); |
3220 | return LV; |
3221 | } |
3222 | case UO_Real: |
3223 | case UO_Imag: { |
3224 | LValue LV = EmitLValue(E: E->getSubExpr()); |
3225 | assert(LV.isSimple() && "real/imag on non-ordinary l-value" ); |
3226 | |
3227 | // __real is valid on scalars. This is a faster way of testing that. |
3228 | // __imag can only produce an rvalue on scalars. |
3229 | if (E->getOpcode() == UO_Real && |
3230 | !LV.getAddress().getElementType()->isStructTy()) { |
3231 | assert(E->getSubExpr()->getType()->isArithmeticType()); |
3232 | return LV; |
3233 | } |
3234 | |
3235 | QualType T = ExprTy->castAs<ComplexType>()->getElementType(); |
3236 | |
3237 | Address Component = |
3238 | (E->getOpcode() == UO_Real |
3239 | ? emitAddrOfRealComponent(complex: LV.getAddress(), complexType: LV.getType()) |
3240 | : emitAddrOfImagComponent(complex: LV.getAddress(), complexType: LV.getType())); |
3241 | LValue ElemLV = MakeAddrLValue(Addr: Component, T, BaseInfo: LV.getBaseInfo(), |
3242 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: T)); |
3243 | ElemLV.getQuals().addQualifiers(Q: LV.getQuals()); |
3244 | return ElemLV; |
3245 | } |
3246 | case UO_PreInc: |
3247 | case UO_PreDec: { |
3248 | LValue LV = EmitLValue(E: E->getSubExpr()); |
3249 | bool isInc = E->getOpcode() == UO_PreInc; |
3250 | |
3251 | if (E->getType()->isAnyComplexType()) |
3252 | EmitComplexPrePostIncDec(E, LV, isInc, isPre: true/*isPre*/); |
3253 | else |
3254 | EmitScalarPrePostIncDec(E, LV, isInc, isPre: true/*isPre*/); |
3255 | return LV; |
3256 | } |
3257 | } |
3258 | } |
3259 | |
3260 | LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { |
3261 | return MakeAddrLValue(Addr: CGM.GetAddrOfConstantStringFromLiteral(S: E), |
3262 | T: E->getType(), Source: AlignmentSource::Decl); |
3263 | } |
3264 | |
3265 | LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { |
3266 | return MakeAddrLValue(Addr: CGM.GetAddrOfConstantStringFromObjCEncode(E), |
3267 | T: E->getType(), Source: AlignmentSource::Decl); |
3268 | } |
3269 | |
3270 | LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { |
3271 | auto SL = E->getFunctionName(); |
3272 | assert(SL != nullptr && "No StringLiteral name in PredefinedExpr" ); |
3273 | StringRef FnName = CurFn->getName(); |
3274 | if (FnName.starts_with(Prefix: "\01" )) |
3275 | FnName = FnName.substr(Start: 1); |
3276 | StringRef NameItems[] = { |
3277 | PredefinedExpr::getIdentKindName(IK: E->getIdentKind()), FnName}; |
3278 | std::string GVName = llvm::join(Begin: NameItems, End: NameItems + 2, Separator: "." ); |
3279 | if (auto *BD = dyn_cast_or_null<BlockDecl>(Val: CurCodeDecl)) { |
3280 | std::string Name = std::string(SL->getString()); |
3281 | if (!Name.empty()) { |
3282 | unsigned Discriminator = |
3283 | CGM.getCXXABI().getMangleContext().getBlockId(BD, Local: true); |
3284 | if (Discriminator) |
3285 | Name += "_" + Twine(Discriminator + 1).str(); |
3286 | auto C = CGM.GetAddrOfConstantCString(Str: Name, GlobalName: GVName.c_str()); |
3287 | return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl); |
3288 | } else { |
3289 | auto C = |
3290 | CGM.GetAddrOfConstantCString(Str: std::string(FnName), GlobalName: GVName.c_str()); |
3291 | return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl); |
3292 | } |
3293 | } |
3294 | auto C = CGM.GetAddrOfConstantStringFromLiteral(S: SL, Name: GVName); |
3295 | return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl); |
3296 | } |
3297 | |
3298 | /// Emit a type description suitable for use by a runtime sanitizer library. The |
3299 | /// format of a type descriptor is |
3300 | /// |
3301 | /// \code |
3302 | /// { i16 TypeKind, i16 TypeInfo } |
3303 | /// \endcode |
3304 | /// |
3305 | /// followed by an array of i8 containing the type name. TypeKind is 0 for an |
3306 | /// integer, 1 for a floating point value, and -1 for anything else. |
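/// For example (illustrative, assuming a target with 32-bit 'int'): the
/// descriptor for 'int' is { i16 0, i16 11 } followed by "'int'", since
/// TypeInfo == (Log2_32(32) << 1) | 1 == 11; for 'double' it would be
/// { i16 1, i16 64 }.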
3307 | llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { |
3308 | // Only emit each type's descriptor once. |
3309 | if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(Ty: T)) |
3310 | return C; |
3311 | |
3312 | uint16_t TypeKind = -1; |
3313 | uint16_t TypeInfo = 0; |
3314 | |
3315 | if (T->isIntegerType()) { |
3316 | TypeKind = 0; |
3317 | TypeInfo = (llvm::Log2_32(Value: getContext().getTypeSize(T)) << 1) | |
3318 | (T->isSignedIntegerType() ? 1 : 0); |
3319 | } else if (T->isFloatingType()) { |
3320 | TypeKind = 1; |
3321 | TypeInfo = getContext().getTypeSize(T); |
3322 | } |
3323 | |
3324 | // Format the type name as if for a diagnostic, including quotes and |
3325 | // optionally an 'aka'. |
3326 | SmallString<32> Buffer; |
3327 | CGM.getDiags().ConvertArgToString( |
3328 | Kind: DiagnosticsEngine::ak_qualtype, Val: (intptr_t)T.getAsOpaquePtr(), Modifier: StringRef(), |
3329 | Argument: StringRef(), PrevArgs: std::nullopt, Output&: Buffer, QualTypeVals: std::nullopt); |
3330 | |
3331 | llvm::Constant *Components[] = { |
3332 | Builder.getInt16(C: TypeKind), Builder.getInt16(C: TypeInfo), |
3333 | llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: Buffer) |
3334 | }; |
3335 | llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(V: Components); |
3336 | |
3337 | auto *GV = new llvm::GlobalVariable( |
3338 | CGM.getModule(), Descriptor->getType(), |
3339 | /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); |
3340 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3341 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); |
3342 | |
3343 | // Remember the descriptor for this type. |
3344 | CGM.setTypeDescriptorInMap(Ty: T, C: GV); |
3345 | |
3346 | return GV; |
3347 | } |
3348 | |
3349 | llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { |
3350 | llvm::Type *TargetTy = IntPtrTy; |
3351 | |
3352 | if (V->getType() == TargetTy) |
3353 | return V; |
3354 | |
3355 | // Floating-point types which fit into intptr_t are bitcast to integers |
3356 | // and then passed directly (after zero-extension, if necessary). |
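// Illustrative example (assuming a 64-bit intptr_t): a 'float' is bitcast
// to i32 here and then zero-extended to i64 by the integer path below,
// while an 80-bit x86 'long double' does not fit and falls through to the
// pass-by-address path at the end.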
3357 | if (V->getType()->isFloatingPointTy()) { |
3358 | unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue(); |
3359 | if (Bits <= TargetTy->getIntegerBitWidth()) |
3360 | V = Builder.CreateBitCast(V, DestTy: llvm::Type::getIntNTy(C&: getLLVMContext(), |
3361 | N: Bits)); |
3362 | } |
3363 | |
3364 | // Integers which fit in intptr_t are zero-extended and passed directly. |
3365 | if (V->getType()->isIntegerTy() && |
3366 | V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) |
3367 | return Builder.CreateZExt(V, DestTy: TargetTy); |
3368 | |
3369 | // Pointers are passed directly, everything else is passed by address. |
3370 | if (!V->getType()->isPointerTy()) { |
3371 | RawAddress Ptr = CreateDefaultAlignTempAlloca(Ty: V->getType()); |
3372 | Builder.CreateStore(Val: V, Addr: Ptr); |
3373 | V = Ptr.getPointer(); |
3374 | } |
3375 | return Builder.CreatePtrToInt(V, DestTy: TargetTy); |
3376 | } |
3377 | |
3378 | /// Emit a representation of a SourceLocation for passing to a handler |
3379 | /// in a sanitizer runtime library. The format for this data is: |
3380 | /// \code |
3381 | /// struct SourceLocation { |
3382 | /// const char *Filename; |
3383 | /// int32_t Line, Column; |
3384 | /// }; |
3385 | /// \endcode |
3386 | /// For an invalid SourceLocation, the Filename pointer is null. |
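/// For example (illustrative): a check at line 12, column 8 of "a.c" is
/// encoded as { "a.c", 12, 8 }, modulo any path-component stripping
/// requested via -fsanitize-undefined-strip-path-components.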
3387 | llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { |
3388 | llvm::Constant *Filename; |
3389 | int Line, Column; |
3390 | |
3391 | PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); |
3392 | if (PLoc.isValid()) { |
3393 | StringRef FilenameString = PLoc.getFilename(); |
3394 | |
3395 | int PathComponentsToStrip = |
3396 | CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip; |
3397 | if (PathComponentsToStrip < 0) { |
3398 | assert(PathComponentsToStrip != INT_MIN); |
3399 | int PathComponentsToKeep = -PathComponentsToStrip; |
3400 | auto I = llvm::sys::path::rbegin(path: FilenameString); |
3401 | auto E = llvm::sys::path::rend(path: FilenameString); |
3402 | while (I != E && --PathComponentsToKeep) |
3403 | ++I; |
3404 | |
3405 | FilenameString = FilenameString.substr(Start: I - E); |
3406 | } else if (PathComponentsToStrip > 0) { |
3407 | auto I = llvm::sys::path::begin(path: FilenameString); |
3408 | auto E = llvm::sys::path::end(path: FilenameString); |
3409 | while (I != E && PathComponentsToStrip--) |
3410 | ++I; |
3411 | |
3412 | if (I != E) |
3413 | FilenameString = |
3414 | FilenameString.substr(Start: I - llvm::sys::path::begin(path: FilenameString)); |
3415 | else |
3416 | FilenameString = llvm::sys::path::filename(path: FilenameString); |
3417 | } |
3418 | |
3419 | auto FilenameGV = |
3420 | CGM.GetAddrOfConstantCString(Str: std::string(FilenameString), GlobalName: ".src" ); |
3421 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal( |
3422 | GV: cast<llvm::GlobalVariable>( |
3423 | Val: FilenameGV.getPointer()->stripPointerCasts())); |
3424 | Filename = FilenameGV.getPointer(); |
3425 | Line = PLoc.getLine(); |
3426 | Column = PLoc.getColumn(); |
3427 | } else { |
3428 | Filename = llvm::Constant::getNullValue(Ty: Int8PtrTy); |
3429 | Line = Column = 0; |
3430 | } |
3431 | |
3432 | llvm::Constant *Data[] = {Filename, Builder.getInt32(C: Line), |
3433 | Builder.getInt32(C: Column)}; |
3434 | |
3435 | return llvm::ConstantStruct::getAnon(V: Data); |
3436 | } |
3437 | |
3438 | namespace { |
/// Specify under what conditions this check is recoverable.
3440 | enum class CheckRecoverableKind { |
3441 | /// Always terminate program execution if this check fails. |
3442 | Unrecoverable, |
/// Check supports recovering; the runtime has both fatal (noreturn) and
/// non-fatal handlers for this check.
3445 | Recoverable, |
/// The runtime conditionally aborts; we always need to support recovery.
3447 | AlwaysRecoverable |
3448 | }; |
3449 | } |
3450 | |
3451 | static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) { |
3452 | assert(Kind.countPopulation() == 1); |
3453 | if (Kind == SanitizerKind::Vptr) |
3454 | return CheckRecoverableKind::AlwaysRecoverable; |
3455 | else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable) |
3456 | return CheckRecoverableKind::Unrecoverable; |
3457 | else |
3458 | return CheckRecoverableKind::Recoverable; |
3459 | } |
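// Informal rationale: Vptr must always support recovery because the
// runtime itself may decide to suppress a given report, while Return and
// Unreachable are unrecoverable because there is no valid program state in
// which execution could continue past a failed check.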
3460 | |
3461 | namespace { |
3462 | struct SanitizerHandlerInfo { |
3463 | char const *const Name; |
3464 | unsigned Version; |
3465 | }; |
3466 | } |
3467 | |
3468 | const SanitizerHandlerInfo SanitizerHandlers[] = { |
3469 | #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version}, |
3470 | LIST_SANITIZER_CHECKS |
3471 | #undef SANITIZER_CHECK |
3472 | }; |
3473 | |
3474 | static void emitCheckHandlerCall(CodeGenFunction &CGF, |
3475 | llvm::FunctionType *FnType, |
3476 | ArrayRef<llvm::Value *> FnArgs, |
3477 | SanitizerHandler CheckHandler, |
3478 | CheckRecoverableKind RecoverKind, bool IsFatal, |
3479 | llvm::BasicBlock *ContBB) { |
3480 | assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable); |
3481 | std::optional<ApplyDebugLocation> DL; |
3482 | if (!CGF.Builder.getCurrentDebugLocation()) { |
3483 | // Ensure that the call has at least an artificial debug location. |
3484 | DL.emplace(args&: CGF, args: SourceLocation()); |
3485 | } |
3486 | bool NeedsAbortSuffix = |
3487 | IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable; |
3488 | bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime; |
3489 | const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler]; |
3490 | const StringRef CheckName = CheckInfo.Name; |
3491 | std::string FnName = "__ubsan_handle_" + CheckName.str(); |
3492 | if (CheckInfo.Version && !MinimalRuntime) |
3493 | FnName += "_v" + llvm::utostr(X: CheckInfo.Version); |
3494 | if (MinimalRuntime) |
3495 | FnName += "_minimal" ; |
3496 | if (NeedsAbortSuffix) |
3497 | FnName += "_abort" ; |
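// Illustrative example: CheckName "type_mismatch" with Version 1 yields
// "__ubsan_handle_type_mismatch_v1" in the full runtime, gaining an
// "_abort" suffix for the fatal flavor, or becoming
// "__ubsan_handle_type_mismatch_minimal" when targeting the minimal
// runtime.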
3498 | bool MayReturn = |
3499 | !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable; |
3500 | |
3501 | llvm::AttrBuilder B(CGF.getLLVMContext()); |
3502 | if (!MayReturn) { |
3503 | B.addAttribute(Val: llvm::Attribute::NoReturn) |
3504 | .addAttribute(Val: llvm::Attribute::NoUnwind); |
3505 | } |
3506 | B.addUWTableAttr(Kind: llvm::UWTableKind::Default); |
3507 | |
3508 | llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction( |
3509 | Ty: FnType, Name: FnName, |
3510 | ExtraAttrs: llvm::AttributeList::get(C&: CGF.getLLVMContext(), |
3511 | Index: llvm::AttributeList::FunctionIndex, B), |
3512 | /*Local=*/true); |
3513 | llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(callee: Fn, args: FnArgs); |
3514 | if (!MayReturn) { |
3515 | HandlerCall->setDoesNotReturn(); |
3516 | CGF.Builder.CreateUnreachable(); |
3517 | } else { |
3518 | CGF.Builder.CreateBr(Dest: ContBB); |
3519 | } |
3520 | } |
3521 | |
3522 | void CodeGenFunction::EmitCheck( |
3523 | ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked, |
3524 | SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs, |
3525 | ArrayRef<llvm::Value *> DynamicArgs) { |
3526 | assert(IsSanitizerScope); |
3527 | assert(Checked.size() > 0); |
3528 | assert(CheckHandler >= 0 && |
3529 | size_t(CheckHandler) < std::size(SanitizerHandlers)); |
3530 | const StringRef CheckName = SanitizerHandlers[CheckHandler].Name; |
3531 | |
3532 | llvm::Value *FatalCond = nullptr; |
3533 | llvm::Value *RecoverableCond = nullptr; |
3534 | llvm::Value *TrapCond = nullptr; |
3535 | for (int i = 0, n = Checked.size(); i < n; ++i) { |
3536 | llvm::Value *Check = Checked[i].first; |
3537 | // -fsanitize-trap= overrides -fsanitize-recover=. |
3538 | llvm::Value *&Cond = |
3539 | CGM.getCodeGenOpts().SanitizeTrap.has(K: Checked[i].second) |
3540 | ? TrapCond |
3541 | : CGM.getCodeGenOpts().SanitizeRecover.has(K: Checked[i].second) |
3542 | ? RecoverableCond |
3543 | : FatalCond; |
3544 | Cond = Cond ? Builder.CreateAnd(LHS: Cond, RHS: Check) : Check; |
3545 | } |
3546 | |
3547 | if (ClSanitizeGuardChecks) { |
3548 | llvm::Value *Allow = |
3549 | Builder.CreateCall(Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::allow_ubsan_check), |
3550 | Args: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: CheckHandler)); |
3551 | |
3552 | for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) { |
3553 | if (*Cond) |
3554 | *Cond = Builder.CreateOr(LHS: *Cond, RHS: Builder.CreateNot(V: Allow)); |
3555 | } |
3556 | } |
3557 | |
3558 | if (TrapCond) |
3559 | EmitTrapCheck(Checked: TrapCond, CheckHandlerID: CheckHandler); |
3560 | if (!FatalCond && !RecoverableCond) |
3561 | return; |
3562 | |
3563 | llvm::Value *JointCond; |
3564 | if (FatalCond && RecoverableCond) |
3565 | JointCond = Builder.CreateAnd(LHS: FatalCond, RHS: RecoverableCond); |
3566 | else |
3567 | JointCond = FatalCond ? FatalCond : RecoverableCond; |
3568 | assert(JointCond); |
3569 | |
3570 | CheckRecoverableKind RecoverKind = getRecoverableKind(Kind: Checked[0].second); |
3571 | assert(SanOpts.has(Checked[0].second)); |
3572 | #ifndef NDEBUG |
3573 | for (int i = 1, n = Checked.size(); i < n; ++i) { |
3574 | assert(RecoverKind == getRecoverableKind(Checked[i].second) && |
3575 | "All recoverable kinds in a single check must be same!" ); |
3576 | assert(SanOpts.has(Checked[i].second)); |
3577 | } |
3578 | #endif |
3579 | |
3580 | llvm::BasicBlock *Cont = createBasicBlock(name: "cont" ); |
3581 | llvm::BasicBlock *Handlers = createBasicBlock(name: "handler." + CheckName); |
3582 | llvm::Instruction *Branch = Builder.CreateCondBr(Cond: JointCond, True: Cont, False: Handlers); |
// Hint that we very much don't expect to execute the handler.
3584 | llvm::MDBuilder MDHelper(getLLVMContext()); |
3585 | llvm::MDNode *Node = MDHelper.createLikelyBranchWeights(); |
3586 | Branch->setMetadata(KindID: llvm::LLVMContext::MD_prof, Node); |
3587 | EmitBlock(BB: Handlers); |
3588 | |
3589 | // Handler functions take an i8* pointing to the (handler-specific) static |
3590 | // information block, followed by a sequence of intptr_t arguments |
3591 | // representing operand values. |
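// Illustrative example (a sketch of the runtime's convention): for a
// type_mismatch check, the static block bundles the source location, the
// type descriptor, the log2 alignment and the check kind, while the
// pointer under test is the lone intptr_t dynamic argument.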
3592 | SmallVector<llvm::Value *, 4> Args; |
3593 | SmallVector<llvm::Type *, 4> ArgTypes; |
3594 | if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) { |
3595 | Args.reserve(N: DynamicArgs.size() + 1); |
3596 | ArgTypes.reserve(N: DynamicArgs.size() + 1); |
3597 | |
3598 | // Emit handler arguments and create handler function type. |
3599 | if (!StaticArgs.empty()) { |
3600 | llvm::Constant *Info = llvm::ConstantStruct::getAnon(V: StaticArgs); |
3601 | auto *InfoPtr = new llvm::GlobalVariable( |
3602 | CGM.getModule(), Info->getType(), false, |
3603 | llvm::GlobalVariable::PrivateLinkage, Info, "" , nullptr, |
3604 | llvm::GlobalVariable::NotThreadLocal, |
3605 | CGM.getDataLayout().getDefaultGlobalsAddressSpace()); |
3606 | InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3607 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV: InfoPtr); |
3608 | Args.push_back(Elt: InfoPtr); |
3609 | ArgTypes.push_back(Elt: Args.back()->getType()); |
3610 | } |
3611 | |
3612 | for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) { |
3613 | Args.push_back(Elt: EmitCheckValue(V: DynamicArgs[i])); |
3614 | ArgTypes.push_back(Elt: IntPtrTy); |
3615 | } |
3616 | } |
3617 | |
3618 | llvm::FunctionType *FnType = |
3619 | llvm::FunctionType::get(Result: CGM.VoidTy, Params: ArgTypes, isVarArg: false); |
3620 | |
3621 | if (!FatalCond || !RecoverableCond) { |
// Simple case: we need to generate a single handler call, either fatal
// or non-fatal.
3624 | emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind, |
3625 | IsFatal: (FatalCond != nullptr), ContBB: Cont); |
3626 | } else { |
// Emit two handler calls: the first one for the set of unrecoverable
// checks, another one for the recoverable ones.
3629 | llvm::BasicBlock *NonFatalHandlerBB = |
3630 | createBasicBlock(name: "non_fatal." + CheckName); |
3631 | llvm::BasicBlock *FatalHandlerBB = createBasicBlock(name: "fatal." + CheckName); |
3632 | Builder.CreateCondBr(Cond: FatalCond, True: NonFatalHandlerBB, False: FatalHandlerBB); |
3633 | EmitBlock(BB: FatalHandlerBB); |
3634 | emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind, IsFatal: true, |
3635 | ContBB: NonFatalHandlerBB); |
3636 | EmitBlock(BB: NonFatalHandlerBB); |
3637 | emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind, IsFatal: false, |
3638 | ContBB: Cont); |
3639 | } |
3640 | |
3641 | EmitBlock(BB: Cont); |
3642 | } |
3643 | |
3644 | void CodeGenFunction::EmitCfiSlowPathCheck( |
3645 | SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, |
3646 | llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) { |
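// Cross-DSO CFI: when the inline check fails, the target may simply live
// in another DSO, so defer to the runtime slow path, passing the expected
// type's TypeId and the target address (plus diagnostic data unless this
// check traps).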
3647 | llvm::BasicBlock *Cont = createBasicBlock(name: "cfi.cont" ); |
3648 | |
3649 | llvm::BasicBlock *CheckBB = createBasicBlock(name: "cfi.slowpath" ); |
3650 | llvm::BranchInst *BI = Builder.CreateCondBr(Cond, True: Cont, False: CheckBB); |
3651 | |
3652 | llvm::MDBuilder MDHelper(getLLVMContext()); |
3653 | llvm::MDNode *Node = MDHelper.createLikelyBranchWeights(); |
3654 | BI->setMetadata(KindID: llvm::LLVMContext::MD_prof, Node); |
3655 | |
3656 | EmitBlock(BB: CheckBB); |
3657 | |
3658 | bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(K: Kind); |
3659 | |
3660 | llvm::CallInst *CheckCall; |
3661 | llvm::FunctionCallee SlowPathFn; |
3662 | if (WithDiag) { |
3663 | llvm::Constant *Info = llvm::ConstantStruct::getAnon(V: StaticArgs); |
3664 | auto *InfoPtr = |
3665 | new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, |
3666 | llvm::GlobalVariable::PrivateLinkage, Info); |
3667 | InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3668 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV: InfoPtr); |
3669 | |
3670 | SlowPathFn = CGM.getModule().getOrInsertFunction( |
3671 | Name: "__cfi_slowpath_diag" , |
3672 | T: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, Int8PtrTy, Int8PtrTy}, |
3673 | isVarArg: false)); |
3674 | CheckCall = Builder.CreateCall(Callee: SlowPathFn, Args: {TypeId, Ptr, InfoPtr}); |
3675 | } else { |
3676 | SlowPathFn = CGM.getModule().getOrInsertFunction( |
3677 | Name: "__cfi_slowpath" , |
3678 | T: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, Int8PtrTy}, isVarArg: false)); |
3679 | CheckCall = Builder.CreateCall(Callee: SlowPathFn, Args: {TypeId, Ptr}); |
3680 | } |
3681 | |
3682 | CGM.setDSOLocal( |
3683 | cast<llvm::GlobalValue>(Val: SlowPathFn.getCallee()->stripPointerCasts())); |
3684 | CheckCall->setDoesNotThrow(); |
3685 | |
3686 | EmitBlock(BB: Cont); |
3687 | } |
3688 | |
// Emit a stub for the __cfi_check function so that the linker knows about
// this symbol in LTO mode.
3691 | void CodeGenFunction::EmitCfiCheckStub() { |
3692 | llvm::Module *M = &CGM.getModule(); |
3693 | ASTContext &C = getContext(); |
3694 | QualType QInt64Ty = C.getIntTypeForBitwidth(DestWidth: 64, Signed: false); |
3695 | |
3696 | FunctionArgList FnArgs; |
3697 | ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other); |
3698 | ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other); |
3699 | ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy, |
3700 | ImplicitParamKind::Other); |
3701 | FnArgs.push_back(Elt: &ArgCallsiteTypeId); |
3702 | FnArgs.push_back(Elt: &ArgAddr); |
3703 | FnArgs.push_back(Elt: &ArgCFICheckFailData); |
3704 | const CGFunctionInfo &FI = |
3705 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: C.VoidTy, args: FnArgs); |
3706 | |
3707 | llvm::Function *F = llvm::Function::Create( |
3708 | Ty: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, VoidPtrTy, VoidPtrTy}, isVarArg: false), |
3709 | Linkage: llvm::GlobalValue::WeakAnyLinkage, N: "__cfi_check" , M); |
3710 | CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F, /*IsThunk=*/false); |
3711 | CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F); |
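// Assumption worth noting: the cross-DSO CFI ABI expects __cfi_check to be
// aligned to 4 KiB so that its address can be encoded compactly in the CFI
// shadow.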
3712 | F->setAlignment(llvm::Align(4096)); |
3713 | CGM.setDSOLocal(F); |
3714 | |
3715 | llvm::LLVMContext &Ctx = M->getContext(); |
3716 | llvm::BasicBlock *BB = llvm::BasicBlock::Create(Context&: Ctx, Name: "entry" , Parent: F); |
// The CrossDSOCFI pass is not executed if there is no executable code.
3718 | SmallVector<llvm::Value*> Args{F->getArg(i: 2), F->getArg(i: 1)}; |
3719 | llvm::CallInst::Create(Func: M->getFunction(Name: "__cfi_check_fail" ), Args, NameStr: "" , InsertBefore: BB); |
3720 | llvm::ReturnInst::Create(C&: Ctx, retVal: nullptr, InsertBefore: BB); |
3721 | } |
3722 | |
// This function is basically a switch over the CFI failure kind, which is
// extracted from CFICheckFailData (the first function argument). Each case is
// either llvm.trap or a call to one of the two runtime handlers, based on the
// -fsanitize-trap and -fsanitize-recover settings. The default case (an
// invalid failure kind) traps, but this should really never happen.
// CFICheckFailData can be nullptr if the calling module has -fsanitize-trap
// behavior for this check kind; in this case __cfi_check_fail traps as well.
3730 | void CodeGenFunction::EmitCfiCheckFail() { |
3731 | SanitizerScope SanScope(this); |
3732 | FunctionArgList Args; |
3733 | ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy, |
3734 | ImplicitParamKind::Other); |
3735 | ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy, |
3736 | ImplicitParamKind::Other); |
3737 | Args.push_back(Elt: &ArgData); |
3738 | Args.push_back(Elt: &ArgAddr); |
3739 | |
3740 | const CGFunctionInfo &FI = |
3741 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: getContext().VoidTy, args: Args); |
3742 | |
3743 | llvm::Function *F = llvm::Function::Create( |
3744 | Ty: llvm::FunctionType::get(Result: VoidTy, Params: {VoidPtrTy, VoidPtrTy}, isVarArg: false), |
3745 | Linkage: llvm::GlobalValue::WeakODRLinkage, N: "__cfi_check_fail" , M: &CGM.getModule()); |
3746 | |
3747 | CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F, /*IsThunk=*/false); |
3748 | CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F); |
3749 | F->setVisibility(llvm::GlobalValue::HiddenVisibility); |
3750 | |
3751 | StartFunction(GD: GlobalDecl(), RetTy: CGM.getContext().VoidTy, Fn: F, FnInfo: FI, Args, |
3752 | Loc: SourceLocation()); |
3753 | |
  // This function is not affected by NoSanitizeList. It does not have a
  // source location, but "src:*" would still apply. Revert any changes to
  // SanOpts made in StartFunction.
3757 | SanOpts = CGM.getLangOpts().Sanitize; |
3758 | |
3759 | llvm::Value *Data = |
3760 | EmitLoadOfScalar(Addr: GetAddrOfLocalVar(VD: &ArgData), /*Volatile=*/false, |
3761 | Ty: CGM.getContext().VoidPtrTy, Loc: ArgData.getLocation()); |
3762 | llvm::Value *Addr = |
3763 | EmitLoadOfScalar(Addr: GetAddrOfLocalVar(VD: &ArgAddr), /*Volatile=*/false, |
3764 | Ty: CGM.getContext().VoidPtrTy, Loc: ArgAddr.getLocation()); |
3765 | |
3766 | // Data == nullptr means the calling module has trap behaviour for this check. |
3767 | llvm::Value *DataIsNotNullPtr = |
3768 | Builder.CreateICmpNE(LHS: Data, RHS: llvm::ConstantPointerNull::get(T: Int8PtrTy)); |
3769 | EmitTrapCheck(Checked: DataIsNotNullPtr, CheckHandlerID: SanitizerHandler::CFICheckFail); |
3770 | |
3771 | llvm::StructType *SourceLocationTy = |
3772 | llvm::StructType::get(elt1: VoidPtrTy, elts: Int32Ty, elts: Int32Ty); |
3773 | llvm::StructType *CfiCheckFailDataTy = |
3774 | llvm::StructType::get(elt1: Int8Ty, elts: SourceLocationTy, elts: VoidPtrTy); |
3775 | |
3776 | llvm::Value *V = Builder.CreateConstGEP2_32( |
3777 | Ty: CfiCheckFailDataTy, |
3778 | Ptr: Builder.CreatePointerCast(V: Data, DestTy: CfiCheckFailDataTy->getPointerTo(AddrSpace: 0)), Idx0: 0, |
3779 | Idx1: 0); |
3780 | |
3781 | Address CheckKindAddr(V, Int8Ty, getIntAlign()); |
3782 | llvm::Value *CheckKind = Builder.CreateLoad(Addr: CheckKindAddr); |
3783 | |
3784 | llvm::Value *AllVtables = llvm::MetadataAsValue::get( |
3785 | Context&: CGM.getLLVMContext(), |
3786 | MD: llvm::MDString::get(Context&: CGM.getLLVMContext(), Str: "all-vtables" )); |
3787 | llvm::Value *ValidVtable = Builder.CreateZExt( |
3788 | V: Builder.CreateCall(Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_test), |
3789 | Args: {Addr, AllVtables}), |
3790 | DestTy: IntPtrTy); |
3791 | |
3792 | const std::pair<int, SanitizerMask> CheckKinds[] = { |
3793 | {CFITCK_VCall, SanitizerKind::CFIVCall}, |
3794 | {CFITCK_NVCall, SanitizerKind::CFINVCall}, |
3795 | {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast}, |
3796 | {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast}, |
3797 | {CFITCK_ICall, SanitizerKind::CFIICall}}; |
3798 | |
3799 | SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks; |
3800 | for (auto CheckKindMaskPair : CheckKinds) { |
3801 | int Kind = CheckKindMaskPair.first; |
3802 | SanitizerMask Mask = CheckKindMaskPair.second; |
3803 | llvm::Value *Cond = |
3804 | Builder.CreateICmpNE(LHS: CheckKind, RHS: llvm::ConstantInt::get(Ty: Int8Ty, V: Kind)); |
3805 | if (CGM.getLangOpts().Sanitize.has(K: Mask)) |
3806 | EmitCheck(Checked: std::make_pair(x&: Cond, y&: Mask), CheckHandler: SanitizerHandler::CFICheckFail, StaticArgs: {}, |
3807 | DynamicArgs: {Data, Addr, ValidVtable}); |
3808 | else |
3809 | EmitTrapCheck(Checked: Cond, CheckHandlerID: SanitizerHandler::CFICheckFail); |
3810 | } |
3811 | |
3812 | FinishFunction(); |
3813 | // The only reference to this function will be created during LTO link. |
3814 | // Make sure it survives until then. |
3815 | CGM.addUsedGlobal(GV: F); |
3816 | } |
3817 | |
3818 | void CodeGenFunction::EmitUnreachable(SourceLocation Loc) { |
3819 | if (SanOpts.has(K: SanitizerKind::Unreachable)) { |
3820 | SanitizerScope SanScope(this); |
3821 | EmitCheck(Checked: std::make_pair(x: static_cast<llvm::Value *>(Builder.getFalse()), |
3822 | y: SanitizerKind::Unreachable), |
3823 | CheckHandler: SanitizerHandler::BuiltinUnreachable, |
3824 | StaticArgs: EmitCheckSourceLocation(Loc), DynamicArgs: std::nullopt); |
3825 | } |
3826 | Builder.CreateUnreachable(); |
3827 | } |
3828 | |
3829 | void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked, |
3830 | SanitizerHandler CheckHandlerID) { |
3831 | llvm::BasicBlock *Cont = createBasicBlock(name: "cont" ); |
3832 | |
3833 | // If we're optimizing, collapse all calls to trap down to just one per |
3834 | // check-type per function to save on code size. |
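  // For example, two -fsanitize-trap'ed bounds checks in the same function
  // can branch to one shared block containing a single
  //   call void @llvm.ubsantrap(i8 <handler id>)
  // rather than each emitting its own trap (a sketch; the id depends on the
  // check handler).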
3835 | if ((int)TrapBBs.size() <= CheckHandlerID) |
3836 | TrapBBs.resize(N: CheckHandlerID + 1); |
3837 | |
3838 | llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID]; |
3839 | |
3840 | if (!ClSanitizeDebugDeoptimization && |
3841 | CGM.getCodeGenOpts().OptimizationLevel && TrapBB && |
3842 | (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) { |
3843 | auto Call = TrapBB->begin(); |
3844 | assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB" ); |
3845 | |
3846 | Call->applyMergedLocation(LocA: Call->getDebugLoc(), |
3847 | LocB: Builder.getCurrentDebugLocation()); |
3848 | Builder.CreateCondBr(Cond: Checked, True: Cont, False: TrapBB); |
3849 | } else { |
3850 | TrapBB = createBasicBlock(name: "trap" ); |
3851 | Builder.CreateCondBr(Cond: Checked, True: Cont, False: TrapBB); |
3852 | EmitBlock(BB: TrapBB); |
3853 | |
3854 | llvm::CallInst *TrapCall = Builder.CreateCall( |
3855 | Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::ubsantrap), |
3856 | Args: llvm::ConstantInt::get(Ty: CGM.Int8Ty, |
3857 | V: ClSanitizeDebugDeoptimization |
3858 | ? TrapBB->getParent()->size() |
3859 | : static_cast<uint64_t>(CheckHandlerID))); |
3860 | |
3861 | if (!CGM.getCodeGenOpts().TrapFuncName.empty()) { |
      auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                    CGM.getCodeGenOpts().TrapFuncName);
      TrapCall->addFnAttr(A);
3865 | } |
3866 | TrapCall->setDoesNotReturn(); |
3867 | TrapCall->setDoesNotThrow(); |
3868 | Builder.CreateUnreachable(); |
3869 | } |
3870 | |
3871 | EmitBlock(BB: Cont); |
3872 | } |
3873 | |
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
  llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID));

  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
    auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                  CGM.getCodeGenOpts().TrapFuncName);
    TrapCall->addFnAttr(A);
  }

  return TrapCall;
}
3886 | |
3887 | Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E, |
3888 | LValueBaseInfo *BaseInfo, |
3889 | TBAAAccessInfo *TBAAInfo) { |
3890 | assert(E->getType()->isArrayType() && |
3891 | "Array to pointer decay must have array source type!" ); |
3892 | |
3893 | // Expressions of array type can't be bitfields or vector elements. |
3894 | LValue LV = EmitLValue(E); |
3895 | Address Addr = LV.getAddress(); |
3896 | |
3897 | // If the array type was an incomplete type, we need to make sure |
3898 | // the decay ends up being the right type. |
3899 | llvm::Type *NewTy = ConvertType(T: E->getType()); |
3900 | Addr = Addr.withElementType(ElemTy: NewTy); |
3901 | |
3902 | // Note that VLA pointers are always decayed, so we don't need to do |
3903 | // anything here. |
3904 | if (!E->getType()->isVariableArrayType()) { |
3905 | assert(isa<llvm::ArrayType>(Addr.getElementType()) && |
3906 | "Expected pointer to array" ); |
3907 | Addr = Builder.CreateConstArrayGEP(Addr, Index: 0, Name: "arraydecay" ); |
3908 | } |
3909 | |
  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent
  // accesses to the pointee object as if it had no base lvalue specified.
  // TODO: Support TBAA for member arrays.
3915 | QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType(); |
3916 | if (BaseInfo) *BaseInfo = LV.getBaseInfo(); |
3917 | if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(AccessType: EltType); |
3918 | |
3919 | return Addr.withElementType(ElemTy: ConvertTypeForMem(T: EltType)); |
3920 | } |
3921 | |
3922 | /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an |
3923 | /// array to pointer, return the array subexpression. |
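/// For example, given "int A[10]; A[i];" the base is an implicit
/// CK_ArrayToPointerDecay cast of the DeclRefExpr for A, and that
/// subexpression is returned; a decay of a VLA yields nullptr.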
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const auto *CE = dyn_cast<CastExpr>(E);
  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
    return nullptr;

  // If this is a decay from a variable-width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return nullptr;

  return SubExpr;
}
3937 | |
3938 | static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF, |
3939 | llvm::Type *elemType, |
3940 | llvm::Value *ptr, |
3941 | ArrayRef<llvm::Value*> indices, |
3942 | bool inbounds, |
3943 | bool signedIndices, |
3944 | SourceLocation loc, |
3945 | const llvm::Twine &name = "arrayidx" ) { |
3946 | if (inbounds) { |
3947 | return CGF.EmitCheckedInBoundsGEP(ElemTy: elemType, Ptr: ptr, IdxList: indices, SignedIndices: signedIndices, |
3948 | IsSubtraction: CodeGenFunction::NotSubtraction, Loc: loc, |
3949 | Name: name); |
3950 | } else { |
3951 | return CGF.Builder.CreateGEP(Ty: elemType, Ptr: ptr, IdxList: indices, Name: name); |
3952 | } |
3953 | } |
3954 | |
3955 | static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, |
3956 | ArrayRef<llvm::Value *> indices, |
3957 | llvm::Type *elementType, bool inbounds, |
3958 | bool signedIndices, SourceLocation loc, |
3959 | CharUnits align, |
3960 | const llvm::Twine &name = "arrayidx" ) { |
3961 | if (inbounds) { |
3962 | return CGF.EmitCheckedInBoundsGEP(Addr: addr, IdxList: indices, elementType, SignedIndices: signedIndices, |
3963 | IsSubtraction: CodeGenFunction::NotSubtraction, Loc: loc, |
3964 | Align: align, Name: name); |
3965 | } else { |
3966 | return CGF.Builder.CreateGEP(Addr: addr, IdxList: indices, ElementType: elementType, Align: align, Name: name); |
3967 | } |
3968 | } |
3969 | |
3970 | static CharUnits getArrayElementAlign(CharUnits arrayAlign, |
3971 | llvm::Value *idx, |
3972 | CharUnits eltSize) { |
3973 | // If we have a constant index, we can use the exact offset of the |
3974 | // element we're accessing. |
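  // For example, with a 16-byte-aligned array of 4-byte elements, index 2
  // sits at offset 8 and therefore gets alignment 8.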
3975 | if (auto constantIdx = dyn_cast<llvm::ConstantInt>(Val: idx)) { |
3976 | CharUnits offset = constantIdx->getZExtValue() * eltSize; |
3977 | return arrayAlign.alignmentAtOffset(offset); |
3978 | |
3979 | // Otherwise, use the worst-case alignment for any element. |
3980 | } else { |
3981 | return arrayAlign.alignmentOfArrayElement(elementSize: eltSize); |
3982 | } |
3983 | } |
3984 | |
3985 | static QualType getFixedSizeElementType(const ASTContext &ctx, |
3986 | const VariableArrayType *vla) { |
3987 | QualType eltType; |
3988 | do { |
3989 | eltType = vla->getElementType(); |
3990 | } while ((vla = ctx.getAsVariableArrayType(T: eltType))); |
3991 | return eltType; |
3992 | } |
3993 | |
3994 | static bool hasBPFPreserveStaticOffset(const RecordDecl *D) { |
3995 | return D && D->hasAttr<BPFPreserveStaticOffsetAttr>(); |
3996 | } |
3997 | |
3998 | static bool hasBPFPreserveStaticOffset(const Expr *E) { |
3999 | if (!E) |
4000 | return false; |
4001 | QualType PointeeType = E->getType()->getPointeeType(); |
4002 | if (PointeeType.isNull()) |
4003 | return false; |
4004 | if (const auto *BaseDecl = PointeeType->getAsRecordDecl()) |
4005 | return hasBPFPreserveStaticOffset(D: BaseDecl); |
4006 | return false; |
4007 | } |
4008 | |
// Wraps Addr with a call to the llvm.preserve.static.offset intrinsic.
4010 | static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, |
4011 | Address &Addr) { |
4012 | if (!CGF.getTarget().getTriple().isBPF()) |
4013 | return Addr; |
4014 | |
4015 | llvm::Function *Fn = |
4016 | CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::preserve_static_offset); |
4017 | llvm::CallInst *Call = CGF.Builder.CreateCall(Callee: Fn, Args: {Addr.emitRawPointer(CGF)}); |
4018 | return Address(Call, Addr.getElementType(), Addr.getAlignment()); |
4019 | } |
4020 | |
/// Given an array base, check whether its member access belongs to a record
/// with the preserve_access_index attribute.
4023 | static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) { |
4024 | if (!ArrayBase || !CGF.getDebugInfo()) |
4025 | return false; |
4026 | |
4027 | // Only support base as either a MemberExpr or DeclRefExpr. |
4028 | // DeclRefExpr to cover cases like: |
4029 | // struct s { int a; int b[10]; }; |
4030 | // struct s *p; |
4031 | // p[1].a |
4032 | // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr. |
4033 | // p->b[5] is a MemberExpr example. |
4034 | const Expr *E = ArrayBase->IgnoreImpCasts(); |
4035 | if (const auto *ME = dyn_cast<MemberExpr>(Val: E)) |
4036 | return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>(); |
4037 | |
4038 | if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E)) { |
4039 | const auto *VarDef = dyn_cast<VarDecl>(Val: DRE->getDecl()); |
4040 | if (!VarDef) |
4041 | return false; |
4042 | |
4043 | const auto *PtrT = VarDef->getType()->getAs<PointerType>(); |
4044 | if (!PtrT) |
4045 | return false; |
4046 | |
4047 | const auto *PointeeT = PtrT->getPointeeType() |
4048 | ->getUnqualifiedDesugaredType(); |
4049 | if (const auto *RecT = dyn_cast<RecordType>(Val: PointeeT)) |
4050 | return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>(); |
4051 | return false; |
4052 | } |
4053 | |
4054 | return false; |
4055 | } |
4056 | |
4057 | static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, |
4058 | ArrayRef<llvm::Value *> indices, |
4059 | QualType eltType, bool inbounds, |
4060 | bool signedIndices, SourceLocation loc, |
4061 | QualType *arrayType = nullptr, |
4062 | const Expr *Base = nullptr, |
4063 | const llvm::Twine &name = "arrayidx" ) { |
  // All the indices except the last must be zero.
4065 | #ifndef NDEBUG |
4066 | for (auto *idx : indices.drop_back()) |
4067 | assert(isa<llvm::ConstantInt>(idx) && |
4068 | cast<llvm::ConstantInt>(idx)->isZero()); |
4069 | #endif |
4070 | |
4071 | // Determine the element size of the statically-sized base. This is |
4072 | // the thing that the indices are expressed in terms of. |
4073 | if (auto vla = CGF.getContext().getAsVariableArrayType(T: eltType)) { |
4074 | eltType = getFixedSizeElementType(ctx: CGF.getContext(), vla); |
4075 | } |
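  // For example, if eltType is the variably-modified type "int[n]", the
  // indices here are expressed in units of the innermost fixed-size element
  // type, i.e. int.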
4076 | |
4077 | // We can use that to compute the best alignment of the element. |
4078 | CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: eltType); |
4079 | CharUnits eltAlign = |
4080 | getArrayElementAlign(arrayAlign: addr.getAlignment(), idx: indices.back(), eltSize); |
4081 | |
4082 | if (hasBPFPreserveStaticOffset(E: Base)) |
4083 | addr = wrapWithBPFPreserveStaticOffset(CGF, Addr&: addr); |
4084 | |
4085 | llvm::Value *eltPtr; |
4086 | auto LastIndex = dyn_cast<llvm::ConstantInt>(Val: indices.back()); |
4087 | if (!LastIndex || |
4088 | (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, ArrayBase: Base))) { |
4089 | addr = emitArraySubscriptGEP(CGF, addr, indices, |
4090 | elementType: CGF.ConvertTypeForMem(T: eltType), inbounds, |
4091 | signedIndices, loc, align: eltAlign, name); |
4092 | return addr; |
4093 | } else { |
    // Remember the original array subscript for the BPF target.
4095 | unsigned idx = LastIndex->getZExtValue(); |
4096 | llvm::DIType *DbgInfo = nullptr; |
4097 | if (arrayType) |
4098 | DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(Ty: *arrayType, Loc: loc); |
4099 | eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex( |
4100 | ElTy: addr.getElementType(), Base: addr.emitRawPointer(CGF), Dimension: indices.size() - 1, |
4101 | LastIndex: idx, DbgInfo); |
4102 | } |
4103 | |
4104 | return Address(eltPtr, CGF.ConvertTypeForMem(T: eltType), eltAlign); |
4105 | } |
4106 | |
/// Compute the offset of a field from the beginning of the record, in bits.
/// Returns true if \p FD was found in \p RD; on success, \p Offset holds the
/// field's offset in bits.
4108 | static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, |
4109 | const FieldDecl *FD, int64_t &Offset) { |
4110 | ASTContext &Ctx = CGF.getContext(); |
4111 | const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(D: RD); |
4112 | unsigned FieldNo = 0; |
4113 | |
4114 | for (const Decl *D : RD->decls()) { |
4115 | if (const auto *Record = dyn_cast<RecordDecl>(Val: D)) |
4116 | if (getFieldOffsetInBits(CGF, RD: Record, FD, Offset)) { |
4117 | Offset += Layout.getFieldOffset(FieldNo); |
4118 | return true; |
4119 | } |
4120 | |
4121 | if (const auto *Field = dyn_cast<FieldDecl>(Val: D)) |
4122 | if (FD == Field) { |
4123 | Offset += Layout.getFieldOffset(FieldNo); |
4124 | return true; |
4125 | } |
4126 | |
4127 | if (isa<FieldDecl>(Val: D)) |
4128 | ++FieldNo; |
4129 | } |
4130 | |
4131 | return false; |
4132 | } |
4133 | |
4134 | /// Returns the relative offset difference between \p FD1 and \p FD2. |
4135 | /// \code |
4136 | /// offsetof(struct foo, FD1) - offsetof(struct foo, FD2) |
4137 | /// \endcode |
4138 | /// Both fields must be within the same struct. |
4139 | static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF, |
4140 | const FieldDecl *FD1, |
4141 | const FieldDecl *FD2) { |
4142 | const RecordDecl *FD1OuterRec = |
4143 | FD1->getParent()->getOuterLexicalRecordContext(); |
4144 | const RecordDecl *FD2OuterRec = |
4145 | FD2->getParent()->getOuterLexicalRecordContext(); |
4146 | |
4147 | if (FD1OuterRec != FD2OuterRec) |
4148 | // Fields must be within the same RecordDecl. |
4149 | return std::optional<int64_t>(); |
4150 | |
4151 | int64_t FD1Offset = 0; |
4152 | if (!getFieldOffsetInBits(CGF, RD: FD1OuterRec, FD: FD1, Offset&: FD1Offset)) |
4153 | return std::optional<int64_t>(); |
4154 | |
4155 | int64_t FD2Offset = 0; |
4156 | if (!getFieldOffsetInBits(CGF, RD: FD2OuterRec, FD: FD2, Offset&: FD2Offset)) |
4157 | return std::optional<int64_t>(); |
4158 | |
4159 | return std::make_optional<int64_t>(t: FD1Offset - FD2Offset); |
4160 | } |
4161 | |
4162 | LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, |
4163 | bool Accessed) { |
4164 | // The index must always be an integer, which is not an aggregate. Emit it |
4165 | // in lexical order (this complexity is, sadly, required by C++17). |
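  // For example, in "i[a]" (equivalent to "a[i]") the index i is the
  // left-hand side and so must be emitted before the base.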
4166 | llvm::Value *IdxPre = |
4167 | (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E: E->getIdx()) : nullptr; |
4168 | bool SignedIndices = false; |
4169 | auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * { |
4170 | auto *Idx = IdxPre; |
4171 | if (E->getLHS() != E->getIdx()) { |
4172 | assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS" ); |
4173 | Idx = EmitScalarExpr(E: E->getIdx()); |
4174 | } |
4175 | |
4176 | QualType IdxTy = E->getIdx()->getType(); |
4177 | bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); |
4178 | SignedIndices |= IdxSigned; |
4179 | |
4180 | if (SanOpts.has(K: SanitizerKind::ArrayBounds)) |
4181 | EmitBoundsCheck(E, Base: E->getBase(), Index: Idx, IndexType: IdxTy, Accessed); |
4182 | |
    // Extend or truncate the index type to 32 or 64 bits.
4184 | if (Promote && Idx->getType() != IntPtrTy) |
4185 | Idx = Builder.CreateIntCast(V: Idx, DestTy: IntPtrTy, isSigned: IdxSigned, Name: "idxprom" ); |
4186 | |
4187 | return Idx; |
4188 | }; |
4189 | IdxPre = nullptr; |
4190 | |
4191 | // If the base is a vector type, then we are forming a vector element lvalue |
4192 | // with this subscript. |
4193 | if (E->getBase()->getType()->isSubscriptableVectorType() && |
4194 | !isa<ExtVectorElementExpr>(Val: E->getBase())) { |
4195 | // Emit the vector as an lvalue to get its address. |
4196 | LValue LHS = EmitLValue(E: E->getBase()); |
4197 | auto *Idx = EmitIdxAfterBase(/*Promote*/false); |
4198 | assert(LHS.isSimple() && "Can only subscript lvalue vectors here!" ); |
4199 | return LValue::MakeVectorElt(vecAddress: LHS.getAddress(), Idx, type: E->getBase()->getType(), |
4200 | BaseInfo: LHS.getBaseInfo(), TBAAInfo: TBAAAccessInfo()); |
4201 | } |
4202 | |
4203 | // All the other cases basically behave like simple offsetting. |
4204 | |
4205 | // Handle the extvector case we ignored above. |
4206 | if (isa<ExtVectorElementExpr>(Val: E->getBase())) { |
4207 | LValue LV = EmitLValue(E: E->getBase()); |
4208 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4209 | Address Addr = EmitExtVectorElementLValue(LV); |
4210 | |
4211 | QualType EltType = LV.getType()->castAs<VectorType>()->getElementType(); |
4212 | Addr = emitArraySubscriptGEP(CGF&: *this, addr: Addr, indices: Idx, eltType: EltType, /*inbounds*/ true, |
4213 | signedIndices: SignedIndices, loc: E->getExprLoc()); |
4214 | return MakeAddrLValue(Addr, T: EltType, BaseInfo: LV.getBaseInfo(), |
4215 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: EltType)); |
4216 | } |
4217 | |
4218 | LValueBaseInfo EltBaseInfo; |
4219 | TBAAAccessInfo EltTBAAInfo; |
4220 | Address Addr = Address::invalid(); |
4221 | if (const VariableArrayType *vla = |
4222 | getContext().getAsVariableArrayType(T: E->getType())) { |
4223 | // The base must be a pointer, which is not an aggregate. Emit |
4224 | // it. It needs to be emitted first in case it's what captures |
4225 | // the VLA bounds. |
4226 | Addr = EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &EltBaseInfo, TBAAInfo: &EltTBAAInfo); |
4227 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4228 | |
4229 | // The element count here is the total number of non-VLA elements. |
4230 | llvm::Value *numElements = getVLASize(vla).NumElts; |
4231 | |
4232 | // Effectively, the multiply by the VLA size is part of the GEP. |
4233 | // GEP indexes are signed, and scaling an index isn't permitted to |
4234 | // signed-overflow, so we use the same semantics for our explicit |
4235 | // multiply. We suppress this if overflow is not undefined behavior. |
4236 | if (getLangOpts().isSignedOverflowDefined()) { |
4237 | Idx = Builder.CreateMul(LHS: Idx, RHS: numElements); |
4238 | } else { |
4239 | Idx = Builder.CreateNSWMul(LHS: Idx, RHS: numElements); |
4240 | } |
4241 | |
4242 | Addr = emitArraySubscriptGEP(CGF&: *this, addr: Addr, indices: Idx, eltType: vla->getElementType(), |
4243 | inbounds: !getLangOpts().isSignedOverflowDefined(), |
4244 | signedIndices: SignedIndices, loc: E->getExprLoc()); |
4245 | |
4246 | } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ |
4247 | // Indexing over an interface, as in "NSString *P; P[4];" |
4248 | |
4249 | // Emit the base pointer. |
4250 | Addr = EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &EltBaseInfo, TBAAInfo: &EltTBAAInfo); |
4251 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4252 | |
4253 | CharUnits InterfaceSize = getContext().getTypeSizeInChars(T: OIT); |
4254 | llvm::Value *InterfaceSizeVal = |
4255 | llvm::ConstantInt::get(Ty: Idx->getType(), V: InterfaceSize.getQuantity()); |
4256 | |
4257 | llvm::Value *ScaledIdx = Builder.CreateMul(LHS: Idx, RHS: InterfaceSizeVal); |
4258 | |
    // We don't necessarily build correct LLVM struct types for ObjC
    // interfaces, so we can't rely on GEP to do this scaling
    // correctly; instead we need to cast to i8*. FIXME: is this actually
    // true? A lot of other things in the fragile ABI would break...
4263 | llvm::Type *OrigBaseElemTy = Addr.getElementType(); |
4264 | |
4265 | // Do the GEP. |
4266 | CharUnits EltAlign = |
4267 | getArrayElementAlign(arrayAlign: Addr.getAlignment(), idx: Idx, eltSize: InterfaceSize); |
4268 | llvm::Value *EltPtr = |
4269 | emitArraySubscriptGEP(CGF&: *this, elemType: Int8Ty, ptr: Addr.emitRawPointer(CGF&: *this), |
4270 | indices: ScaledIdx, inbounds: false, signedIndices: SignedIndices, loc: E->getExprLoc()); |
4271 | Addr = Address(EltPtr, OrigBaseElemTy, EltAlign); |
4272 | } else if (const Expr *Array = isSimpleArrayDecayOperand(E: E->getBase())) { |
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be an ArrayToPointerDecay implicit cast. While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here. Emit one "gep A, 0, i".
4277 | assert(Array->getType()->isArrayType() && |
4278 | "Array to pointer decay must have array source type!" ); |
4279 | LValue ArrayLV; |
4280 | // For simple multidimensional array indexing, set the 'accessed' flag for |
4281 | // better bounds-checking of the base expression. |
4282 | if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: Array)) |
4283 | ArrayLV = EmitArraySubscriptExpr(E: ASE, /*Accessed*/ true); |
4284 | else |
4285 | ArrayLV = EmitLValue(E: Array); |
4286 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4287 | |
4288 | if (SanOpts.has(K: SanitizerKind::ArrayBounds)) { |
      // If the array being accessed has a "counted_by" attribute, generate
      // bounds checking code. The "count" field is at the top level of the
      // struct, or in an anonymous struct that's also at the top level.
      // Future expansions may allow the "count" to reside at any place in
      // the struct, but the value of "counted_by" will be a "simple" path
      // to the count, i.e. "a.b.count", so we shouldn't need the full force
      // of EmitLValue or similar to emit the correct GEP.
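      // For example (illustrative):
      //   struct S { int count; int fam[] __attribute__((counted_by(count))); };
      // Here the loaded value of "count" bounds accesses to "fam".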
4296 | const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel = |
4297 | getLangOpts().getStrictFlexArraysLevel(); |
4298 | |
4299 | if (const auto *ME = dyn_cast<MemberExpr>(Val: Array); |
4300 | ME && |
4301 | ME->isFlexibleArrayMemberLike(Context&: getContext(), StrictFlexArraysLevel) && |
4302 | ME->getMemberDecl()->getType()->isCountAttributedType()) { |
4303 | const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(Val: ME->getMemberDecl()); |
4304 | if (const FieldDecl *CountFD = FindCountedByField(FD: FAMDecl)) { |
4305 | if (std::optional<int64_t> Diff = |
4306 | getOffsetDifferenceInBits(CGF&: *this, FD1: CountFD, FD2: FAMDecl)) { |
4307 | CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(BitSize: *Diff); |
4308 | |
4309 | // Create a GEP with a byte offset between the FAM and count and |
4310 | // use that to load the count value. |
4311 | Addr = Builder.CreatePointerBitCastOrAddrSpaceCast( |
4312 | Addr: ArrayLV.getAddress(), Ty: Int8PtrTy, ElementTy: Int8Ty); |
4313 | |
4314 | llvm::Type *CountTy = ConvertType(T: CountFD->getType()); |
4315 | llvm::Value *Res = Builder.CreateInBoundsGEP( |
4316 | Ty: Int8Ty, Ptr: Addr.emitRawPointer(CGF&: *this), |
4317 | IdxList: Builder.getInt32(C: OffsetDiff.getQuantity()), Name: ".counted_by.gep" ); |
4318 | Res = Builder.CreateAlignedLoad(Ty: CountTy, Addr: Res, Align: getIntAlign(), |
4319 | Name: ".counted_by.load" ); |
4320 | |
4321 | // Now emit the bounds checking. |
4322 | EmitBoundsCheckImpl(E, Bound: Res, Index: Idx, IndexType: E->getIdx()->getType(), |
4323 | IndexedType: Array->getType(), Accessed); |
4324 | } |
4325 | } |
4326 | } |
4327 | } |
4328 | |
4329 | // Propagate the alignment from the array itself to the result. |
4330 | QualType arrayType = Array->getType(); |
4331 | Addr = emitArraySubscriptGEP( |
4332 | CGF&: *this, addr: ArrayLV.getAddress(), indices: {CGM.getSize(numChars: CharUnits::Zero()), Idx}, |
4333 | eltType: E->getType(), inbounds: !getLangOpts().isSignedOverflowDefined(), signedIndices: SignedIndices, |
4334 | loc: E->getExprLoc(), arrayType: &arrayType, Base: E->getBase()); |
4335 | EltBaseInfo = ArrayLV.getBaseInfo(); |
4336 | EltTBAAInfo = CGM.getTBAAInfoForSubobject(Base: ArrayLV, AccessType: E->getType()); |
4337 | } else { |
4338 | // The base must be a pointer; emit it with an estimate of its alignment. |
4339 | Addr = EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &EltBaseInfo, TBAAInfo: &EltTBAAInfo); |
4340 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4341 | QualType ptrType = E->getBase()->getType(); |
4342 | Addr = emitArraySubscriptGEP(CGF&: *this, addr: Addr, indices: Idx, eltType: E->getType(), |
4343 | inbounds: !getLangOpts().isSignedOverflowDefined(), |
4344 | signedIndices: SignedIndices, loc: E->getExprLoc(), arrayType: &ptrType, |
4345 | Base: E->getBase()); |
4346 | } |
4347 | |
4348 | LValue LV = MakeAddrLValue(Addr, T: E->getType(), BaseInfo: EltBaseInfo, TBAAInfo: EltTBAAInfo); |
4349 | |
4350 | if (getLangOpts().ObjC && |
4351 | getLangOpts().getGC() != LangOptions::NonGC) { |
4352 | LV.setNonGC(!E->isOBJCGCCandidate(Ctx&: getContext())); |
4353 | setObjCGCLValueClass(Ctx: getContext(), E, LV); |
4354 | } |
4355 | return LV; |
4356 | } |
4357 | |
4358 | LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) { |
4359 | assert( |
4360 | !E->isIncomplete() && |
4361 | "incomplete matrix subscript expressions should be rejected during Sema" ); |
4362 | LValue Base = EmitLValue(E: E->getBase()); |
4363 | llvm::Value *RowIdx = EmitScalarExpr(E: E->getRowIdx()); |
4364 | llvm::Value *ColIdx = EmitScalarExpr(E: E->getColumnIdx()); |
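  // Matrix values are laid out in column-major order, so the flattened
  // element index is ColIdx * NumRows + RowIdx; e.g. for a 4x3 matrix,
  // element (row 1, column 2) is at 2 * 4 + 1 == 9.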
4365 | llvm::Value *NumRows = Builder.getIntN( |
4366 | N: RowIdx->getType()->getScalarSizeInBits(), |
4367 | C: E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows()); |
4368 | llvm::Value *FinalIdx = |
4369 | Builder.CreateAdd(LHS: Builder.CreateMul(LHS: ColIdx, RHS: NumRows), RHS: RowIdx); |
4370 | return LValue::MakeMatrixElt( |
4371 | matAddress: MaybeConvertMatrixAddress(Addr: Base.getAddress(), CGF&: *this), Idx: FinalIdx, |
4372 | type: E->getBase()->getType(), BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo()); |
4373 | } |
4374 | |
4375 | static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, |
4376 | LValueBaseInfo &BaseInfo, |
4377 | TBAAAccessInfo &TBAAInfo, |
4378 | QualType BaseTy, QualType ElTy, |
4379 | bool IsLowerBound) { |
4380 | LValue BaseLVal; |
4381 | if (auto *ASE = dyn_cast<ArraySectionExpr>(Val: Base->IgnoreParenImpCasts())) { |
4382 | BaseLVal = CGF.EmitArraySectionExpr(E: ASE, IsLowerBound); |
4383 | if (BaseTy->isArrayType()) { |
4384 | Address Addr = BaseLVal.getAddress(); |
4385 | BaseInfo = BaseLVal.getBaseInfo(); |
4386 | |
4387 | // If the array type was an incomplete type, we need to make sure |
4388 | // the decay ends up being the right type. |
4389 | llvm::Type *NewTy = CGF.ConvertType(T: BaseTy); |
4390 | Addr = Addr.withElementType(ElemTy: NewTy); |
4391 | |
4392 | // Note that VLA pointers are always decayed, so we don't need to do |
4393 | // anything here. |
4394 | if (!BaseTy->isVariableArrayType()) { |
4395 | assert(isa<llvm::ArrayType>(Addr.getElementType()) && |
4396 | "Expected pointer to array" ); |
4397 | Addr = CGF.Builder.CreateConstArrayGEP(Addr, Index: 0, Name: "arraydecay" ); |
4398 | } |
4399 | |
4400 | return Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: ElTy)); |
4401 | } |
4402 | LValueBaseInfo TypeBaseInfo; |
4403 | TBAAAccessInfo TypeTBAAInfo; |
4404 | CharUnits Align = |
4405 | CGF.CGM.getNaturalTypeAlignment(T: ElTy, BaseInfo: &TypeBaseInfo, TBAAInfo: &TypeTBAAInfo); |
4406 | BaseInfo.mergeForCast(Info: TypeBaseInfo); |
4407 | TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(SourceInfo: TBAAInfo, TargetInfo: TypeTBAAInfo); |
4408 | return Address(CGF.Builder.CreateLoad(Addr: BaseLVal.getAddress()), |
4409 | CGF.ConvertTypeForMem(T: ElTy), Align); |
4410 | } |
4411 | return CGF.EmitPointerWithAlignment(E: Base, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
4412 | } |
4413 | |
4414 | LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E, |
4415 | bool IsLowerBound) { |
4416 | |
4417 | assert(!E->isOpenACCArraySection() && |
4418 | "OpenACC Array section codegen not implemented" ); |
4419 | |
4420 | QualType BaseTy = ArraySectionExpr::getBaseOriginalType(Base: E->getBase()); |
4421 | QualType ResultExprTy; |
4422 | if (auto *AT = getContext().getAsArrayType(T: BaseTy)) |
4423 | ResultExprTy = AT->getElementType(); |
4424 | else |
4425 | ResultExprTy = BaseTy->getPointeeType(); |
4426 | llvm::Value *Idx = nullptr; |
4427 | if (IsLowerBound || E->getColonLocFirst().isInvalid()) { |
    // Requesting the lower bound or the upper bound, but with no length
    // provided and no ':' symbol for the default length -> length = 1.
    // Idx = LowerBound ?: 0;
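    // For example, for the section "a[3:]" the lower-bound index is simply
    // 3; with no lower bound written, it defaults to 0.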
4431 | if (auto *LowerBound = E->getLowerBound()) { |
4432 | Idx = Builder.CreateIntCast( |
4433 | V: EmitScalarExpr(E: LowerBound), DestTy: IntPtrTy, |
4434 | isSigned: LowerBound->getType()->hasSignedIntegerRepresentation()); |
4435 | } else |
4436 | Idx = llvm::ConstantInt::getNullValue(Ty: IntPtrTy); |
4437 | } else { |
    // Try to emit the length or the lower bound as a constant. If this is
    // possible, 1 is subtracted from the constant length or lower bound.
    // Otherwise, emit LLVM IR for (LB + Len) - 1.
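    // For example, for "a[2:3]" the upper-bound index is 2 + 3 - 1 == 4;
    // when both values are constants this folds to a single constant.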
4441 | auto &C = CGM.getContext(); |
4442 | auto *Length = E->getLength(); |
4443 | llvm::APSInt ConstLength; |
4444 | if (Length) { |
4445 | // Idx = LowerBound + Length - 1; |
4446 | if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(Ctx: C)) { |
4447 | ConstLength = CL->zextOrTrunc(width: PointerWidthInBits); |
4448 | Length = nullptr; |
4449 | } |
4450 | auto *LowerBound = E->getLowerBound(); |
4451 | llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false); |
4452 | if (LowerBound) { |
4453 | if (std::optional<llvm::APSInt> LB = |
4454 | LowerBound->getIntegerConstantExpr(Ctx: C)) { |
4455 | ConstLowerBound = LB->zextOrTrunc(width: PointerWidthInBits); |
4456 | LowerBound = nullptr; |
4457 | } |
4458 | } |
4459 | if (!Length) |
4460 | --ConstLength; |
4461 | else if (!LowerBound) |
4462 | --ConstLowerBound; |
4463 | |
4464 | if (Length || LowerBound) { |
4465 | auto *LowerBoundVal = |
4466 | LowerBound |
4467 | ? Builder.CreateIntCast( |
4468 | V: EmitScalarExpr(E: LowerBound), DestTy: IntPtrTy, |
4469 | isSigned: LowerBound->getType()->hasSignedIntegerRepresentation()) |
4470 | : llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLowerBound); |
4471 | auto *LengthVal = |
4472 | Length |
4473 | ? Builder.CreateIntCast( |
4474 | V: EmitScalarExpr(E: Length), DestTy: IntPtrTy, |
4475 | isSigned: Length->getType()->hasSignedIntegerRepresentation()) |
4476 | : llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLength); |
4477 | Idx = Builder.CreateAdd(LHS: LowerBoundVal, RHS: LengthVal, Name: "lb_add_len" , |
4478 | /*HasNUW=*/false, |
4479 | HasNSW: !getLangOpts().isSignedOverflowDefined()); |
4480 | if (Length && LowerBound) { |
4481 | Idx = Builder.CreateSub( |
4482 | LHS: Idx, RHS: llvm::ConstantInt::get(Ty: IntPtrTy, /*V=*/1), Name: "idx_sub_1" , |
4483 | /*HasNUW=*/false, HasNSW: !getLangOpts().isSignedOverflowDefined()); |
4484 | } |
4485 | } else |
4486 | Idx = llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLength + ConstLowerBound); |
4487 | } else { |
4488 | // Idx = ArraySize - 1; |
4489 | QualType ArrayTy = BaseTy->isPointerType() |
4490 | ? E->getBase()->IgnoreParenImpCasts()->getType() |
4491 | : BaseTy; |
4492 | if (auto *VAT = C.getAsVariableArrayType(T: ArrayTy)) { |
4493 | Length = VAT->getSizeExpr(); |
4494 | if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(Ctx: C)) { |
4495 | ConstLength = *L; |
4496 | Length = nullptr; |
4497 | } |
4498 | } else { |
4499 | auto *CAT = C.getAsConstantArrayType(T: ArrayTy); |
4500 | assert(CAT && "unexpected type for array initializer" ); |
4501 | ConstLength = CAT->getSize(); |
4502 | } |
4503 | if (Length) { |
4504 | auto *LengthVal = Builder.CreateIntCast( |
4505 | V: EmitScalarExpr(E: Length), DestTy: IntPtrTy, |
4506 | isSigned: Length->getType()->hasSignedIntegerRepresentation()); |
4507 | Idx = Builder.CreateSub( |
4508 | LHS: LengthVal, RHS: llvm::ConstantInt::get(Ty: IntPtrTy, /*V=*/1), Name: "len_sub_1" , |
4509 | /*HasNUW=*/false, HasNSW: !getLangOpts().isSignedOverflowDefined()); |
4510 | } else { |
4511 | ConstLength = ConstLength.zextOrTrunc(width: PointerWidthInBits); |
4512 | --ConstLength; |
4513 | Idx = llvm::ConstantInt::get(Ty: IntPtrTy, V: ConstLength); |
4514 | } |
4515 | } |
4516 | } |
4517 | assert(Idx); |
4518 | |
4519 | Address EltPtr = Address::invalid(); |
4520 | LValueBaseInfo BaseInfo; |
4521 | TBAAAccessInfo TBAAInfo; |
4522 | if (auto *VLA = getContext().getAsVariableArrayType(T: ResultExprTy)) { |
4523 | // The base must be a pointer, which is not an aggregate. Emit |
4524 | // it. It needs to be emitted first in case it's what captures |
4525 | // the VLA bounds. |
4526 | Address Base = |
4527 | emitOMPArraySectionBase(CGF&: *this, Base: E->getBase(), BaseInfo, TBAAInfo, |
4528 | BaseTy, ElTy: VLA->getElementType(), IsLowerBound); |
4529 | // The element count here is the total number of non-VLA elements. |
4530 | llvm::Value *NumElements = getVLASize(vla: VLA).NumElts; |
4531 | |
4532 | // Effectively, the multiply by the VLA size is part of the GEP. |
4533 | // GEP indexes are signed, and scaling an index isn't permitted to |
4534 | // signed-overflow, so we use the same semantics for our explicit |
4535 | // multiply. We suppress this if overflow is not undefined behavior. |
4536 | if (getLangOpts().isSignedOverflowDefined()) |
4537 | Idx = Builder.CreateMul(LHS: Idx, RHS: NumElements); |
4538 | else |
4539 | Idx = Builder.CreateNSWMul(LHS: Idx, RHS: NumElements); |
4540 | EltPtr = emitArraySubscriptGEP(CGF&: *this, addr: Base, indices: Idx, eltType: VLA->getElementType(), |
4541 | inbounds: !getLangOpts().isSignedOverflowDefined(), |
4542 | /*signedIndices=*/false, loc: E->getExprLoc()); |
4543 | } else if (const Expr *Array = isSimpleArrayDecayOperand(E: E->getBase())) { |
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be an ArrayToPointerDecay implicit cast. While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here. Emit one "gep A, 0, i".
4548 | assert(Array->getType()->isArrayType() && |
4549 | "Array to pointer decay must have array source type!" ); |
4550 | LValue ArrayLV; |
4551 | // For simple multidimensional array indexing, set the 'accessed' flag for |
4552 | // better bounds-checking of the base expression. |
4553 | if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: Array)) |
4554 | ArrayLV = EmitArraySubscriptExpr(E: ASE, /*Accessed*/ true); |
4555 | else |
4556 | ArrayLV = EmitLValue(E: Array); |
4557 | |
4558 | // Propagate the alignment from the array itself to the result. |
4559 | EltPtr = emitArraySubscriptGEP( |
4560 | CGF&: *this, addr: ArrayLV.getAddress(), indices: {CGM.getSize(numChars: CharUnits::Zero()), Idx}, |
4561 | eltType: ResultExprTy, inbounds: !getLangOpts().isSignedOverflowDefined(), |
4562 | /*signedIndices=*/false, loc: E->getExprLoc()); |
4563 | BaseInfo = ArrayLV.getBaseInfo(); |
4564 | TBAAInfo = CGM.getTBAAInfoForSubobject(Base: ArrayLV, AccessType: ResultExprTy); |
4565 | } else { |
4566 | Address Base = |
4567 | emitOMPArraySectionBase(CGF&: *this, Base: E->getBase(), BaseInfo, TBAAInfo, BaseTy, |
4568 | ElTy: ResultExprTy, IsLowerBound); |
4569 | EltPtr = emitArraySubscriptGEP(CGF&: *this, addr: Base, indices: Idx, eltType: ResultExprTy, |
4570 | inbounds: !getLangOpts().isSignedOverflowDefined(), |
4571 | /*signedIndices=*/false, loc: E->getExprLoc()); |
4572 | } |
4573 | |
4574 | return MakeAddrLValue(Addr: EltPtr, T: ResultExprTy, BaseInfo, TBAAInfo); |
4575 | } |
4576 | |
4577 | LValue CodeGenFunction:: |
4578 | EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { |
4579 | // Emit the base vector as an l-value. |
4580 | LValue Base; |
4581 | |
4582 | // ExtVectorElementExpr's base can either be a vector or pointer to vector. |
4583 | if (E->isArrow()) { |
4584 | // If it is a pointer to a vector, emit the address and form an lvalue with |
4585 | // it. |
4586 | LValueBaseInfo BaseInfo; |
4587 | TBAAAccessInfo TBAAInfo; |
4588 | Address Ptr = EmitPointerWithAlignment(E: E->getBase(), BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
4589 | const auto *PT = E->getBase()->getType()->castAs<PointerType>(); |
4590 | Base = MakeAddrLValue(Addr: Ptr, T: PT->getPointeeType(), BaseInfo, TBAAInfo); |
4591 | Base.getQuals().removeObjCGCAttr(); |
4592 | } else if (E->getBase()->isGLValue()) { |
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
4595 | assert(E->getBase()->getType()->isVectorType()); |
4596 | Base = EmitLValue(E: E->getBase()); |
4597 | } else { |
4598 | // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. |
4599 | assert(E->getBase()->getType()->isVectorType() && |
4600 | "Result must be a vector" ); |
4601 | llvm::Value *Vec = EmitScalarExpr(E: E->getBase()); |
4602 | |
4603 | // Store the vector to memory (because LValue wants an address). |
4604 | Address VecMem = CreateMemTemp(Ty: E->getBase()->getType()); |
4605 | Builder.CreateStore(Val: Vec, Addr: VecMem); |
4606 | Base = MakeAddrLValue(Addr: VecMem, T: E->getBase()->getType(), |
4607 | Source: AlignmentSource::Decl); |
4608 | } |
4609 | |
4610 | QualType type = |
4611 | E->getType().withCVRQualifiers(CVR: Base.getQuals().getCVRQualifiers()); |
4612 | |
4613 | // Encode the element access list into a vector of unsigned indices. |
4614 | SmallVector<uint32_t, 4> Indices; |
4615 | E->getEncodedElementAccess(Elts&: Indices); |
4616 | |
4617 | if (Base.isSimple()) { |
4618 | llvm::Constant *CV = |
4619 | llvm::ConstantDataVector::get(Context&: getLLVMContext(), Elts: Indices); |
4620 | return LValue::MakeExtVectorElt(Addr: Base.getAddress(), Elts: CV, type, |
4621 | BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo()); |
4622 | } |
4623 | assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!" ); |
4624 | |
4625 | llvm::Constant *BaseElts = Base.getExtVectorElts(); |
4626 | SmallVector<llvm::Constant *, 4> CElts; |
4627 | |
4628 | for (unsigned i = 0, e = Indices.size(); i != e; ++i) |
4629 | CElts.push_back(Elt: BaseElts->getAggregateElement(Elt: Indices[i])); |
4630 | llvm::Constant *CV = llvm::ConstantVector::get(V: CElts); |
4631 | return LValue::MakeExtVectorElt(Addr: Base.getExtVectorAddress(), Elts: CV, type, |
4632 | BaseInfo: Base.getBaseInfo(), TBAAInfo: TBAAAccessInfo()); |
4633 | } |
4634 | |
4635 | LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { |
4636 | if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(CGF&: *this, ME: E)) { |
4637 | EmitIgnoredExpr(E: E->getBase()); |
4638 | return EmitDeclRefLValue(E: DRE); |
4639 | } |
4640 | |
4641 | Expr *BaseExpr = E->getBase(); |
4642 | // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. |
4643 | LValue BaseLV; |
4644 | if (E->isArrow()) { |
4645 | LValueBaseInfo BaseInfo; |
4646 | TBAAAccessInfo TBAAInfo; |
4647 | Address Addr = EmitPointerWithAlignment(E: BaseExpr, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
4648 | QualType PtrTy = BaseExpr->getType()->getPointeeType(); |
4649 | SanitizerSet SkippedChecks; |
4650 | bool IsBaseCXXThis = IsWrappedCXXThis(Obj: BaseExpr); |
4651 | if (IsBaseCXXThis) |
4652 | SkippedChecks.set(K: SanitizerKind::Alignment, Value: true); |
4653 | if (IsBaseCXXThis || isa<DeclRefExpr>(Val: BaseExpr)) |
4654 | SkippedChecks.set(K: SanitizerKind::Null, Value: true); |
4655 | EmitTypeCheck(TCK: TCK_MemberAccess, Loc: E->getExprLoc(), Addr, Type: PtrTy, |
4656 | /*Alignment=*/CharUnits::Zero(), SkippedChecks); |
4657 | BaseLV = MakeAddrLValue(Addr, T: PtrTy, BaseInfo, TBAAInfo); |
4658 | } else |
4659 | BaseLV = EmitCheckedLValue(E: BaseExpr, TCK: TCK_MemberAccess); |
4660 | |
4661 | NamedDecl *ND = E->getMemberDecl(); |
4662 | if (auto *Field = dyn_cast<FieldDecl>(Val: ND)) { |
4663 | LValue LV = EmitLValueForField(Base: BaseLV, Field); |
4664 | setObjCGCLValueClass(Ctx: getContext(), E, LV); |
4665 | if (getLangOpts().OpenMP) { |
4666 | // If the member was explicitly marked as nontemporal, mark it as |
4667 | // nontemporal. If the base lvalue is marked as nontemporal, mark access |
4668 | // to children as nontemporal too. |
4669 | if ((IsWrappedCXXThis(Obj: BaseExpr) && |
4670 | CGM.getOpenMPRuntime().isNontemporalDecl(VD: Field)) || |
4671 | BaseLV.isNontemporal()) |
4672 | LV.setNontemporal(/*Value=*/true); |
4673 | } |
4674 | return LV; |
4675 | } |
4676 | |
4677 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) |
4678 | return EmitFunctionDeclLValue(CGF&: *this, E, GD: FD); |
4679 | |
4680 | llvm_unreachable("Unhandled member declaration!" ); |
4681 | } |
4682 | |
4683 | /// Given that we are currently emitting a lambda, emit an l-value for |
4684 | /// one of its members. |
4685 | /// |
4686 | LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field, |
4687 | llvm::Value *ThisValue) { |
4688 | bool HasExplicitObjectParameter = false; |
4689 | const auto *MD = dyn_cast_if_present<CXXMethodDecl>(Val: CurCodeDecl); |
4690 | if (MD) { |
4691 | HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction(); |
4692 | assert(MD->getParent()->isLambda()); |
4693 | assert(MD->getParent() == Field->getParent()); |
4694 | } |
4695 | LValue LambdaLV; |
4696 | if (HasExplicitObjectParameter) { |
4697 | const VarDecl *D = cast<CXXMethodDecl>(Val: CurCodeDecl)->getParamDecl(i: 0); |
4698 | auto It = LocalDeclMap.find(Val: D); |
4699 | assert(It != LocalDeclMap.end() && "explicit parameter not loaded?" ); |
4700 | Address AddrOfExplicitObject = It->getSecond(); |
4701 | if (D->getType()->isReferenceType()) |
4702 | LambdaLV = EmitLoadOfReferenceLValue(RefAddr: AddrOfExplicitObject, RefTy: D->getType(), |
4703 | Source: AlignmentSource::Decl); |
4704 | else |
4705 | LambdaLV = MakeAddrLValue(Addr: AddrOfExplicitObject, |
4706 | T: D->getType().getNonReferenceType()); |
4707 | |
4708 | // Make sure we have an lvalue to the lambda itself and not a derived class. |
4709 | auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl(); |
4710 | auto *LambdaTy = cast<CXXRecordDecl>(Val: Field->getParent()); |
4711 | if (ThisTy != LambdaTy) { |
4712 | const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(Val: MD); |
4713 | Address Base = GetAddressOfBaseClass( |
4714 | Value: LambdaLV.getAddress(), Derived: ThisTy, PathBegin: BasePathArray.begin(), |
4715 | PathEnd: BasePathArray.end(), /*NullCheckValue=*/false, Loc: SourceLocation()); |
4716 | LambdaLV = MakeAddrLValue(Addr: Base, T: QualType{LambdaTy->getTypeForDecl(), 0}); |
4717 | } |
4718 | } else { |
4719 | QualType LambdaTagType = getContext().getTagDeclType(Decl: Field->getParent()); |
4720 | LambdaLV = MakeNaturalAlignAddrLValue(V: ThisValue, T: LambdaTagType); |
4721 | } |
4722 | return EmitLValueForField(Base: LambdaLV, Field); |
4723 | } |
4724 | |
4725 | LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) { |
4726 | return EmitLValueForLambdaField(Field, ThisValue: CXXABIThisValue); |
4727 | } |
4728 | |
/// Get the field index in the debug info. The debug info structure/union
/// ignores unnamed bit-fields.
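/// For example, in "struct S { int a; int : 4; int b; };" the field "b" has
/// FieldIndex 2 but debug-info index 1, because the unnamed bit-field is
/// skipped.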
4731 | unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec, |
4732 | unsigned FieldIndex) { |
4733 | unsigned I = 0, Skipped = 0; |
4734 | |
4735 | for (auto *F : Rec->getDefinition()->fields()) { |
4736 | if (I == FieldIndex) |
4737 | break; |
4738 | if (F->isUnnamedBitField()) |
4739 | Skipped++; |
4740 | I++; |
4741 | } |
4742 | |
4743 | return FieldIndex - Skipped; |
4744 | } |
4745 | |
4746 | /// Get the address of a zero-sized field within a record. The resulting |
4747 | /// address doesn't necessarily have the right type. |
4748 | static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, |
4749 | const FieldDecl *Field) { |
4750 | CharUnits Offset = CGF.getContext().toCharUnitsFromBits( |
4751 | BitSize: CGF.getContext().getFieldOffset(FD: Field)); |
4752 | if (Offset.isZero()) |
4753 | return Base; |
4754 | Base = Base.withElementType(ElemTy: CGF.Int8Ty); |
4755 | return CGF.Builder.CreateConstInBoundsByteGEP(Addr: Base, Offset); |
4756 | } |
4757 | |
4758 | /// Drill down to the storage of a field without walking into |
4759 | /// reference types. |
4760 | /// |
4761 | /// The resulting address doesn't necessarily have the right type. |
4762 | static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, |
4763 | const FieldDecl *field) { |
4764 | if (isEmptyFieldForLayout(Context: CGF.getContext(), FD: field)) |
4765 | return emitAddrOfZeroSizeField(CGF, Base: base, Field: field); |
4766 | |
4767 | const RecordDecl *rec = field->getParent(); |
4768 | |
4769 | unsigned idx = |
4770 | CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(FD: field); |
4771 | |
4772 | return CGF.Builder.CreateStructGEP(Addr: base, Index: idx, Name: field->getName()); |
4773 | } |
4774 | |
4775 | static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, |
4776 | Address addr, const FieldDecl *field) { |
4777 | const RecordDecl *rec = field->getParent(); |
4778 | llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType( |
4779 | Ty: base.getType(), Loc: rec->getLocation()); |
4780 | |
4781 | unsigned idx = |
4782 | CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(FD: field); |
4783 | |
4784 | return CGF.Builder.CreatePreserveStructAccessIndex( |
4785 | Addr: addr, Index: idx, FieldIndex: CGF.getDebugInfoFIndex(Rec: rec, FieldIndex: field->getFieldIndex()), DbgInfo); |
4786 | } |
4787 | |
4788 | static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { |
4789 | const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl(); |
4790 | if (!RD) |
4791 | return false; |
4792 | |
4793 | if (RD->isDynamicClass()) |
4794 | return true; |
4795 | |
4796 | for (const auto &Base : RD->bases()) |
4797 | if (hasAnyVptr(Type: Base.getType(), Context)) |
4798 | return true; |
4799 | |
4800 | for (const FieldDecl *Field : RD->fields()) |
4801 | if (hasAnyVptr(Type: Field->getType(), Context)) |
4802 | return true; |
4803 | |
4804 | return false; |
4805 | } |
4806 | |
4807 | LValue CodeGenFunction::EmitLValueForField(LValue base, |
4808 | const FieldDecl *field) { |
4809 | LValueBaseInfo BaseInfo = base.getBaseInfo(); |
4810 | |
4811 | if (field->isBitField()) { |
4812 | const CGRecordLayout &RL = |
4813 | CGM.getTypes().getCGRecordLayout(field->getParent()); |
4814 | const CGBitFieldInfo &Info = RL.getBitFieldInfo(FD: field); |
4815 | const bool UseVolatile = isAAPCS(TargetInfo: CGM.getTarget()) && |
4816 | CGM.getCodeGenOpts().AAPCSBitfieldWidth && |
4817 | Info.VolatileStorageSize != 0 && |
4818 | field->getType() |
4819 | .withCVRQualifiers(CVR: base.getVRQualifiers()) |
4820 | .isVolatileQualified(); |
4821 | Address Addr = base.getAddress(); |
4822 | unsigned Idx = RL.getLLVMFieldNo(FD: field); |
4823 | const RecordDecl *rec = field->getParent(); |
4824 | if (hasBPFPreserveStaticOffset(D: rec)) |
4825 | Addr = wrapWithBPFPreserveStaticOffset(CGF&: *this, Addr); |
4826 | if (!UseVolatile) { |
4827 | if (!IsInPreservedAIRegion && |
4828 | (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) { |
4829 | if (Idx != 0) |
4830 | // For structs, we GEP to the field that the record layout suggests. |
4831 | Addr = Builder.CreateStructGEP(Addr, Index: Idx, Name: field->getName()); |
4832 | } else { |
4833 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType( |
4834 | Ty: getContext().getRecordType(Decl: rec), L: rec->getLocation()); |
4835 | Addr = Builder.CreatePreserveStructAccessIndex( |
4836 | Addr, Index: Idx, FieldIndex: getDebugInfoFIndex(Rec: rec, FieldIndex: field->getFieldIndex()), |
4837 | DbgInfo); |
4838 | } |
4839 | } |
4840 | const unsigned SS = |
4841 | UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; |
4842 | // Get the access type. |
4843 | llvm::Type *FieldIntTy = llvm::Type::getIntNTy(C&: getLLVMContext(), N: SS); |
4844 | Addr = Addr.withElementType(ElemTy: FieldIntTy); |
4845 | if (UseVolatile) { |
4846 | const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity(); |
4847 | if (VolatileOffset) |
4848 | Addr = Builder.CreateConstInBoundsGEP(Addr, Index: VolatileOffset); |
4849 | } |
4850 | |
4851 | QualType fieldType = |
4852 | field->getType().withCVRQualifiers(CVR: base.getVRQualifiers()); |
4853 | // TODO: Support TBAA for bit fields. |
4854 | LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); |
4855 | return LValue::MakeBitfield(Addr, Info, type: fieldType, BaseInfo: FieldBaseInfo, |
4856 | TBAAInfo: TBAAAccessInfo()); |
4857 | } |
4858 | |
4859 | // Fields of may-alias structures are may-alias themselves. |
4860 | // FIXME: this should get propagated down through anonymous structs |
4861 | // and unions. |
4862 | QualType FieldType = field->getType(); |
4863 | const RecordDecl *rec = field->getParent(); |
4864 | AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource(); |
4865 | LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(Source: BaseAlignSource)); |
4866 | TBAAAccessInfo FieldTBAAInfo; |
4867 | if (base.getTBAAInfo().isMayAlias() || |
4868 | rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) { |
4869 | FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); |
4870 | } else if (rec->isUnion()) { |
4871 | // TODO: Support TBAA for unions. |
4872 | FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); |
4873 | } else { |
    // If no base type has been assigned for the base access, then try to
    // generate one for this base lvalue.
4876 | FieldTBAAInfo = base.getTBAAInfo(); |
4877 | if (!FieldTBAAInfo.BaseType) { |
4878 | FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(QTy: base.getType()); |
4879 | assert(!FieldTBAAInfo.Offset && |
4880 | "Nonzero offset for an access with no base type!" ); |
4881 | } |
4882 | |
4883 | // Adjust offset to be relative to the base type. |
4884 | const ASTRecordLayout &Layout = |
4885 | getContext().getASTRecordLayout(D: field->getParent()); |
4886 | unsigned CharWidth = getContext().getCharWidth(); |
4887 | if (FieldTBAAInfo.BaseType) |
4888 | FieldTBAAInfo.Offset += |
4889 | Layout.getFieldOffset(FieldNo: field->getFieldIndex()) / CharWidth; |
4890 | |
4891 | // Update the final access type and size. |
4892 | FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(QTy: FieldType); |
4893 | FieldTBAAInfo.Size = |
4894 | getContext().getTypeSizeInChars(T: FieldType).getQuantity(); |
4895 | } |
4896 | |
4897 | Address addr = base.getAddress(); |
4898 | if (hasBPFPreserveStaticOffset(D: rec)) |
4899 | addr = wrapWithBPFPreserveStaticOffset(CGF&: *this, Addr&: addr); |
4900 | if (auto *ClassDef = dyn_cast<CXXRecordDecl>(Val: rec)) { |
4901 | if (CGM.getCodeGenOpts().StrictVTablePointers && |
4902 | ClassDef->isDynamicClass()) { |
      // Getting to any field of a dynamic object requires stripping dynamic
      // information provided by invariant.group. This is because accessing
      // fields may leak the real address of the dynamic object, which could
      // result in miscompilation when the leaked pointer is compared.
4907 | auto *stripped = |
4908 | Builder.CreateStripInvariantGroup(Ptr: addr.emitRawPointer(CGF&: *this)); |
4909 | addr = Address(stripped, addr.getElementType(), addr.getAlignment()); |
4910 | } |
4911 | } |
4912 | |
4913 | unsigned RecordCVR = base.getVRQualifiers(); |
4914 | if (rec->isUnion()) { |
4915 | // For unions, there is no pointer adjustment. |
4916 | if (CGM.getCodeGenOpts().StrictVTablePointers && |
4917 | hasAnyVptr(Type: FieldType, Context: getContext())) |
      // Because unions can easily skip invariant.group barriers, we need to
      // add a barrier every time a CXXRecord field with a vptr is referenced.
      addr = Builder.CreateLaunderInvariantGroup(addr);

    if (IsInPreservedAIRegion ||
        (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
      // Remember the original union field index
      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
          base.getType(), rec->getLocation());
      addr =
          Address(Builder.CreatePreserveUnionAccessIndex(
                      addr.emitRawPointer(*this),
                      getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
                  addr.getElementType(), addr.getAlignment());
    }

    if (FieldType->isReferenceType())
      addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
  } else {
    if (!IsInPreservedAIRegion &&
        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
      // For structs, we GEP to the field that the record layout suggests.
      addr = emitAddrOfFieldStorage(*this, addr, field);
    else
      // Remember the original struct field index
      addr = emitPreserveStructAccess(*this, base, addr, field);
  }

  // If this is a reference field, load the reference right now.
  if (FieldType->isReferenceType()) {
    LValue RefLVal =
        MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
    if (RecordCVR & Qualifiers::Volatile)
      RefLVal.getQuals().addVolatile();
    addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);

    // Qualifiers on the struct don't apply to the referencee.
    RecordCVR = 0;
    FieldType = FieldType->getPointeeType();
  }

  // Make sure that the address is pointing to the right type. This is critical
  // for both unions and structs.
  addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
  LV.getQuals().addCVRQualifiers(RecordCVR);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  return LV;
}

LValue
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
                                                  const FieldDecl *Field) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(Base, Field);

  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);

  // Make sure that the address is pointing to the right type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  V = V.withElementType(llvmType);

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type.
  // This should be similar to what we do in EmitLValueForField().
  LValueBaseInfo BaseInfo = Base.getBaseInfo();
  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
                        CGM.getTBAAInfoForSubobject(Base, FieldType));
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
  if (E->isFileScope()) {
    ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
    return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
  }
  if (E->getType()->isVariablyModifiedType())
    // make sure to emit the VLA size.
    EmitVariablyModifiedType(E->getType());

  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();
  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
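  // For example (illustrative), in C: 'int *p = &(int){42};' creates a
  // block-scope object. If the literal's type has a nontrivial destruction
  // kind (e.g. it contains an ARC __strong member), a cleanup is pushed here
  // for the end of the scope.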
  if (!getLangOpts().CPlusPlus)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
                                  E->getType(), getDestroyer(DtorKind),
                                  DtorKind & EHCleanup);

  return Result;
}

LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
  if (!E->isGLValue())
    // Initializing an aggregate temporary in C++11: T{...}.
    return EmitAggExprToLValue(E);

  // An lvalue initializer list must be initializing a reference.
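  // For example (illustrative): 'const int &r = {x};' is a transparent init
  // list wrapping the single expression that the reference binds to.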
  assert(E->isTransparent() && "non-transparent glvalue init list");
  return EmitLValue(E->getInit(0));
}

/// Emit the operand of a glvalue conditional operator. This is either a
/// glvalue or a (possibly-parenthesized) throw-expression. If this is a
/// throw, no LValue is returned and the current block has been terminated.
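/// For example (illustrative): in 'cond ? x : throw Err()', the throw arm is
/// emitted for its side effects only, and control never reaches the merge.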
static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
                                                         const Expr *Operand) {
  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
    CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/ false);
    return std::nullopt;
  }

  return CGF.EmitLValue(Operand);
}

namespace {
// Handle the case where the condition constant-folds to a simple integer,
// which means we don't have to separately emit the true/false blocks.
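// For example (illustrative): with 'constexpr bool B = true;', the glvalue
// '(B ? a : b)' can be emitted directly as 'a', as long as the dead arm
// contains no labels.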
std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
    CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
  const Expr *condExpr = E->getCond();
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
    if (!CondExprBool)
      std::swap(Live, Dead);

    if (!CGF.ContainsLabel(Dead)) {
      // If the true case is live, we need to track its region.
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      // If the live arm is a throw expression, emit it and return an
      // undefined lvalue, because the result can't be used.
      if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
        CGF.EmitCXXThrowExpr(ThrowExpr);
        llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
        llvm::Type *Ty = CGF.UnqualPtrTy;
        return CGF.MakeAddrLValue(
            Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
            Dead->getType());
      }
      return CGF.EmitLValue(Live);
    }
  }
  return std::nullopt;
}
struct ConditionalInfo {
  llvm::BasicBlock *lhsBlock, *rhsBlock;
  std::optional<LValue> LHS, RHS;
};

// Create and generate the 3 blocks for a conditional operator.
// Leaves the 'current block' in the continuation basic block.
template <typename FuncTy>
ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
                                      const AbstractConditionalOperator *E,
                                      const FuncTy &BranchGenFunc) {
  ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
                       CGF.createBasicBlock("cond.false"), std::nullopt,
                       std::nullopt};
  llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
                           CGF.getProfileCount(E));

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.lhsBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
  eval.end(CGF);
  Info.lhsBlock = CGF.Builder.GetInsertBlock();

  if (Info.LHS)
    CGF.Builder.CreateBr(endBlock);

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.rhsBlock);
  eval.begin(CGF);
  Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
  eval.end(CGF);
  Info.rhsBlock = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(endBlock);

  return Info;
}
} // namespace

void CodeGenFunction::EmitIgnoredConditionalOperator(
    const AbstractConditionalOperator *E) {
  if (!E->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(E->getType()) &&
           "Unexpected conditional operator!");
    return (void)EmitAggExprToLValue(E);
  }

  OpaqueValueMapping binding(*this, E);
  if (HandleConditionalOperatorLValueSimpleCase(*this, E))
    return;

  EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
    CGF.EmitIgnoredExpr(E);
    return LValue{};
  });
}

LValue CodeGenFunction::EmitConditionalOperatorLValue(
    const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(expr->getType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);
  if (std::optional<LValue> Res =
          HandleConditionalOperatorLValueSimpleCase(*this, expr))
    return *Res;

  ConditionalInfo Info = EmitConditionalBlocks(
      *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
        return EmitLValueOrThrowExpression(CGF, E);
      });

  if ((Info.LHS && !Info.LHS->isSimple()) ||
      (Info.RHS && !Info.RHS->isSimple()))
    return EmitUnsupportedLValue(expr, "conditional operator");

  if (Info.LHS && Info.RHS) {
    Address lhsAddr = Info.LHS->getAddress();
    Address rhsAddr = Info.RHS->getAddress();
    Address result = mergeAddressesInConditionalExpr(
        lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
        Builder.GetInsertBlock(), expr->getType());
    AlignmentSource alignSource =
        std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
                 Info.RHS->getBaseInfo().getAlignmentSource());
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
        Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
    return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
                          TBAAInfo);
  } else {
    assert((Info.LHS || Info.RHS) &&
           "both operands of glvalue conditional are throw-expressions?");
    return Info.LHS ? *Info.LHS : *Info.RHS;
  }
}

/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its members. This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:
  case CK_HLSLArrayRValue:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return EmitAggExprToLValue(E);

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress();
    const auto *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue:
    return EmitLValue(E->getSubExpr());

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    // FIXME: Once pointee types are removed from IR, remove this.
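    // For example (illustrative): C++20 allows binding 'const int (&r)[] = arr;'
    // for 'int arr[5]'; the qualification conversion drops the array bound, so
    // the lvalue's IR element type must be updated even though no code is
    // emitted.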
    LValue LV = EmitLValue(E->getSubExpr());
    // Propagate the volatile qualifier to the LValue, if present on E.
    if (E->changesVolatileQualification())
      LV.getQuals() = E->getType().getQualifiers();
    if (LV.isSimple()) {
      Address V = LV.getAddress();
      if (V.isValid()) {
        llvm::Type *T = ConvertTypeForMem(E->getType());
        if (V.getElementType() != T)
          LV.setAddress(V.withElementType(T));
      }
    }
    return LV;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const auto *DerivedClassTy =
        E->getSubExpr()->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    Address This = LV.getAddress();

    // Perform the derived-to-base conversion
    Address Base = GetAddressOfBaseClass(
        This, DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false, E->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    Address Derived = GetAddressOfDerivedClass(
        LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false);

    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (sanitizePerformTypeCheck())
      EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
                    E->getType());

    if (SanOpts.has(SanitizerKind::CFIDerivedCast))
      EmitVTablePtrCheckForCast(E->getType(), Derived,
                                /*MayBeNull=*/false, CFITCK_DerivedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const auto *CE = cast<ExplicitCastExpr>(E);

    CGM.EmitExplicitCastExprType(CE, this);
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress().withElementType(
        ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));

    if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
      EmitVTablePtrCheckForCast(E->getType(), V,
                                /*MayBeNull=*/false, CFITCK_UnrelatedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_AddressSpaceConversion: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType DestTy = getContext().getPointerType(E->getType());
    llvm::Value *V = getTargetHooks().performAddrSpaceCast(
        *this, LV.getPointer(*this),
        E->getSubExpr()->getType().getAddressSpace(),
        E->getType().getAddressSpace(), ConvertType(DestTy));
    return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
                                  LV.getAddress().getAlignment()),
                          E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress().withElementType(ConvertType(E->getType()));
    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");

  case CK_VectorSplat: {
    // LValue results of vector splats are only supported in HLSL.
    if (!getLangOpts().HLSL)
      return EmitUnsupportedLValue(E, "unexpected cast lvalue");
    return EmitLValue(E->getSubExpr());
  }
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}

LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOrCreateOpaqueLValueMapping(e);
}

LValue
CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
  assert(OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr *, LValue>::iterator
      it = OpaqueLValues.find(e);

  if (it != OpaqueLValues.end())
    return it->second;

  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
  return EmitLValue(e->getSourceExpr());
}

RValue
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
  assert(!OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr *, RValue>::iterator
      it = OpaqueRValues.find(e);

  if (it != OpaqueRValues.end())
    return it->second;

  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
  return EmitAnyExpr(e->getSourceExpr());
}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD,
                                           SourceLocation Loc) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  switch (getEvaluationKind(FT)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
  case TEK_Aggregate:
    return FieldLV.asAggregateRValue();
  case TEK_Scalar:
    // This routine is used to load fields one-by-one to perform a copy, so
    // don't load reference fields.
    if (FD->getType()->isReferenceType())
      return RValue::get(FieldLV.getPointer(*this));
    // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
    // primitive load.
    if (FieldLV.isBitField())
      return EmitLoadOfLValue(FieldLV, Loc);
    return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
  }
  llvm_unreachable("bad evaluation kind");
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  // A CXXOperatorCallExpr is created even for explicit object methods, but
  // these should be treated like static function calls.
  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
        MD && MD->isImplicitObjectMemberFunction())
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  CGCallee callee = EmitCallee(E->getCallee());

  if (callee.isBuiltin()) {
    return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
                           E, ReturnValue);
  }

  if (callee.isPseudoDestructor()) {
    return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
  }

  return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
}

/// Emit a CallExpr without considering whether it might be a subclass.
RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
                                           ReturnValueSlot ReturnValue) {
  CGCallee Callee = EmitCallee(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}

// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
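// For example (illustrative), a fortified libc header might declare:
//   extern inline __attribute__((always_inline, gnu_inline))
//   void *memcpy(void *d, const void *s, size_t n) { /* checked copy */ }
// If no non-inline redeclaration follows, every decl in the chain is an
// inline builtin declaration and this returns true.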
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
  for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
    if (!PD->isInlineBuiltinDeclaration())
      return false;
  return true;
}

static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (auto builtinID = FD->getBuiltinID()) {
    std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
    std::string NoBuiltins = "no-builtins";

    StringRef Ident = CGF.CGM.getMangledName(GD);
    std::string FDInlineName = (Ident + ".inline").str();

    bool IsPredefinedLibFunction =
        CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
    bool HasAttributeNoBuiltin =
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);

    // When directly calling an inline builtin, call it through its mangled
    // name to make it clear it's not the actual builtin.
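    // For example, an inline builtin definition of memcpy is cloned below as
    // "memcpy.inline" (the mangled name plus the ".inline" suffix computed
    // above), and direct calls are routed to that clone.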
    if (CGF.CurFn->getName() != FDInlineName &&
        OnlyHasInlineBuiltinDeclaration(FD)) {
      llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
      llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
      llvm::Module *M = Fn->getParent();
      llvm::Function *Clone = M->getFunction(FDInlineName);
      if (!Clone) {
        Clone = llvm::Function::Create(Fn->getFunctionType(),
                                       llvm::GlobalValue::InternalLinkage,
                                       Fn->getAddressSpace(), FDInlineName, M);
        Clone->addFnAttr(llvm::Attribute::AlwaysInline);
      }
      return CGCallee::forDirect(Clone, GD);
    }

    // Replaceable builtins provide their own implementation of a builtin. If
    // we are in an inline builtin implementation, avoid trivial infinite
    // recursion. Honor __attribute__((no_builtin("foo"))) or
    // __attribute__((no_builtin)) on the current function, unless foo is
    // not a predefined library function, in which case we must generate the
    // builtin no matter what.
    else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
      return CGCallee::forBuiltin(builtinID, FD);
  }

  llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
  if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
      FD->hasAttr<CUDAGlobalAttr>())
    CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
        cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));

  return CGCallee::forDirect(CalleePtr, GD);
}

CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
  E = E->IgnoreParens();

  // Look through function-to-pointer decay.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
        ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
      return EmitCallee(ICE->getSubExpr());
    }

    // Resolve direct calls.
  } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
      return EmitDirectCallee(*this, FD);
    }
  } else if (auto ME = dyn_cast<MemberExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
      EmitIgnoredExpr(ME->getBase());
      return EmitDirectCallee(*this, FD);
    }

    // Look through template substitutions.
  } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
    return EmitCallee(NTTP->getReplacement());

    // Treat pseudo-destructor calls differently.
  } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
    return CGCallee::forPseudoDestructor(PDE);
  }

  // Otherwise, we have an indirect reference.
  llvm::Value *calleePtr;
  QualType functionType;
  if (auto ptrType = E->getType()->getAs<PointerType>()) {
    calleePtr = EmitScalarExpr(E);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = E->getType();
    calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
  }
  assert(functionType->isFunctionType());

  GlobalDecl GD;
  if (const auto *VD =
          dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
    GD = GlobalDecl(VD);

  CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
  CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
  CGCallee callee(calleeInfo, calleePtr, pointerAuth);
  return callee;
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.
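  // For example (illustrative): in '__block T v; v = f();', if f copies a
  // block capturing 'v', the variable may be moved to the heap, so the RHS
  // must be evaluated before the address of 'v' is computed.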

  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar: {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    // TODO: Can we de-duplicate this code with the corresponding code in
    // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
    RValue RV;
    llvm::Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if the LHS is a bitfield. If the RHS contains an implicit cast
    // expression, we want to extract that value and potentially (if the
    // bitfield sanitizer is enabled) use it to check for an implicit
    // conversion.
    if (E->getLHS()->refersToBitField()) {
      llvm::Value *RHS =
          EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
      RV = RValue::get(RHS);
    } else
      RV = EmitAnyExpr(E->getRHS());

    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);

    if (RV.isScalar())
      EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());

    if (LV.isBitField()) {
      llvm::Value *Result = nullptr;
      // If bitfield sanitizers are enabled we want to use the result
      // to check whether a truncation or sign change has occurred.
      if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
        EmitStoreThroughBitfieldLValue(RV, LV, &Result);
      else
        EmitStoreThroughBitfieldLValue(RV, LV);

      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
      QualType DstType = E->getLHS()->getType();
      EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
                                  LV.getBitFieldInfo(), E->getExprLoc());
    } else
      EmitStoreThroughLValue(RV, LV);

    if (getLangOpts().OpenMP)
      CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                                E->getLHS());
    return LV;
  }

  case TEK_Complex:
    return EmitComplexAssignmentLValue(E);

  case TEK_Aggregate:
    return EmitAggExprToLValue(E);
  }
  llvm_unreachable("bad evaluation kind");
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() &&
         "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddress(), E->getType(),
                        AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
      .withElementType(ConvertType(E->getType()));
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
                        AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
  return MakeAddrLValue(Slot.getAddress(), E->getType(),
                        AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  Address V =
      CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
  return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

llvm::Value *
CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
  QualType PointerDiffType = getContext().getPointerDiffType();
  return Builder.CreateZExtOrTrunc(OffsetValue,
                                   getTypes().ConvertType(PointerDiffType));
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = nullptr;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    BaseValue = BaseLV.getPointer(*this);
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
      EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                        BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                        AlignmentSource::Decl);
}

RValue CodeGenFunction::EmitCall(QualType CalleeType,
                                 const CGCallee &OrigCallee, const CallExpr *E,
                                 ReturnValueSlot ReturnValue,
                                 llvm::Value *Chain) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  const Decl *TargetDecl =
      OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();

  assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
          !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
         "trying to emit a call to an immediate function");

  CalleeType = getContext().getCanonicalType(CalleeType);

  auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();

  CGCallee Callee = OrigCallee;

  if (SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
      !isa<FunctionNoProtoType>(PointeeType)) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      SanitizerScope SanScope(this);
      auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);

      llvm::Type *PrefixSigType = PrefixSig->getType();
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);

      llvm::Value *CalleePtr = Callee.getFunctionPointer();
      if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
        // Use the raw pointer since we are using the callee pointer as data
        // here.
        Address Addr =
            Address(CalleePtr, CalleePtr->getType(),
                    CharUnits::fromQuantity(
                        CalleePtr->getPointerAlignment(CGM.getDataLayout())),
                    Callee.getPointerAuthInfo(), nullptr);
        CalleePtr = Addr.emitRawPointer(*this);
      }

      // On 32-bit Arm, the low bit of a function pointer indicates whether
      // it's using the Arm or Thumb instruction set. The actual first
      // instruction lives at the same address either way, so we must clear
      // that low bit before using the function address to find the prefix
      // structure.
      //
      // This applies to both Arm and Thumb target triples, because
      // either one could be used in an interworking context where it
      // might be passed function pointers of both types.
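      // For example (illustrative): a Thumb function whose first instruction
      // is at 0x1000 is referenced as 0x1001; masking with ~1 recovers 0x1000
      // so the prefix data just before the function can be located.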
      llvm::Value *AlignedCalleePtr;
      if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
        llvm::Value *CalleeAddress =
            Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
        llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
        llvm::Value *AlignedCalleeAddress =
            Builder.CreateAnd(CalleeAddress, Mask);
        AlignedCalleePtr =
            Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
      } else {
        AlignedCalleePtr = CalleePtr;
      }

      llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
      llvm::Value *CalleeSig =
          Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      EmitBlock(TypeCheck);
      llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
          Int32Ty,
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
          getPointerAlign());
      llvm::Value *CalleeTypeHashMatch =
          Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
      llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
                                      EmitCheckTypeDescriptor(CalleeType)};
      EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
                SanitizerHandler::FunctionTypeMismatch, StaticData,
                {CalleePtr});

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }

  const auto *FnType = cast<FunctionType>(PointeeType);

  // If we are checking indirect calls and this call is indirect, check that
  // the function pointer is a member of the bit set for the function type.
  if (SanOpts.has(SanitizerKind::CFIICall) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    SanitizerScope SanScope(this);
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);

    llvm::Metadata *MD;
    if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
      MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
    else
      MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));

    llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    llvm::Value *TypeTest = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});

    auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
        EmitCheckSourceLocation(E->getBeginLoc()),
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
    };
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
      EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
                           CalleePtr, StaticData);
    } else {
      EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
                SanitizerHandler::CFICheckFail, StaticData,
                {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
    }
  }

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);

  // C++17 requires that we evaluate arguments to a call using assignment
  // syntax right-to-left, and that we evaluate arguments to certain other
  // operators left-to-right. Note that we allow this to override the order
  // dictated by the calling convention on the MS ABI, which means that
  // parameter destruction order is not necessarily reverse construction order.
  // FIXME: Revisit this based on C++ committee response to unimplementability.
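  // For example (illustrative): 'a = f()' spelled through 'operator=' must
  // evaluate f() before the left operand, while 's << f() << g()' must
  // evaluate f() before g().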
  EvaluationOrder Order = EvaluationOrder::Default;
  bool StaticOperator = false;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (OCE->isAssignmentOp())
      Order = EvaluationOrder::ForceRightToLeft;
    else {
      switch (OCE->getOperator()) {
      case OO_LessLess:
      case OO_GreaterGreater:
      case OO_AmpAmp:
      case OO_PipePipe:
      case OO_Comma:
      case OO_ArrowStar:
        Order = EvaluationOrder::ForceLeftToRight;
        break;
      default:
        break;
      }
    }

    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
        MD && MD->isStatic())
      StaticOperator = true;
  }

  auto Arguments = E->arguments();
  if (StaticOperator) {
    // If we're calling a static operator, we need to emit the object argument
    // and ignore it.
    EmitIgnoredExpr(E->getArg(0));
    Arguments = drop_begin(Arguments, 1);
  }
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
               E->getDirectCallee(), /*ParamsToSkip=*/0, Order);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*ChainCall=*/Chain);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  //
  // Chain calls use this same code path to add the invisible chain parameter
  // to the function type.
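  // For example (illustrative), in C89:
  //   void f();       /* unprototyped */
  //   f(1, 2.0f);     /* arguments promote to (int, double) */
  // The call is emitted as if f had type void(int, double): the callee is
  // cast to that exact promoted signature below.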
  if (isa<FunctionNoProtoType>(FnType) || Chain) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
    CalleeTy = CalleeTy->getPointerTo(AS);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
    Callee.setFunctionPointer(CalleePtr);
  }

  // A HIP function pointer contains a kernel handle when it is used in a
  // triple-chevron launch. The kernel stub needs to be loaded from the kernel
  // handle and used as the callee.
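  // For example (illustrative), in HIP host code:
  //   kernel<<<numBlocks, blockSize>>>(args);
  // the 'kernel' symbol seen by the host is a handle; the stub that performs
  // the launch is loaded through it below.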
  if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
      isa<CUDAKernelCallExpr>(E) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    llvm::Value *Handle = Callee.getFunctionPointer();
    auto *Stub = Builder.CreateLoad(
        Address(Handle, Handle->getType(), CGM.getPointerAlign()));
    Callee.setFunctionPointer(Stub);
  }
  llvm::CallBase *CallOrInvoke = nullptr;
  RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
                         E == MustTailCall, E->getExprLoc());

  // Generate a function declaration DISubprogram so it can be used in debug
  // info about call sites.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      FunctionArgList Args;
      QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
      DI->EmitFuncDeclForCallSite(CallOrInvoke,
                                  DI->getFunctionType(CalleeDecl, ResTy, Args),
                                  CalleeDecl);
    }
  }

  return Call;
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  Address BaseAddr = Address::invalid();
  if (E->getOpcode() == BO_PtrMemI) {
    BaseAddr = EmitPointerWithAlignment(E->getLHS());
  } else {
    BaseAddr = EmitLValue(E->getLHS()).getAddress();
  }

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();

  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address MemberAddr =
      EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
                                      &TBAAInfo);

  return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
}

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue();
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath,
                                            Node);
}
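
// Illustrative IR sketch: after SetFPAccuracy(V, 2.5f), an instruction such as
//   %q = fdiv float %a, %b, !fpmath !0
// carries '!0 = !{float 2.500000e+00}', permitting a 2.5 ulp approximation.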

void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-sqrt flag
    SetFPAccuracy(Val, 3.0f);
  }
}

void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-div flag
    SetFPAccuracy(Val, 2.5f);
  }
}

namespace {
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};
} // namespace

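// For example (illustrative): an Objective-C property compound assignment
// such as 'obj.count += 1' is a PseudoObjectExpr whose semantic form contains
// opaque values for the receiver and the getter result, plus getter, add, and
// setter expressions; the loop below binds each opaque value in turn.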
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      // Skip unique OVEs.
      if (ov->isUnique()) {
        assert(ov != resultExpr &&
               "A unique OVE cannot be used as the result expression");
        continue;
      }

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isPRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);
        LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
                                       AlignmentSource::Decl);
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}