//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Passes/OptimizationLevel.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <optional>
#include <string>

using namespace clang;
using namespace CodeGen;
// Experiment to make sanitizers easier to debug.
static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
    "ubsan-unique-traps", llvm::cl::Optional,
    llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."));

// TODO: Introduce frontend options to enable this per sanitizer, similar to
// `-fsanitize-trap`.
static llvm::cl::opt<bool> ClSanitizeGuardChecks(
    "ubsan-guard-checks", llvm::cl::Optional,
    llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAllocaWithoutCast - This creates an alloca and inserts it into
/// the entry block, without casting it to the default address space.
RawAddress
CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return RawAddress(Alloca, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is cast to the default address space if necessary.
RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize,
                                             RawAddress *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in the alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
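  // (For instance, on AMDGPU allocas live in addrspace(5) while the language
  // default lowers to the generic address space, so the cast is needed there.)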
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(getPostAllocaInsertPoint());
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return RawAddress(V, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  llvm::AllocaInst *Alloca;
  if (ArraySize)
    Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
  else
    Alloca = new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                                  ArraySize, Name, &*AllocaInsertPt);
  if (Allocas) {
    Allocas->Add(Alloca);
  }
  return Alloca;
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                         const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                          RawAddress *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                          const Twine &Name,
                                          RawAddress *Alloca) {
  RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                       /*ArraySize=*/nullptr, Alloca);

  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
                     KnownNonNull);
  }
  return Result;
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     CharUnits Align,
                                                     const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),
                                       BoolTy, Loc);
}
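
// A sketch of the resulting IR: for `if (f)` with `float f`, the scalar
// conversion above emits the equivalent of `fcmp une float %f, 0.0e+00`,
// yielding the i1 the caller branches on (exact IR depends on FP options).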

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isPRValue())
    return (void)EmitAnyExpr(E, AggValueSlot::ignored(),
                             /*ignoreResult=*/true);

  // If this is a conditional operator yielding a bitfield, special-case its
  // emission. The normal 'EmitLValue' path is particularly difficult to
  // codegen here, since creating a single "LValue" for two differently sized
  // operands is not really doable.
  if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
          E->IgnoreParenNoopCasts(getContext()))) {
    if (CondOp->getObjectKind() == OK_BitField)
      return EmitIgnoredConditionalOperator(CondOp);
  }

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise = isa_and_nonnull<VarDecl>(VD) &&
                         VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::FunctionCallee CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}

static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
                                           const MaterializeTemporaryExpr *M,
                                           const Expr *Inner,
                                           RawAddress *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record try to promote it into a
    // constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        Ty.isConstantStorage(CGF.getContext(), /*ExcludeCtor=*/true,
                             /*ExcludeDtor=*/false))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
            llvm::GlobalValue::NotThreadLocal,
            CGF.getContext().getTargetAddressSpace(AS));
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getAsAlign());
        llvm::Constant *C = GV;
        if (AS != LangAS::Default)
          C = TCG.performAddrSpaceCast(
              CGF.CGM, GV, AS, LangAS::Default,
              GV->getValueType()->getPointerTo(
                  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
        // FIXME: Should we put the new global into a COMDAT?
        return RawAddress(C, GV->getValueType(), alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

/// Helper method to check if the underlying ABI is AAPCS.
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().starts_with("aapcs");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC.
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    RawAddress Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      llvm::Type *Ty = ConvertTypeForMem(E->getType());
      Object = Object.withElementType(Ty);

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  RawAddress Alloca = Address::invalid();
  RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
    Object = Object.withElementType(TemporaryType);
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks. However, when inside an "await.suspend"
      // block we should always avoid the conditional cleanup, because it
      // creates a boolean marker that lives across await_suspend, which can
      // destroy the coroutine frame.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          ((!SanOpts.has(SanitizerKind::HWAddress) &&
            !SanOpts.has(SanitizerKind::Memory) &&
            !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
           inSuspendBlock())) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress();
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}

/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

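// Mix a pointer-derived value into an accumulating hash. The multiply/shift/
// xor below is one round of a splitmix64-style mixer (0xbf58476d1ce4e5b9 is
// the splitmix64 multiplier); it is a cheap mix, not a full avalanche.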
static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
                                llvm::Value *Ptr) {
  llvm::Value *A0 =
      Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
  llvm::Value *A1 =
      Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
  return Builder.CreateXor(Acc, A1);
}

bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) ||
         SanOpts.has(SanitizerKind::Alignment) ||
         SanOpts.has(SanitizerKind::ObjectSize) ||
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
    if (ArraySize)
      Size = Builder.CreateMul(Size, ArraySize);

    // Degenerate case: new X[0] does not need an objectsize check.
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
    if (!ConstantSize || !ConstantSize->isNullValue()) {
      // The glvalue must refer to a large enough storage region.
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
      //        to check this.
      // FIXME: Get object address space
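      // As a reference: @llvm.objectsize(ptr, i1 %min, i1 %nullunknown,
      // i1 %dynamic) returns the number of bytes reachable from ptr, or a
      // sentinel (-1 with min=false, 0 with min=true) when it is unknown.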
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
      llvm::Value *Min = Builder.getFalse();
      llvm::Value *NullIsUnknown = Builder.getFalse();
      llvm::Value *Dynamic = Builder.getFalse();
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
          Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
    }
  }

  llvm::MaybeAlign AlignVal;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getAsMaybeAlign();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
                                             /*ForPointeeType=*/true)
                     .getAsMaybeAlign();

    // The glvalue must be suitably aligned.
    if (AlignVal && *AlignVal > llvm::Align(1) &&
        (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a deterministic hash of the mangled name of the type.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Contained in NoSanitizeList based on the mangled type.
    if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
                                                           Out.str())) {
      // Load the vptr, and mix it with TypeHash.
      llvm::Value *TypeHash =
          llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));

      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
      llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
                                          Ty->getAsCXXRecordDecl(),
                                          VTableAuthMode::UnsafeUbsanStrip);
      VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);

      llvm::Value *Hash =
          emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache =
          CGM.CreateRuntimeVariable(HashTable, "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(
          Hash, llvm::ConstantInt::get(IntPtrTy, CacheSize - 1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal = Builder.CreateAlignedLoad(
          IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
          getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
          EmitCheckSourceLocation(Loc),
          EmitCheckTypeDescriptor(Ty),
          CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
          llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

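/// Load the element count implied by a pointer parameter annotated with
/// pass_object_size, e.g. (illustrative):
///
///   void f(int *p __attribute__((pass_object_size(0))));
///
/// Clang passes the object size in bytes as an implicit argument; dividing it
/// by the element size recovers the element count. Returns nullptr if the
/// parameter is not annotated or the annotation only provides a lower bound.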
llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}

/// If Base is known to point to the start of an array, return the length of
/// that array. Return nullptr if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          LangOptions::StrictFlexArraysLevelKind
                                              StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
                                                     StrictFlexArraysLevel)) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());

      if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

namespace {

/// \p StructAccessBase returns the base \p Expr of a field access. It returns
/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
///
///     p in p->a.b.c
///
/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
/// looking for:
///
///     struct s {
///       struct s *ptr;
///       int count;
///       char array[] __attribute__((counted_by(count)));
///     };
///
/// If we have an expression like \p p->ptr->array[index], we want the
/// \p MemberExpr for \p p->ptr instead of \p p.
class StructAccessBase
    : public ConstStmtVisitor<StructAccessBase, const Expr *> {
  const RecordDecl *ExpectedRD;

  bool IsExpectedRecordDecl(const Expr *E) const {
    QualType Ty = E->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    return ExpectedRD == Ty->getAsRecordDecl();
  }

public:
  StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}

  //===------------------------------------------------------------------===//
  //                          Visitor Methods
  //===------------------------------------------------------------------===//

  // NOTE: If we build C++ support for counted_by, then we'll have to handle
  // horrors like this:
  //
  //     struct S {
  //       int x, y;
  //       int blah[] __attribute__((counted_by(x)));
  //     } s;
  //
  //     int foo(int index, int val) {
  //       int (S::*IHatePMDs)[] = &S::blah;
  //       (s.*IHatePMDs)[index] = val;
  //     }

  const Expr *Visit(const Expr *E) {
    return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
  }

  const Expr *VisitStmt(const Stmt *S) { return nullptr; }

  // These are the types we expect to return (in order of most to least
  // likely):
  //
  //   1. DeclRefExpr - This is the expression for the base of the structure.
  //      It's exactly what we want to build an access to the \p counted_by
  //      field.
  //   2. MemberExpr - This is the expression that has the same \p RecordDecl
  //      as the flexible array member's lexical enclosing \p RecordDecl. This
  //      allows us to catch things like: "p->p->array"
  //   3. CompoundLiteralExpr - This is for people who create something
  //      heretical like (struct foo has a flexible array member):
  //
  //        (struct foo){ 1, 2 }.blah[idx];
  const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (IsExpectedRecordDecl(E) && E->isArrow())
      return E;
    const Expr *Res = Visit(E->getBase());
    return !Res && IsExpectedRecordDecl(E) ? E : Res;
  }
  const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitCallExpr(const CallExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (IsExpectedRecordDecl(E))
      return E;
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
};

} // end anonymous namespace

using RecIndicesTy =
    SmallVector<std::pair<const RecordDecl *, llvm::Value *>, 8>;

static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *Field,
                                 RecIndicesTy &Indices) {
  const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
  int64_t FieldNo = -1;
  for (const FieldDecl *FD : RD->fields()) {
    if (!Layout.containsFieldDecl(FD))
      // This can happen if the field is zero-sized (e.g. it has an empty
      // struct type) and therefore has no slot in the LLVM record layout.
      continue;

    FieldNo = Layout.getLLVMFieldNo(FD);
    if (FD == Field) {
      Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
      return true;
    }

    QualType Ty = FD->getType();
    if (Ty->isRecordType()) {
      if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
        if (RD->isUnion())
          FieldNo = 0;
        Indices.emplace_back(
            std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
        return true;
      }
    }
  }

  return false;
}

/// This method is typically called in contexts where we can't generate
/// side-effects, like in __builtin_dynamic_object_size. When finding
/// expressions, only choose those that have either already been emitted or can
/// be loaded without side-effects.
///
/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
///   within the top-level struct.
/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
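///
/// For example (illustrative), given:
///
///     struct S { int count; char fam[] __attribute__((counted_by(count))); };
///     struct S *p;
///
/// a query on p->fam emits a GEP to p->count and loads the count from it.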
llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();

  // Find the base struct expr (i.e. p in p->a.b.c.d).
  const Expr *StructBase = StructAccessBase(RD).Visit(Base);
  if (!StructBase || StructBase->HasSideEffects(getContext()))
    return nullptr;

  llvm::Value *Res = nullptr;
  if (const auto *DRE = dyn_cast<DeclRefExpr>(StructBase)) {
    Res = EmitDeclRefLValue(DRE).getPointer(*this);
    Res = Builder.CreateAlignedLoad(ConvertType(DRE->getType()), Res,
                                    getPointerAlign(), "dre.load");
  } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
    LValue LV = EmitMemberExpr(ME);
    Address Addr = LV.getAddress();
    Res = Addr.emitRawPointer(*this);
  } else if (StructBase->getType()->isPointerType()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
    Res = Addr.emitRawPointer(*this);
  } else {
    return nullptr;
  }

  llvm::Value *Zero = Builder.getInt32(0);
  RecIndicesTy Indices;

  getGEPIndicesToField(*this, RD, CountDecl, Indices);

  for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
    Res = Builder.CreateInBoundsGEP(
        ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res,
        {Zero, I->second}, "..counted_by.gep");

  return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
                                   getIntAlign(), "..counted_by.load");
}

const FieldDecl *CodeGenFunction::FindCountedByField(const FieldDecl *FD) {
  if (!FD)
    return nullptr;

  const auto *CAT = FD->getType()->getAs<CountAttributedType>();
  if (!CAT)
    return nullptr;

  const auto *CountDRE = cast<DeclRefExpr>(CAT->getCountExpr());
  const auto *CountDecl = CountDRE->getDecl();
  if (const auto *IFD = dyn_cast<IndirectFieldDecl>(CountDecl))
    CountDecl = IFD->getAnonField();

  return dyn_cast<FieldDecl>(CountDecl);
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      getLangOpts().getStrictFlexArraysLevel();
  QualType IndexedType;
  llvm::Value *Bound =
      getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);

  EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
}

void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
                                          llvm::Value *Index,
                                          QualType IndexType,
                                          QualType IndexedType, bool Accessed) {
  if (!Bound)
    return;

  SanitizerScope SanScope(this);

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy,
                                                /*isSigned=*/false);

  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(E->getExprLoc()),
      EmitCheckTypeDescriptor(IndexedType),
      EmitCheckTypeDescriptor(IndexType)
  };
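  // An index that is actually used to access memory must be strictly less
  // than the bound; merely forming the address (Accessed == false, e.g.
  // &arr[n]) may equal it, since one-past-the-end pointers are valid.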
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}

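// For `_Complex double c; ++c;` the code below adds 1.0 to the real
// component and leaves the imaginary component unchanged, matching the
// usual C complex increment semantics.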
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal,
                                     /*IsSigned=*/true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                              E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
                                        TBAAAccessInfo *TBAAInfo,
                                        KnownNonNull_t IsKnownNonNull,
                                        CodeGenFunction &CGF) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = CGF.EmitPointerWithAlignment(
            CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo =
                CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr.setAlignment(Align);
          }
        }

        if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
                                          /*MayBeNull=*/true,
                                          CodeGenFunction::CFITCK_UnrelatedCast,
                                          CE->getBeginLoc());
        }

        llvm::Type *ElemTy =
            CGF.ConvertTypeForMem(E->getType()->getPointeeType());
        Addr = Addr.withElementType(ElemTy);
        if (CE->getCastKind() == CK_AddressSpaceConversion)
          Addr = CGF.Builder.CreateAddrSpaceCast(
              Addr, CGF.ConvertType(E->getType()), ElemTy);
        return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
                                            CE->getType());
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
      Address Addr = CGF.EmitPointerWithAlignment(
          CE->getSubExpr(), BaseInfo, nullptr,
          (KnownNonNull_t)(IsKnownNonNull ||
                           CE->getCastKind() == CK_UncheckedDerivedToBase));
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return CGF.GetAddressOfBaseClass(
          Addr, Derived, CE->path_begin(), CE->path_end(),
          CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
  }

  // std::addressof and variants.
  if (auto *Call = dyn_cast<CallExpr>(E)) {
    switch (Call->getBuiltinCallee()) {
1366 | default: |
1367 | break; |
1368 | case Builtin::BIaddressof: |
1369 | case Builtin::BI__addressof: |
1370 | case Builtin::BI__builtin_addressof: { |
1371 | LValue LV = CGF.EmitLValue(E: Call->getArg(Arg: 0), IsKnownNonNull); |
1372 | if (BaseInfo) *BaseInfo = LV.getBaseInfo(); |
1373 | if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo(); |
1374 | return LV.getAddress(); |
1375 | } |
1376 | } |
1377 | } |
1378 | |
1379 | // TODO: conditional operators, comma. |
1380 | |
1381 | // Otherwise, use the alignment of the type. |
1382 | return CGF.makeNaturalAddressForPointer( |
1383 | Ptr: CGF.EmitScalarExpr(E), T: E->getType()->getPointeeType(), Alignment: CharUnits(), |
1384 | /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull); |
1385 | } |
1386 | |
1387 | /// EmitPointerWithAlignment - Given an expression of pointer type, try to |
1388 | /// derive a more accurate bound on the alignment of the pointer. |
1389 | Address CodeGenFunction::EmitPointerWithAlignment( |
1390 | const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, |
1391 | KnownNonNull_t IsKnownNonNull) { |
1392 | Address Addr = |
1393 | ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, CGF&: *this); |
1394 | if (IsKnownNonNull && !Addr.isKnownNonNull()) |
1395 | Addr.setKnownNonNull(); |
1396 | return Addr; |
1397 | } |
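
// Illustrative effect of the cast handling above (a hedged sketch): given
//   double d; float *f = (float *)&d;
// the inner l-value has a Decl-based alignment source, so the result keeps
// the known 8-byte alignment of 'd' rather than falling back to float's
// natural 4-byte alignment; an opaque source (e.g. a plain pointer
// parameter) would instead take the casted-to type's alignment.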
1398 | |
1399 | llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) { |
1400 | llvm::Value *V = RV.getScalarVal(); |
1401 | if (auto MPT = T->getAs<MemberPointerType>()) |
1402 | return CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF&: *this, MemPtr: V, MPT); |
1403 | return Builder.CreateICmpNE(LHS: V, RHS: llvm::Constant::getNullValue(Ty: V->getType())); |
1404 | } |
1405 | |
1406 | RValue CodeGenFunction::GetUndefRValue(QualType Ty) { |
1407 | if (Ty->isVoidType()) |
1408 | return RValue::get(V: nullptr); |
1409 | |
1410 | switch (getEvaluationKind(T: Ty)) { |
1411 | case TEK_Complex: { |
1412 | llvm::Type *EltTy = |
1413 | ConvertType(T: Ty->castAs<ComplexType>()->getElementType()); |
1414 | llvm::Value *U = llvm::UndefValue::get(T: EltTy); |
1415 | return RValue::getComplex(C: std::make_pair(x&: U, y&: U)); |
1416 | } |
1417 | |
1418 | // If this is a use of an undefined aggregate type, the aggregate must have an |
1419 | // identifiable address. Just because the contents of the value are undefined |
1420 | // doesn't mean that the address can't be taken and compared. |
1421 | case TEK_Aggregate: { |
1422 | Address DestPtr = CreateMemTemp(Ty, Name: "undef.agg.tmp" ); |
1423 | return RValue::getAggregate(addr: DestPtr); |
1424 | } |
1425 | |
1426 | case TEK_Scalar: |
1427 | return RValue::get(V: llvm::UndefValue::get(T: ConvertType(T: Ty))); |
1428 | } |
1429 | llvm_unreachable("bad evaluation kind" ); |
1430 | } |
1431 | |
1432 | RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E, |
1433 | const char *Name) { |
1434 | ErrorUnsupported(S: E, Type: Name); |
1435 | return GetUndefRValue(Ty: E->getType()); |
1436 | } |
1437 | |
1438 | LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E, |
1439 | const char *Name) { |
1440 | ErrorUnsupported(S: E, Type: Name); |
1441 | llvm::Type *ElTy = ConvertType(T: E->getType()); |
1442 | llvm::Type *Ty = UnqualPtrTy; |
1443 | return MakeAddrLValue( |
1444 | Addr: Address(llvm::UndefValue::get(T: Ty), ElTy, CharUnits::One()), T: E->getType()); |
1445 | } |
1446 | |
1447 | bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) { |
1448 | const Expr *Base = Obj; |
1449 | while (!isa<CXXThisExpr>(Val: Base)) { |
1450 | // The result of a dynamic_cast can be null. |
1451 | if (isa<CXXDynamicCastExpr>(Val: Base)) |
1452 | return false; |
1453 | |
1454 | if (const auto *CE = dyn_cast<CastExpr>(Val: Base)) { |
1455 | Base = CE->getSubExpr(); |
1456 | } else if (const auto *PE = dyn_cast<ParenExpr>(Val: Base)) { |
1457 | Base = PE->getSubExpr(); |
1458 | } else if (const auto *UO = dyn_cast<UnaryOperator>(Val: Base)) { |
1459 | if (UO->getOpcode() == UO_Extension) |
1460 | Base = UO->getSubExpr(); |
1461 | else |
1462 | return false; |
1463 | } else { |
1464 | return false; |
1465 | } |
1466 | } |
1467 | return true; |
1468 | } |
1469 | |
1470 | LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) { |
1471 | LValue LV; |
1472 | if (SanOpts.has(K: SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(Val: E)) |
1473 | LV = EmitArraySubscriptExpr(E: cast<ArraySubscriptExpr>(Val: E), /*Accessed*/true); |
1474 | else |
1475 | LV = EmitLValue(E); |
1476 | if (!isa<DeclRefExpr>(Val: E) && !LV.isBitField() && LV.isSimple()) { |
1477 | SanitizerSet SkippedChecks; |
1478 | if (const auto *ME = dyn_cast<MemberExpr>(Val: E)) { |
1479 | bool IsBaseCXXThis = IsWrappedCXXThis(Obj: ME->getBase()); |
1480 | if (IsBaseCXXThis) |
1481 | SkippedChecks.set(K: SanitizerKind::Alignment, Value: true); |
1482 | if (IsBaseCXXThis || isa<DeclRefExpr>(Val: ME->getBase())) |
1483 | SkippedChecks.set(K: SanitizerKind::Null, Value: true); |
1484 | } |
1485 | EmitTypeCheck(TCK, Loc: E->getExprLoc(), LV, Type: E->getType(), SkippedChecks); |
1486 | } |
1487 | return LV; |
1488 | } |
1489 | |
1490 | /// EmitLValue - Emit code to compute a designator that specifies the location |
1491 | /// of the expression. |
1492 | /// |
1493 | /// This can return one of two things: a simple address or a bitfield reference. |
1494 | /// In either case, the LLVM Value* in the LValue structure is guaranteed to be |
1495 | /// an LLVM pointer type. |
1496 | /// |
1497 | /// If this returns a bitfield reference, nothing about the pointee type of the |
1498 | /// LLVM value is known: For example, it may not be a pointer to an integer. |
1499 | /// |
1500 | /// If this returns a normal address, and if the lvalue's C type is fixed size, |
1501 | /// this method guarantees that the returned pointer type will point to an LLVM |
1502 | /// type of the same size as the lvalue's type. If the lvalue has a variable |
1503 | /// length type, this is not possible. |
1504 | /// |
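/// For example (illustrative): given 'struct { int x : 3; } s;', 's.x' comes
/// back as a bitfield reference, while 'arr[i]' on 'int arr[10]' comes back
/// as a simple address whose pointee IR type is i32.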
1505 | LValue CodeGenFunction::EmitLValue(const Expr *E, |
1506 | KnownNonNull_t IsKnownNonNull) { |
1507 | LValue LV = EmitLValueHelper(E, IsKnownNonNull); |
1508 | if (IsKnownNonNull && !LV.isKnownNonNull()) |
1509 | LV.setKnownNonNull(); |
1510 | return LV; |
1511 | } |
1512 | |
1513 | static QualType getConstantExprReferredType(const FullExpr *E, |
1514 | const ASTContext &Ctx) { |
1515 | const Expr *SE = E->getSubExpr()->IgnoreImplicit(); |
1516 | if (isa<OpaqueValueExpr>(Val: SE)) |
1517 | return SE->getType(); |
1518 | return cast<CallExpr>(Val: SE)->getCallReturnType(Ctx)->getPointeeType(); |
1519 | } |
1520 | |
1521 | LValue CodeGenFunction::EmitLValueHelper(const Expr *E, |
1522 | KnownNonNull_t IsKnownNonNull) { |
1523 | ApplyDebugLocation DL(*this, E); |
1524 | switch (E->getStmtClass()) { |
1525 | default: return EmitUnsupportedLValue(E, Name: "l-value expression" ); |
1526 | |
1527 | case Expr::ObjCPropertyRefExprClass: |
1528 | llvm_unreachable("cannot emit a property reference directly" ); |
1529 | |
1530 | case Expr::ObjCSelectorExprClass: |
1531 | return EmitObjCSelectorLValue(E: cast<ObjCSelectorExpr>(Val: E)); |
1532 | case Expr::ObjCIsaExprClass: |
1533 | return EmitObjCIsaExpr(E: cast<ObjCIsaExpr>(Val: E)); |
1534 | case Expr::BinaryOperatorClass: |
1535 | return EmitBinaryOperatorLValue(E: cast<BinaryOperator>(Val: E)); |
1536 | case Expr::CompoundAssignOperatorClass: { |
1537 | QualType Ty = E->getType(); |
1538 | if (const AtomicType *AT = Ty->getAs<AtomicType>()) |
1539 | Ty = AT->getValueType(); |
1540 | if (!Ty->isAnyComplexType()) |
1541 | return EmitCompoundAssignmentLValue(E: cast<CompoundAssignOperator>(Val: E)); |
1542 | return EmitComplexCompoundAssignmentLValue(E: cast<CompoundAssignOperator>(Val: E)); |
1543 | } |
1544 | case Expr::CallExprClass: |
1545 | case Expr::CXXMemberCallExprClass: |
1546 | case Expr::CXXOperatorCallExprClass: |
1547 | case Expr::UserDefinedLiteralClass: |
1548 | return EmitCallExprLValue(E: cast<CallExpr>(Val: E)); |
1549 | case Expr::CXXRewrittenBinaryOperatorClass: |
1550 | return EmitLValue(E: cast<CXXRewrittenBinaryOperator>(Val: E)->getSemanticForm(), |
1551 | IsKnownNonNull); |
1552 | case Expr::VAArgExprClass: |
1553 | return EmitVAArgExprLValue(E: cast<VAArgExpr>(Val: E)); |
1554 | case Expr::DeclRefExprClass: |
1555 | return EmitDeclRefLValue(E: cast<DeclRefExpr>(Val: E)); |
1556 | case Expr::ConstantExprClass: { |
1557 | const ConstantExpr *CE = cast<ConstantExpr>(Val: E); |
1558 | if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) { |
1559 | QualType RetType = getConstantExprReferredType(E: CE, Ctx: getContext()); |
1560 | return MakeNaturalAlignAddrLValue(V: Result, T: RetType); |
1561 | } |
1562 | return EmitLValue(E: cast<ConstantExpr>(Val: E)->getSubExpr(), IsKnownNonNull); |
1563 | } |
1564 | case Expr::ParenExprClass: |
1565 | return EmitLValue(E: cast<ParenExpr>(Val: E)->getSubExpr(), IsKnownNonNull); |
1566 | case Expr::GenericSelectionExprClass: |
1567 | return EmitLValue(E: cast<GenericSelectionExpr>(Val: E)->getResultExpr(), |
1568 | IsKnownNonNull); |
1569 | case Expr::PredefinedExprClass: |
1570 | return EmitPredefinedLValue(E: cast<PredefinedExpr>(Val: E)); |
1571 | case Expr::StringLiteralClass: |
1572 | return EmitStringLiteralLValue(E: cast<StringLiteral>(Val: E)); |
1573 | case Expr::ObjCEncodeExprClass: |
1574 | return EmitObjCEncodeExprLValue(E: cast<ObjCEncodeExpr>(Val: E)); |
1575 | case Expr::PseudoObjectExprClass: |
1576 | return EmitPseudoObjectLValue(e: cast<PseudoObjectExpr>(Val: E)); |
1577 | case Expr::InitListExprClass: |
1578 | return EmitInitListLValue(E: cast<InitListExpr>(Val: E)); |
1579 | case Expr::CXXTemporaryObjectExprClass: |
1580 | case Expr::CXXConstructExprClass: |
1581 | return EmitCXXConstructLValue(E: cast<CXXConstructExpr>(Val: E)); |
1582 | case Expr::CXXBindTemporaryExprClass: |
1583 | return EmitCXXBindTemporaryLValue(E: cast<CXXBindTemporaryExpr>(Val: E)); |
1584 | case Expr::CXXUuidofExprClass: |
1585 | return EmitCXXUuidofLValue(E: cast<CXXUuidofExpr>(Val: E)); |
1586 | case Expr::LambdaExprClass: |
1587 | return EmitAggExprToLValue(E); |
1588 | |
1589 | case Expr::ExprWithCleanupsClass: { |
1590 | const auto *cleanups = cast<ExprWithCleanups>(Val: E); |
1591 | RunCleanupsScope Scope(*this); |
1592 | LValue LV = EmitLValue(E: cleanups->getSubExpr(), IsKnownNonNull); |
1593 | if (LV.isSimple()) { |
1594 | // Defend against branches out of GNU statement expressions surrounded by |
1595 | // cleanups. |
1596 | Address Addr = LV.getAddress(); |
1597 | llvm::Value *V = Addr.getBasePointer(); |
1598 | Scope.ForceCleanup(ValuesToReload: {&V}); |
1599 | Addr.replaceBasePointer(P: V); |
1600 | return LValue::MakeAddr(Addr, type: LV.getType(), Context&: getContext(), |
1601 | BaseInfo: LV.getBaseInfo(), TBAAInfo: LV.getTBAAInfo()); |
1602 | } |
1603 | // FIXME: Is it possible to create an ExprWithCleanups that produces a |
1604 | // bitfield lvalue or some other non-simple lvalue? |
1605 | return LV; |
1606 | } |
1607 | |
1608 | case Expr::CXXDefaultArgExprClass: { |
1609 | auto *DAE = cast<CXXDefaultArgExpr>(Val: E); |
1610 | CXXDefaultArgExprScope Scope(*this, DAE); |
1611 | return EmitLValue(E: DAE->getExpr(), IsKnownNonNull); |
1612 | } |
1613 | case Expr::CXXDefaultInitExprClass: { |
1614 | auto *DIE = cast<CXXDefaultInitExpr>(Val: E); |
1615 | CXXDefaultInitExprScope Scope(*this, DIE); |
1616 | return EmitLValue(E: DIE->getExpr(), IsKnownNonNull); |
1617 | } |
1618 | case Expr::CXXTypeidExprClass: |
1619 | return EmitCXXTypeidLValue(E: cast<CXXTypeidExpr>(Val: E)); |
1620 | |
1621 | case Expr::ObjCMessageExprClass: |
1622 | return EmitObjCMessageExprLValue(E: cast<ObjCMessageExpr>(Val: E)); |
1623 | case Expr::ObjCIvarRefExprClass: |
1624 | return EmitObjCIvarRefLValue(E: cast<ObjCIvarRefExpr>(Val: E)); |
1625 | case Expr::StmtExprClass: |
1626 | return EmitStmtExprLValue(E: cast<StmtExpr>(Val: E)); |
1627 | case Expr::UnaryOperatorClass: |
1628 | return EmitUnaryOpLValue(E: cast<UnaryOperator>(Val: E)); |
1629 | case Expr::ArraySubscriptExprClass: |
1630 | return EmitArraySubscriptExpr(E: cast<ArraySubscriptExpr>(Val: E)); |
1631 | case Expr::MatrixSubscriptExprClass: |
1632 | return EmitMatrixSubscriptExpr(E: cast<MatrixSubscriptExpr>(Val: E)); |
1633 | case Expr::ArraySectionExprClass: |
1634 | return EmitArraySectionExpr(E: cast<ArraySectionExpr>(Val: E)); |
1635 | case Expr::ExtVectorElementExprClass: |
1636 | return EmitExtVectorElementExpr(E: cast<ExtVectorElementExpr>(Val: E)); |
1637 | case Expr::CXXThisExprClass: |
1638 | return MakeAddrLValue(Addr: LoadCXXThisAddress(), T: E->getType()); |
1639 | case Expr::MemberExprClass: |
1640 | return EmitMemberExpr(E: cast<MemberExpr>(Val: E)); |
1641 | case Expr::CompoundLiteralExprClass: |
1642 | return EmitCompoundLiteralLValue(E: cast<CompoundLiteralExpr>(Val: E)); |
1643 | case Expr::ConditionalOperatorClass: |
1644 | return EmitConditionalOperatorLValue(E: cast<ConditionalOperator>(Val: E)); |
1645 | case Expr::BinaryConditionalOperatorClass: |
1646 | return EmitConditionalOperatorLValue(E: cast<BinaryConditionalOperator>(Val: E)); |
1647 | case Expr::ChooseExprClass: |
1648 | return EmitLValue(E: cast<ChooseExpr>(Val: E)->getChosenSubExpr(), IsKnownNonNull); |
1649 | case Expr::OpaqueValueExprClass: |
1650 | return EmitOpaqueValueLValue(e: cast<OpaqueValueExpr>(Val: E)); |
1651 | case Expr::SubstNonTypeTemplateParmExprClass: |
1652 | return EmitLValue(E: cast<SubstNonTypeTemplateParmExpr>(Val: E)->getReplacement(), |
1653 | IsKnownNonNull); |
1654 | case Expr::ImplicitCastExprClass: |
1655 | case Expr::CStyleCastExprClass: |
1656 | case Expr::CXXFunctionalCastExprClass: |
1657 | case Expr::CXXStaticCastExprClass: |
1658 | case Expr::CXXDynamicCastExprClass: |
1659 | case Expr::CXXReinterpretCastExprClass: |
1660 | case Expr::CXXConstCastExprClass: |
1661 | case Expr::CXXAddrspaceCastExprClass: |
1662 | case Expr::ObjCBridgedCastExprClass: |
1663 | return EmitCastLValue(E: cast<CastExpr>(Val: E)); |
1664 | |
1665 | case Expr::MaterializeTemporaryExprClass: |
1666 | return EmitMaterializeTemporaryExpr(M: cast<MaterializeTemporaryExpr>(Val: E)); |
1667 | |
1668 | case Expr::CoawaitExprClass: |
1669 | return EmitCoawaitLValue(E: cast<CoawaitExpr>(Val: E)); |
1670 | case Expr::CoyieldExprClass: |
1671 | return EmitCoyieldLValue(E: cast<CoyieldExpr>(Val: E)); |
1672 | case Expr::PackIndexingExprClass: |
1673 | return EmitLValue(E: cast<PackIndexingExpr>(Val: E)->getSelectedExpr()); |
1674 | } |
1675 | } |
1676 | |
1677 | /// Given an object of the given canonical type, can we safely copy a |
1678 | /// value out of it based on its initializer? |
1679 | static bool isConstantEmittableObjectType(QualType type) { |
1680 | assert(type.isCanonical()); |
1681 | assert(!type->isReferenceType()); |
1682 | |
1683 | // Must be const-qualified but non-volatile. |
1684 | Qualifiers qs = type.getLocalQualifiers(); |
1685 | if (!qs.hasConst() || qs.hasVolatile()) return false; |
1686 | |
1687 | // Otherwise, all object types satisfy this except C++ classes with |
1688 | // mutable subobjects or non-trivial copy/destroy behavior. |
1689 | if (const auto *RT = dyn_cast<RecordType>(Val&: type)) |
1690 | if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: RT->getDecl())) |
1691 | if (RD->hasMutableFields() || !RD->isTrivial()) |
1692 | return false; |
1693 | |
1694 | return true; |
1695 | } |
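
// For illustration: 'const int' qualifies, while 'const volatile int', a
// const class object with a 'mutable' member, or one with a non-trivial
// copy or destroy operation do not; these are exactly the cases rejected
// above.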
1696 | |
1697 | /// Can we constant-emit a load of a reference to a variable of the |
1698 | /// given type? This is different from predicates like |
1699 | /// Decl::mightBeUsableInConstantExpressions because we do want it to apply |
1700 | /// in situations that don't necessarily satisfy the language's rules |
1701 | /// for this (e.g. C++'s ODR-use rules). For example, we want to be able |
1702 | /// to do this with const float variables even if those variables |
1703 | /// aren't marked 'constexpr'. |
1704 | enum ConstantEmissionKind { |
1705 | CEK_None, |
1706 | CEK_AsReferenceOnly, |
1707 | CEK_AsValueOrReference, |
1708 | CEK_AsValueOnly |
1709 | }; |
1710 | static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) { |
1711 | type = type.getCanonicalType(); |
1712 | if (const auto *ref = dyn_cast<ReferenceType>(Val&: type)) { |
1713 | if (isConstantEmittableObjectType(type: ref->getPointeeType())) |
1714 | return CEK_AsValueOrReference; |
1715 | return CEK_AsReferenceOnly; |
1716 | } |
1717 | if (isConstantEmittableObjectType(type)) |
1718 | return CEK_AsValueOnly; |
1719 | return CEK_None; |
1720 | } |
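
// Illustrative mapping under the rules above: 'const float &' ->
// CEK_AsValueOrReference, 'int &' -> CEK_AsReferenceOnly, 'const int' ->
// CEK_AsValueOnly, and plain 'int' -> CEK_None.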
1721 | |
1722 | /// Try to emit a reference to the given value without producing it as |
1723 | /// an l-value. This is just an optimization, but it avoids us needing |
1724 | /// to emit global copies of variables if they're named without triggering |
1725 | /// a formal use in a context where we can't emit a direct reference to them, |
1726 | /// for instance if a block or lambda or a member of a local class uses a |
1727 | /// const int variable or constexpr variable from an enclosing function. |
1728 | CodeGenFunction::ConstantEmission |
1729 | CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) { |
1730 | ValueDecl *value = refExpr->getDecl(); |
1731 | |
1732 | // The value needs to be an enum constant or a constant variable. |
1733 | ConstantEmissionKind CEK; |
1734 | if (isa<ParmVarDecl>(Val: value)) { |
1735 | CEK = CEK_None; |
1736 | } else if (auto *var = dyn_cast<VarDecl>(Val: value)) { |
1737 | CEK = checkVarTypeForConstantEmission(type: var->getType()); |
1738 | } else if (isa<EnumConstantDecl>(Val: value)) { |
1739 | CEK = CEK_AsValueOnly; |
1740 | } else { |
1741 | CEK = CEK_None; |
1742 | } |
1743 | if (CEK == CEK_None) return ConstantEmission(); |
1744 | |
1745 | Expr::EvalResult result; |
1746 | bool resultIsReference; |
1747 | QualType resultType; |
1748 | |
1749 | // It's best to evaluate all the way as an r-value if that's permitted. |
1750 | if (CEK != CEK_AsReferenceOnly && |
1751 | refExpr->EvaluateAsRValue(Result&: result, Ctx: getContext())) { |
1752 | resultIsReference = false; |
1753 | resultType = refExpr->getType(); |
1754 | |
1755 | // Otherwise, try to evaluate as an l-value. |
1756 | } else if (CEK != CEK_AsValueOnly && |
1757 | refExpr->EvaluateAsLValue(Result&: result, Ctx: getContext())) { |
1758 | resultIsReference = true; |
1759 | resultType = value->getType(); |
1760 | |
1761 | // Failure. |
1762 | } else { |
1763 | return ConstantEmission(); |
1764 | } |
1765 | |
1766 | // In any case, if the initializer has side-effects, abandon ship. |
1767 | if (result.HasSideEffects) |
1768 | return ConstantEmission(); |
1769 | |
1770 | // In CUDA/HIP device compilation, a lambda may capture a reference variable |
1771 | // referencing a global host variable by copy. In this case the lambda should |
1772 | // make a copy of the value of the global host variable. The DRE of the |
1773 | // captured reference variable cannot be emitted as a load from the host |
1774 | // global variable as a compile-time constant, since the host variable is not |
1775 | // accessible on the device. The DRE of the captured reference variable has |
1776 | // to be loaded from the lambda's captures. |
1777 | if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() && |
1778 | refExpr->refersToEnclosingVariableOrCapture()) { |
1779 | auto *MD = dyn_cast_or_null<CXXMethodDecl>(Val: CurCodeDecl); |
1780 | if (MD && MD->getParent()->isLambda() && |
1781 | MD->getOverloadedOperator() == OO_Call) { |
1782 | const APValue::LValueBase &base = result.Val.getLValueBase(); |
1783 | if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) { |
1784 | if (const VarDecl *VD = dyn_cast<const VarDecl>(Val: D)) { |
1785 | if (!VD->hasAttr<CUDADeviceAttr>()) { |
1786 | return ConstantEmission(); |
1787 | } |
1788 | } |
1789 | } |
1790 | } |
1791 | } |
1792 | |
1793 | // Emit as a constant. |
1794 | auto C = ConstantEmitter(*this).emitAbstract(loc: refExpr->getLocation(), |
1795 | value: result.Val, T: resultType); |
1796 | |
1797 | // Make sure we emit a debug reference to the global variable. |
1798 | // This should probably fire even for |
1799 | if (isa<VarDecl>(Val: value)) { |
1800 | if (!getContext().DeclMustBeEmitted(D: cast<VarDecl>(Val: value))) |
1801 | EmitDeclRefExprDbgValue(E: refExpr, Init: result.Val); |
1802 | } else { |
1803 | assert(isa<EnumConstantDecl>(value)); |
1804 | EmitDeclRefExprDbgValue(E: refExpr, Init: result.Val); |
1805 | } |
1806 | |
1807 | // If we emitted a reference constant, we need to dereference that. |
1808 | if (resultIsReference) |
1809 | return ConstantEmission::forReference(C); |
1810 | |
1811 | return ConstantEmission::forValue(C); |
1812 | } |
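
// Illustrative use, matching the comment above: in
//   void f() { const int N = 4; auto l = [] { return N; }; }
// the reference to N inside the lambda is not an odr-use, so this routine
// lets the lambda body fold N to the constant 4 instead of requiring a
// capture or a global copy of N.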
1813 | |
1814 | static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, |
1815 | const MemberExpr *ME) { |
1816 | if (auto *VD = dyn_cast<VarDecl>(Val: ME->getMemberDecl())) { |
1817 | // Try to emit static variable member expressions as DREs. |
1818 | return DeclRefExpr::Create( |
1819 | Context: CGF.getContext(), QualifierLoc: NestedNameSpecifierLoc(), TemplateKWLoc: SourceLocation(), D: VD, |
1820 | /*RefersToEnclosingVariableOrCapture=*/false, NameLoc: ME->getExprLoc(), |
1821 | T: ME->getType(), VK: ME->getValueKind(), FoundD: nullptr, TemplateArgs: nullptr, NOUR: ME->isNonOdrUse()); |
1822 | } |
1823 | return nullptr; |
1824 | } |
1825 | |
1826 | CodeGenFunction::ConstantEmission |
1827 | CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) { |
1828 | if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(CGF&: *this, ME)) |
1829 | return tryEmitAsConstant(refExpr: DRE); |
1830 | return ConstantEmission(); |
1831 | } |
1832 | |
1833 | llvm::Value *CodeGenFunction::emitScalarConstant( |
1834 | const CodeGenFunction::ConstantEmission &Constant, Expr *E) { |
1835 | assert(Constant && "not a constant" ); |
1836 | if (Constant.isReference()) |
1837 | return EmitLoadOfLValue(V: Constant.getReferenceLValue(CGF&: *this, refExpr: E), |
1838 | Loc: E->getExprLoc()) |
1839 | .getScalarVal(); |
1840 | return Constant.getValue(); |
1841 | } |
1842 | |
1843 | llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue, |
1844 | SourceLocation Loc) { |
1845 | return EmitLoadOfScalar(Addr: lvalue.getAddress(), Volatile: lvalue.isVolatile(), |
1846 | Ty: lvalue.getType(), Loc, BaseInfo: lvalue.getBaseInfo(), |
1847 | TBAAInfo: lvalue.getTBAAInfo(), isNontemporal: lvalue.isNontemporal()); |
1848 | } |
1849 | |
1850 | static bool hasBooleanRepresentation(QualType Ty) { |
1851 | if (Ty->isBooleanType()) |
1852 | return true; |
1853 | |
1854 | if (const EnumType *ET = Ty->getAs<EnumType>()) |
1855 | return ET->getDecl()->getIntegerType()->isBooleanType(); |
1856 | |
1857 | if (const AtomicType *AT = Ty->getAs<AtomicType>()) |
1858 | return hasBooleanRepresentation(Ty: AT->getValueType()); |
1859 | |
1860 | return false; |
1861 | } |
1862 | |
1863 | static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, |
1864 | llvm::APInt &Min, llvm::APInt &End, |
1865 | bool StrictEnums, bool IsBool) { |
1866 | const EnumType *ET = Ty->getAs<EnumType>(); |
1867 | bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums && |
1868 | ET && !ET->getDecl()->isFixed(); |
1869 | if (!IsBool && !IsRegularCPlusPlusEnum) |
1870 | return false; |
1871 | |
1872 | if (IsBool) { |
1873 | Min = llvm::APInt(CGF.getContext().getTypeSize(T: Ty), 0); |
1874 | End = llvm::APInt(CGF.getContext().getTypeSize(T: Ty), 2); |
1875 | } else { |
1876 | const EnumDecl *ED = ET->getDecl(); |
1877 | ED->getValueRange(Max&: End, Min); |
1878 | } |
1879 | return true; |
1880 | } |
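
// For example (illustrative): for 'bool' this produces Min=0, End=2, i.e.
// the half-open range [0, 2) of valid values {0, 1}; for 'enum E { A, B, C }'
// with no fixed underlying type under -fstrict-enums it produces the range
// computed from the enumerators.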
1881 | |
1882 | llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) { |
1883 | llvm::APInt Min, End; |
1884 | if (!getRangeForType(CGF&: *this, Ty, Min, End, StrictEnums: CGM.getCodeGenOpts().StrictEnums, |
1885 | IsBool: hasBooleanRepresentation(Ty))) |
1886 | return nullptr; |
1887 | |
1888 | llvm::MDBuilder MDHelper(getLLVMContext()); |
1889 | return MDHelper.createRange(Lo: Min, Hi: End); |
1890 | } |
1891 | |
1892 | bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, |
1893 | SourceLocation Loc) { |
1894 | bool HasBoolCheck = SanOpts.has(K: SanitizerKind::Bool); |
1895 | bool HasEnumCheck = SanOpts.has(K: SanitizerKind::Enum); |
1896 | if (!HasBoolCheck && !HasEnumCheck) |
1897 | return false; |
1898 | |
1899 | bool IsBool = hasBooleanRepresentation(Ty) || |
1900 | NSAPI(CGM.getContext()).isObjCBOOLType(T: Ty); |
1901 | bool NeedsBoolCheck = HasBoolCheck && IsBool; |
1902 | bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>(); |
1903 | if (!NeedsBoolCheck && !NeedsEnumCheck) |
1904 | return false; |
1905 | |
1906 | // Single-bit booleans don't need to be checked. Special-case this to avoid |
1907 | // a bit width mismatch when handling bitfield values. This is handled by |
1908 | // EmitFromMemory for the non-bitfield case. |
1909 | if (IsBool && |
1910 | cast<llvm::IntegerType>(Val: Value->getType())->getBitWidth() == 1) |
1911 | return false; |
1912 | |
1913 | llvm::APInt Min, End; |
1914 | if (!getRangeForType(CGF&: *this, Ty, Min, End, /*StrictEnums=*/true, IsBool)) |
1915 | return true; |
1916 | |
1917 | auto &Ctx = getLLVMContext(); |
1918 | SanitizerScope SanScope(this); |
1919 | llvm::Value *Check; |
1920 | --End; |
1921 | if (!Min) { |
1922 | Check = Builder.CreateICmpULE(LHS: Value, RHS: llvm::ConstantInt::get(Context&: Ctx, V: End)); |
1923 | } else { |
1924 | llvm::Value *Upper = |
1925 | Builder.CreateICmpSLE(LHS: Value, RHS: llvm::ConstantInt::get(Context&: Ctx, V: End)); |
1926 | llvm::Value *Lower = |
1927 | Builder.CreateICmpSGE(LHS: Value, RHS: llvm::ConstantInt::get(Context&: Ctx, V: Min)); |
1928 | Check = Builder.CreateAnd(LHS: Upper, RHS: Lower); |
1929 | } |
1930 | llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc), |
1931 | EmitCheckTypeDescriptor(T: Ty)}; |
1932 | SanitizerMask Kind = |
1933 | NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool; |
1934 | EmitCheck(Checked: std::make_pair(x&: Check, y&: Kind), Check: SanitizerHandler::LoadInvalidValue, |
1935 | StaticArgs, DynamicArgs: EmitCheckValue(V: Value)); |
1936 | return true; |
1937 | } |
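
// E.g. for a bool loaded as i8 the check built above is roughly (sketch):
//   %ok = icmp ule i8 %v, 1
// followed by a conditional branch to the load_invalid_value handler when
// the comparison fails.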
1938 | |
1939 | llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile, |
1940 | QualType Ty, |
1941 | SourceLocation Loc, |
1942 | LValueBaseInfo BaseInfo, |
1943 | TBAAAccessInfo TBAAInfo, |
1944 | bool isNontemporal) { |
1945 | if (auto *GV = dyn_cast<llvm::GlobalValue>(Val: Addr.getBasePointer())) |
1946 | if (GV->isThreadLocal()) |
1947 | Addr = Addr.withPointer(NewPointer: Builder.CreateThreadLocalAddress(Ptr: GV), |
1948 | IsKnownNonNull: NotKnownNonNull); |
1949 | |
1950 | if (const auto *ClangVecTy = Ty->getAs<VectorType>()) { |
1951 | // Boolean vectors use `iN` as storage type. |
1952 | if (ClangVecTy->isExtVectorBoolType()) { |
1953 | llvm::Type *ValTy = ConvertType(T: Ty); |
1954 | unsigned ValNumElems = |
1955 | cast<llvm::FixedVectorType>(Val: ValTy)->getNumElements(); |
1956 | // Load the `iP` storage object (P is the padded vector size). |
1957 | auto *RawIntV = Builder.CreateLoad(Addr, IsVolatile: Volatile, Name: "load_bits" ); |
1958 | const auto *RawIntTy = RawIntV->getType(); |
1959 | assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors" ); |
1960 | // Bitcast iP --> <P x i1>. |
1961 | auto *PaddedVecTy = llvm::FixedVectorType::get( |
1962 | ElementType: Builder.getInt1Ty(), NumElts: RawIntTy->getPrimitiveSizeInBits()); |
1963 | llvm::Value *V = Builder.CreateBitCast(V: RawIntV, DestTy: PaddedVecTy); |
1964 | // Shuffle <P x i1> --> <N x i1> (N is the actual bit size). |
1965 | V = emitBoolVecConversion(SrcVec: V, NumElementsDst: ValNumElems, Name: "extractvec" ); |
1966 | |
1967 | return EmitFromMemory(Value: V, Ty); |
1968 | } |
1969 | |
1970 | // Handle vectors of size 3 like size 4 for better performance. |
1971 | const llvm::Type *EltTy = Addr.getElementType(); |
1972 | const auto *VTy = cast<llvm::FixedVectorType>(Val: EltTy); |
1973 | |
1974 | if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) { |
1975 | |
1976 | llvm::VectorType *vec4Ty = |
1977 | llvm::FixedVectorType::get(ElementType: VTy->getElementType(), NumElts: 4); |
1978 | Address Cast = Addr.withElementType(ElemTy: vec4Ty); |
1979 | // Now load value. |
1980 | llvm::Value *V = Builder.CreateLoad(Addr: Cast, IsVolatile: Volatile, Name: "loadVec4" ); |
1981 | |
1982 | // Shuffle vector to get vec3. |
1983 | V = Builder.CreateShuffleVector(V, Mask: ArrayRef<int>{0, 1, 2}, Name: "extractVec" ); |
1984 | return EmitFromMemory(Value: V, Ty); |
1985 | } |
1986 | } |
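
// Illustrative: loading an OpenCL float3 through the path above emits a
// <4 x float> load followed by 'shufflevector ... <i32 0, i32 1, i32 2>'
// to form the <3 x float> value, avoiding an awkward 96-bit access.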
1987 | |
1988 | // Atomic operations have to be done on integral types. |
1989 | LValue AtomicLValue = |
1990 | LValue::MakeAddr(Addr, type: Ty, Context&: getContext(), BaseInfo, TBAAInfo); |
1991 | if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(Src: AtomicLValue)) { |
1992 | return EmitAtomicLoad(LV: AtomicLValue, SL: Loc).getScalarVal(); |
1993 | } |
1994 | |
1995 | Addr = |
1996 | Addr.withElementType(ElemTy: convertTypeForLoadStore(ASTTy: Ty, LLVMTy: Addr.getElementType())); |
1997 | |
1998 | llvm::LoadInst *Load = Builder.CreateLoad(Addr, IsVolatile: Volatile); |
1999 | if (isNontemporal) { |
2000 | llvm::MDNode *Node = llvm::MDNode::get( |
2001 | Context&: Load->getContext(), MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 1))); |
2002 | Load->setMetadata(KindID: llvm::LLVMContext::MD_nontemporal, Node); |
2003 | } |
2004 | |
2005 | CGM.DecorateInstructionWithTBAA(Inst: Load, TBAAInfo); |
2006 | |
2007 | if (EmitScalarRangeCheck(Value: Load, Ty, Loc)) { |
2008 | // In order to prevent the optimizer from throwing away the check, don't |
2009 | // attach range metadata to the load. |
2010 | } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) |
2011 | if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) { |
2012 | Load->setMetadata(KindID: llvm::LLVMContext::MD_range, Node: RangeInfo); |
2013 | Load->setMetadata(KindID: llvm::LLVMContext::MD_noundef, |
2014 | Node: llvm::MDNode::get(Context&: getLLVMContext(), MDs: std::nullopt)); |
2015 | } |
2016 | |
2017 | return EmitFromMemory(Value: Load, Ty); |
2018 | } |
2019 | |
2020 | /// Converts a scalar value from its primary IR type (as returned |
2021 | /// by ConvertType) to its load/store type (as returned by |
2022 | /// convertTypeForLoadStore). |
2023 | llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) { |
2024 | if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) { |
2025 | llvm::Type *StoreTy = convertTypeForLoadStore(ASTTy: Ty, LLVMTy: Value->getType()); |
2026 | bool Signed = Ty->isSignedIntegerOrEnumerationType(); |
2027 | return Builder.CreateIntCast(V: Value, DestTy: StoreTy, isSigned: Signed, Name: "storedv" ); |
2028 | } |
2029 | |
2030 | if (Ty->isExtVectorBoolType()) { |
2031 | llvm::Type *StoreTy = convertTypeForLoadStore(ASTTy: Ty, LLVMTy: Value->getType()); |
2032 | // Expand to the memory bit width. |
2033 | unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits(); |
2034 | // <N x i1> --> <P x i1>. |
2035 | Value = emitBoolVecConversion(SrcVec: Value, NumElementsDst: MemNumElems, Name: "insertvec" ); |
2036 | // <P x i1> --> iP. |
2037 | Value = Builder.CreateBitCast(V: Value, DestTy: StoreTy); |
2038 | } |
2039 | |
2040 | return Value; |
2041 | } |
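
// For example, C/C++ 'bool' has primary IR type i1 but load/store type i8,
// so EmitToMemory zero-extends i1 -> i8 (named "storedv"), and
// EmitFromMemory below truncates i8 -> i1 (named "loadedv").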
2042 | |
2043 | /// Converts a scalar value from its load/store type (as returned |
2044 | /// by convertTypeForLoadStore) to its primary IR type (as returned |
2045 | /// by ConvertType). |
2046 | llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) { |
2047 | if (Ty->isExtVectorBoolType()) { |
2048 | const auto *RawIntTy = Value->getType(); |
2049 | // Bitcast iP --> <P x i1>. |
2050 | auto *PaddedVecTy = llvm::FixedVectorType::get( |
2051 | ElementType: Builder.getInt1Ty(), NumElts: RawIntTy->getPrimitiveSizeInBits()); |
2052 | auto *V = Builder.CreateBitCast(V: Value, DestTy: PaddedVecTy); |
2053 | // Shuffle <P x i1> --> <N x i1> (N is the actual bit size). |
2054 | llvm::Type *ValTy = ConvertType(T: Ty); |
2055 | unsigned ValNumElems = cast<llvm::FixedVectorType>(Val: ValTy)->getNumElements(); |
2056 | return emitBoolVecConversion(SrcVec: V, NumElementsDst: ValNumElems, Name: "extractvec" ); |
2057 | } |
2058 | |
2059 | if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) { |
2060 | llvm::Type *ResTy = ConvertType(T: Ty); |
2061 | return Builder.CreateTrunc(V: Value, DestTy: ResTy, Name: "loadedv" ); |
2062 | } |
2063 | |
2064 | return Value; |
2065 | } |
2066 | |
2067 | // Convert the pointer of \p Addr to a pointer to a vector (the value type of |
2068 | // MatrixType), if it points to an array (the memory type of MatrixType). |
2069 | static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, |
2070 | CodeGenFunction &CGF, |
2071 | bool IsVector = true) { |
2072 | auto *ArrayTy = dyn_cast<llvm::ArrayType>(Val: Addr.getElementType()); |
2073 | if (ArrayTy && IsVector) { |
2074 | auto *VectorTy = llvm::FixedVectorType::get(ElementType: ArrayTy->getElementType(), |
2075 | NumElts: ArrayTy->getNumElements()); |
2076 | |
2077 | return Addr.withElementType(ElemTy: VectorTy); |
2078 | } |
2079 | auto *VectorTy = dyn_cast<llvm::VectorType>(Val: Addr.getElementType()); |
2080 | if (VectorTy && !IsVector) { |
2081 | auto *ArrayTy = llvm::ArrayType::get( |
2082 | ElementType: VectorTy->getElementType(), |
2083 | NumElements: cast<llvm::FixedVectorType>(Val: VectorTy)->getNumElements()); |
2084 | |
2085 | return Addr.withElementType(ElemTy: ArrayTy); |
2086 | } |
2087 | |
2088 | return Addr; |
2089 | } |
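
// Illustrative: a 2x2 matrix of float lives in memory as [4 x float] but is
// operated on as <4 x float>; this helper rewrites the address's element
// type in whichever direction the caller needs.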
2090 | |
2091 | // Emit a store of a matrix LValue. This may require casting the original |
2092 | // pointer from the memory type (ArrayType) to a pointer to the value type |
2093 | // (VectorType). |
2094 | static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, |
2095 | bool isInit, CodeGenFunction &CGF) { |
2096 | Address Addr = MaybeConvertMatrixAddress(Addr: lvalue.getAddress(), CGF, |
2097 | IsVector: value->getType()->isVectorTy()); |
2098 | CGF.EmitStoreOfScalar(Value: value, Addr, Volatile: lvalue.isVolatile(), Ty: lvalue.getType(), |
2099 | BaseInfo: lvalue.getBaseInfo(), TBAAInfo: lvalue.getTBAAInfo(), isInit, |
2100 | isNontemporal: lvalue.isNontemporal()); |
2101 | } |
2102 | |
2103 | void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr, |
2104 | bool Volatile, QualType Ty, |
2105 | LValueBaseInfo BaseInfo, |
2106 | TBAAAccessInfo TBAAInfo, |
2107 | bool isInit, bool isNontemporal) { |
2108 | if (auto *GV = dyn_cast<llvm::GlobalValue>(Val: Addr.getBasePointer())) |
2109 | if (GV->isThreadLocal()) |
2110 | Addr = Addr.withPointer(NewPointer: Builder.CreateThreadLocalAddress(Ptr: GV), |
2111 | IsKnownNonNull: NotKnownNonNull); |
2112 | |
2113 | llvm::Type *SrcTy = Value->getType(); |
2114 | if (const auto *ClangVecTy = Ty->getAs<VectorType>()) { |
2115 | auto *VecTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy); |
2116 | if (!CGM.getCodeGenOpts().PreserveVec3Type) { |
2117 | // Handle vec3 specially. |
2118 | if (VecTy && !ClangVecTy->isExtVectorBoolType() && |
2119 | cast<llvm::FixedVectorType>(Val: VecTy)->getNumElements() == 3) { |
2120 | // Our source is a vec3, do a shuffle vector to make it a vec4. |
2121 | Value = Builder.CreateShuffleVector(V: Value, Mask: ArrayRef<int>{0, 1, 2, -1}, |
2122 | Name: "extractVec" ); |
2123 | SrcTy = llvm::FixedVectorType::get(ElementType: VecTy->getElementType(), NumElts: 4); |
2124 | } |
2125 | if (Addr.getElementType() != SrcTy) { |
2126 | Addr = Addr.withElementType(ElemTy: SrcTy); |
2127 | } |
2128 | } |
2129 | } |
2130 | |
2131 | Value = EmitToMemory(Value, Ty); |
2132 | |
2133 | LValue AtomicLValue = |
2134 | LValue::MakeAddr(Addr, type: Ty, Context&: getContext(), BaseInfo, TBAAInfo); |
2135 | if (Ty->isAtomicType() || |
2136 | (!isInit && LValueIsSuitableForInlineAtomic(Src: AtomicLValue))) { |
2137 | EmitAtomicStore(rvalue: RValue::get(V: Value), lvalue: AtomicLValue, isInit); |
2138 | return; |
2139 | } |
2140 | |
2141 | llvm::StoreInst *Store = Builder.CreateStore(Val: Value, Addr, IsVolatile: Volatile); |
2142 | if (isNontemporal) { |
2143 | llvm::MDNode *Node = |
2144 | llvm::MDNode::get(Context&: Store->getContext(), |
2145 | MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 1))); |
2146 | Store->setMetadata(KindID: llvm::LLVMContext::MD_nontemporal, Node); |
2147 | } |
2148 | |
2149 | CGM.DecorateInstructionWithTBAA(Inst: Store, TBAAInfo); |
2150 | } |
2151 | |
2152 | void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue, |
2153 | bool isInit) { |
2154 | if (lvalue.getType()->isConstantMatrixType()) { |
2155 | EmitStoreOfMatrixScalar(value, lvalue, isInit, CGF&: *this); |
2156 | return; |
2157 | } |
2158 | |
2159 | EmitStoreOfScalar(Value: value, Addr: lvalue.getAddress(), Volatile: lvalue.isVolatile(), |
2160 | Ty: lvalue.getType(), BaseInfo: lvalue.getBaseInfo(), |
2161 | TBAAInfo: lvalue.getTBAAInfo(), isInit, isNontemporal: lvalue.isNontemporal()); |
2162 | } |
2163 | |
2164 | // Emit a load of an LValue of matrix type. This may require casting the |
2165 | // pointer from the memory type (ArrayType) to a pointer to the value type (VectorType). |
2166 | static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, |
2167 | CodeGenFunction &CGF) { |
2168 | assert(LV.getType()->isConstantMatrixType()); |
2169 | Address Addr = MaybeConvertMatrixAddress(Addr: LV.getAddress(), CGF); |
2170 | LV.setAddress(Addr); |
2171 | return RValue::get(V: CGF.EmitLoadOfScalar(lvalue: LV, Loc)); |
2172 | } |
2173 | |
2174 | RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot, |
2175 | SourceLocation Loc) { |
2176 | QualType Ty = LV.getType(); |
2177 | switch (getEvaluationKind(T: Ty)) { |
2178 | case TEK_Scalar: |
2179 | return EmitLoadOfLValue(V: LV, Loc); |
2180 | case TEK_Complex: |
2181 | return RValue::getComplex(C: EmitLoadOfComplex(src: LV, loc: Loc)); |
2182 | case TEK_Aggregate: |
2183 | EmitAggFinalDestCopy(Type: Ty, Dest: Slot, Src: LV, SrcKind: EVK_NonRValue); |
2184 | return Slot.asRValue(); |
2185 | } |
2186 | llvm_unreachable("bad evaluation kind" ); |
2187 | } |
2188 | |
2189 | /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this |
2190 | /// method emits the address of the lvalue, then loads the result as an rvalue, |
2191 | /// returning the rvalue. |
2192 | RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) { |
2193 | if (LV.isObjCWeak()) { |
2194 | // load of a __weak object. |
2195 | Address AddrWeakObj = LV.getAddress(); |
2196 | return RValue::get(V: CGM.getObjCRuntime().EmitObjCWeakRead(CGF&: *this, |
2197 | AddrWeakObj)); |
2198 | } |
2199 | if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { |
2200 | // In MRC mode, we do a load+autorelease. |
2201 | if (!getLangOpts().ObjCAutoRefCount) { |
2202 | return RValue::get(V: EmitARCLoadWeak(addr: LV.getAddress())); |
2203 | } |
2204 | |
2205 | // In ARC mode, we load retained and then consume the value. |
2206 | llvm::Value *Object = EmitARCLoadWeakRetained(addr: LV.getAddress()); |
2207 | Object = EmitObjCConsumeObject(T: LV.getType(), Ptr: Object); |
2208 | return RValue::get(V: Object); |
2209 | } |
2210 | |
2211 | if (LV.isSimple()) { |
2212 | assert(!LV.getType()->isFunctionType()); |
2213 | |
2214 | if (LV.getType()->isConstantMatrixType()) |
2215 | return EmitLoadOfMatrixLValue(LV, Loc, CGF&: *this); |
2216 | |
2217 | // Everything needs a load. |
2218 | return RValue::get(V: EmitLoadOfScalar(lvalue: LV, Loc)); |
2219 | } |
2220 | |
2221 | if (LV.isVectorElt()) { |
2222 | llvm::LoadInst *Load = Builder.CreateLoad(Addr: LV.getVectorAddress(), |
2223 | IsVolatile: LV.isVolatileQualified()); |
2224 | return RValue::get(V: Builder.CreateExtractElement(Vec: Load, Idx: LV.getVectorIdx(), |
2225 | Name: "vecext" )); |
2226 | } |
2227 | |
2228 | // If this is a reference to a subset of the elements of a vector, either |
2229 | // shuffle the input or extract/insert them as appropriate. |
2230 | if (LV.isExtVectorElt()) { |
2231 | return EmitLoadOfExtVectorElementLValue(V: LV); |
2232 | } |
2233 | |
2234 | // Global named register variables always invoke intrinsics. |
2235 | if (LV.isGlobalReg()) |
2236 | return EmitLoadOfGlobalRegLValue(LV); |
2237 | |
2238 | if (LV.isMatrixElt()) { |
2239 | llvm::Value *Idx = LV.getMatrixIdx(); |
2240 | if (CGM.getCodeGenOpts().OptimizationLevel > 0) { |
2241 | const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>(); |
2242 | llvm::MatrixBuilder MB(Builder); |
2243 | MB.CreateIndexAssumption(Idx, NumElements: MatTy->getNumElementsFlattened()); |
2244 | } |
2245 | llvm::LoadInst *Load = |
2246 | Builder.CreateLoad(Addr: LV.getMatrixAddress(), IsVolatile: LV.isVolatileQualified()); |
2247 | return RValue::get(V: Builder.CreateExtractElement(Vec: Load, Idx, Name: "matrixext" )); |
2248 | } |
2249 | |
2250 | assert(LV.isBitField() && "Unknown LValue type!" ); |
2251 | return EmitLoadOfBitfieldLValue(LV, Loc); |
2252 | } |
2253 | |
2254 | RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, |
2255 | SourceLocation Loc) { |
2256 | const CGBitFieldInfo &Info = LV.getBitFieldInfo(); |
2257 | |
2258 | // Get the output type. |
2259 | llvm::Type *ResLTy = ConvertType(T: LV.getType()); |
2260 | |
2261 | Address Ptr = LV.getBitFieldAddress(); |
2262 | llvm::Value *Val = |
2263 | Builder.CreateLoad(Addr: Ptr, IsVolatile: LV.isVolatileQualified(), Name: "bf.load" ); |
2264 | |
2265 | bool UseVolatile = LV.isVolatileQualified() && |
2266 | Info.VolatileStorageSize != 0 && isAAPCS(TargetInfo: CGM.getTarget()); |
2267 | const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; |
2268 | const unsigned StorageSize = |
2269 | UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; |
2270 | if (Info.IsSigned) { |
2271 | assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize); |
2272 | unsigned HighBits = StorageSize - Offset - Info.Size; |
2273 | if (HighBits) |
2274 | Val = Builder.CreateShl(LHS: Val, RHS: HighBits, Name: "bf.shl" ); |
2275 | if (Offset + HighBits) |
2276 | Val = Builder.CreateAShr(LHS: Val, RHS: Offset + HighBits, Name: "bf.ashr" ); |
2277 | } else { |
2278 | if (Offset) |
2279 | Val = Builder.CreateLShr(LHS: Val, RHS: Offset, Name: "bf.lshr" ); |
2280 | if (static_cast<unsigned>(Offset) + Info.Size < StorageSize) |
2281 | Val = Builder.CreateAnd( |
2282 | LHS: Val, RHS: llvm::APInt::getLowBitsSet(numBits: StorageSize, loBitsSet: Info.Size), Name: "bf.clear" ); |
2283 | } |
2284 | Val = Builder.CreateIntCast(V: Val, DestTy: ResLTy, isSigned: Info.IsSigned, Name: "bf.cast" ); |
2285 | EmitScalarRangeCheck(Value: Val, Ty: LV.getType(), Loc); |
2286 | return RValue::get(V: Val); |
2287 | } |
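
// Worked example (illustrative): for a signed bitfield 'int x : 3' at
// Offset=2 in an i8 storage unit (StorageSize=8), HighBits = 8 - 2 - 3 = 3,
// so the code above emits 'shl i8 %bf.load, 3' then 'ashr i8 %bf.shl, 5',
// which moves the field down to bit 0 and sign-extends it in one step.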
2288 | |
2289 | // If this is a reference to a subset of the elements of a vector, create an |
2290 | // appropriate shufflevector. |
2291 | RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) { |
2292 | llvm::Value *Vec = Builder.CreateLoad(Addr: LV.getExtVectorAddress(), |
2293 | IsVolatile: LV.isVolatileQualified()); |
2294 | |
2295 | // HLSL allows treating scalars as one-element vectors. Converting the scalar |
2296 | // IR value to a vector here allows the rest of codegen to behave as normal. |
2297 | if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) { |
2298 | llvm::Type *DstTy = llvm::FixedVectorType::get(ElementType: Vec->getType(), NumElts: 1); |
2299 | llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGM.Int64Ty); |
2300 | Vec = Builder.CreateInsertElement(VecTy: DstTy, NewElt: Vec, Idx: Zero, Name: "cast.splat" ); |
2301 | } |
2302 | |
2303 | const llvm::Constant *Elts = LV.getExtVectorElts(); |
2304 | |
2305 | // If the result of the expression is a non-vector type, we must be extracting |
2306 | // a single element. Just codegen as an extractelement. |
2307 | const VectorType *ExprVT = LV.getType()->getAs<VectorType>(); |
2308 | if (!ExprVT) { |
2309 | unsigned InIdx = getAccessedFieldNo(Idx: 0, Elts); |
2310 | llvm::Value *Elt = llvm::ConstantInt::get(Ty: SizeTy, V: InIdx); |
2311 | return RValue::get(V: Builder.CreateExtractElement(Vec, Idx: Elt)); |
2312 | } |
2313 | |
2314 | // Always use a shufflevector to try to retain the original program structure. |
2315 | unsigned NumResultElts = ExprVT->getNumElements(); |
2316 | |
2317 | SmallVector<int, 4> Mask; |
2318 | for (unsigned i = 0; i != NumResultElts; ++i) |
2319 | Mask.push_back(Elt: getAccessedFieldNo(Idx: i, Elts)); |
2320 | |
2321 | Vec = Builder.CreateShuffleVector(V: Vec, Mask); |
2322 | return RValue::get(V: Vec); |
2323 | } |
2324 | |
2325 | /// Generates lvalue for partial ext_vector access. |
2326 | Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) { |
2327 | Address VectorAddress = LV.getExtVectorAddress(); |
2328 | QualType EQT = LV.getType()->castAs<VectorType>()->getElementType(); |
2329 | llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(T: EQT); |
2330 | |
2331 | Address CastToPointerElement = VectorAddress.withElementType(ElemTy: VectorElementTy); |
2332 | |
2333 | const llvm::Constant *Elts = LV.getExtVectorElts(); |
2334 | unsigned ix = getAccessedFieldNo(Idx: 0, Elts); |
2335 | |
2336 | Address VectorBasePtrPlusIx = |
2337 | Builder.CreateConstInBoundsGEP(Addr: CastToPointerElement, Index: ix, |
2338 | Name: "vector.elt" ); |
2339 | |
2340 | return VectorBasePtrPlusIx; |
2341 | } |
2342 | |
2343 | /// Loads of global named registers are always calls to intrinsics. |
2344 | RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) { |
2345 | assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) && |
2346 | "Bad type for register variable" ); |
2347 | llvm::MDNode *RegName = cast<llvm::MDNode>( |
2348 | Val: cast<llvm::MetadataAsValue>(Val: LV.getGlobalReg())->getMetadata()); |
2349 | |
2350 | // We accept integer and pointer types only |
2351 | llvm::Type *OrigTy = CGM.getTypes().ConvertType(T: LV.getType()); |
2352 | llvm::Type *Ty = OrigTy; |
2353 | if (OrigTy->isPointerTy()) |
2354 | Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); |
2355 | llvm::Type *Types[] = { Ty }; |
2356 | |
2357 | llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::read_register, Tys: Types); |
2358 | llvm::Value *Call = Builder.CreateCall( |
2359 | Callee: F, Args: llvm::MetadataAsValue::get(Context&: Ty->getContext(), MD: RegName)); |
2360 | if (OrigTy->isPointerTy()) |
2361 | Call = Builder.CreateIntToPtr(V: Call, DestTy: OrigTy); |
2362 | return RValue::get(V: Call); |
2363 | } |
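
// For illustration: reading 'register unsigned long current_sp asm("sp");'
// lowers to roughly
//   %v = call i64 @llvm.read_register.i64(metadata !{!"sp"})
// with an extra inttoptr when the variable has pointer type.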
2364 | |
2365 | /// EmitStoreThroughLValue - Store the specified rvalue into the specified |
2366 | /// lvalue, where both are guaranteed to have the same type, and that type |
2367 | /// is 'Ty'. |
2368 | void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, |
2369 | bool isInit) { |
2370 | if (!Dst.isSimple()) { |
2371 | if (Dst.isVectorElt()) { |
2372 | // Read/modify/write the vector, inserting the new element. |
2373 | llvm::Value *Vec = Builder.CreateLoad(Addr: Dst.getVectorAddress(), |
2374 | IsVolatile: Dst.isVolatileQualified()); |
2375 | auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Val: Vec->getType()); |
2376 | if (IRStoreTy) { |
2377 | auto *IRVecTy = llvm::FixedVectorType::get( |
2378 | ElementType: Builder.getInt1Ty(), NumElts: IRStoreTy->getPrimitiveSizeInBits()); |
2379 | Vec = Builder.CreateBitCast(V: Vec, DestTy: IRVecTy); |
2380 | // iN --> <N x i1>. |
2381 | } |
2382 | Vec = Builder.CreateInsertElement(Vec, NewElt: Src.getScalarVal(), |
2383 | Idx: Dst.getVectorIdx(), Name: "vecins" ); |
2384 | if (IRStoreTy) { |
2385 | // <N x i1> --> <iN>. |
2386 | Vec = Builder.CreateBitCast(V: Vec, DestTy: IRStoreTy); |
2387 | } |
2388 | Builder.CreateStore(Val: Vec, Addr: Dst.getVectorAddress(), |
2389 | IsVolatile: Dst.isVolatileQualified()); |
2390 | return; |
2391 | } |
2392 | |
2393 | // If this is an update of extended vector elements, insert them as |
2394 | // appropriate. |
2395 | if (Dst.isExtVectorElt()) |
2396 | return EmitStoreThroughExtVectorComponentLValue(Src, Dst); |
2397 | |
2398 | if (Dst.isGlobalReg()) |
2399 | return EmitStoreThroughGlobalRegLValue(Src, Dst); |
2400 | |
2401 | if (Dst.isMatrixElt()) { |
2402 | llvm::Value *Idx = Dst.getMatrixIdx(); |
2403 | if (CGM.getCodeGenOpts().OptimizationLevel > 0) { |
2404 | const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>(); |
2405 | llvm::MatrixBuilder MB(Builder); |
2406 | MB.CreateIndexAssumption(Idx, NumElements: MatTy->getNumElementsFlattened()); |
2407 | } |
2408 | llvm::Instruction *Load = Builder.CreateLoad(Addr: Dst.getMatrixAddress()); |
2409 | llvm::Value *Vec = |
2410 | Builder.CreateInsertElement(Vec: Load, NewElt: Src.getScalarVal(), Idx, Name: "matins" ); |
2411 | Builder.CreateStore(Val: Vec, Addr: Dst.getMatrixAddress(), |
2412 | IsVolatile: Dst.isVolatileQualified()); |
2413 | return; |
2414 | } |
2415 | |
2416 | assert(Dst.isBitField() && "Unknown LValue type" ); |
2417 | return EmitStoreThroughBitfieldLValue(Src, Dst); |
2418 | } |
2419 | |
2420 | // There's special magic for assigning into an ARC-qualified l-value. |
2421 | if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) { |
2422 | switch (Lifetime) { |
2423 | case Qualifiers::OCL_None: |
2424 | llvm_unreachable("present but none" ); |
2425 | |
2426 | case Qualifiers::OCL_ExplicitNone: |
2427 | // nothing special |
2428 | break; |
2429 | |
2430 | case Qualifiers::OCL_Strong: |
2431 | if (isInit) { |
2432 | Src = RValue::get(V: EmitARCRetain(type: Dst.getType(), value: Src.getScalarVal())); |
2433 | break; |
2434 | } |
2435 | EmitARCStoreStrong(lvalue: Dst, value: Src.getScalarVal(), /*ignore*/ resultIgnored: true); |
2436 | return; |
2437 | |
2438 | case Qualifiers::OCL_Weak: |
2439 | if (isInit) |
2440 | // Initialize and then skip the primitive store. |
2441 | EmitARCInitWeak(addr: Dst.getAddress(), value: Src.getScalarVal()); |
2442 | else |
2443 | EmitARCStoreWeak(addr: Dst.getAddress(), value: Src.getScalarVal(), |
2444 | /*ignore*/ ignored: true); |
2445 | return; |
2446 | |
2447 | case Qualifiers::OCL_Autoreleasing: |
2448 | Src = RValue::get(V: EmitObjCExtendObjectLifetime(T: Dst.getType(), |
2449 | Ptr: Src.getScalarVal())); |
2450 | // fall into the normal path |
2451 | break; |
2452 | } |
2453 | } |
2454 | |
2455 | if (Dst.isObjCWeak() && !Dst.isNonGC()) { |
2456 | // Assignment to a __weak object. |
2457 | Address LvalueDst = Dst.getAddress(); |
2458 | llvm::Value *src = Src.getScalarVal(); |
2459 | CGM.getObjCRuntime().EmitObjCWeakAssign(CGF&: *this, src, dest: LvalueDst); |
2460 | return; |
2461 | } |
2462 | |
2463 | if (Dst.isObjCStrong() && !Dst.isNonGC()) { |
2464 | // Assignment to a __strong object. |
2465 | Address LvalueDst = Dst.getAddress(); |
2466 | llvm::Value *src = Src.getScalarVal(); |
2467 | if (Dst.isObjCIvar()) { |
2468 | assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL" ); |
2469 | llvm::Type *ResultType = IntPtrTy; |
2470 | Address dst = EmitPointerWithAlignment(E: Dst.getBaseIvarExp()); |
2471 | llvm::Value *RHS = dst.emitRawPointer(CGF&: *this); |
2472 | RHS = Builder.CreatePtrToInt(V: RHS, DestTy: ResultType, Name: "sub.ptr.rhs.cast" ); |
2473 | llvm::Value *LHS = Builder.CreatePtrToInt(V: LvalueDst.emitRawPointer(CGF&: *this), |
2474 | DestTy: ResultType, Name: "sub.ptr.lhs.cast" ); |
2475 | llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, Name: "ivar.offset" ); |
2476 | CGM.getObjCRuntime().EmitObjCIvarAssign(CGF&: *this, src, dest: dst, ivarOffset: BytesBetween); |
2477 | } else if (Dst.isGlobalObjCRef()) { |
2478 | CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF&: *this, src, dest: LvalueDst, |
2479 | threadlocal: Dst.isThreadLocalRef()); |
2480 | } |
2481 | else |
2482 | CGM.getObjCRuntime().EmitObjCStrongCastAssign(CGF&: *this, src, dest: LvalueDst); |
2483 | return; |
2484 | } |
2485 | |
2486 | assert(Src.isScalar() && "Can't emit an agg store with this method" ); |
2487 | EmitStoreOfScalar(value: Src.getScalarVal(), lvalue: Dst, isInit); |
2488 | } |
2489 | |
2490 | void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, |
2491 | llvm::Value **Result) { |
2492 | const CGBitFieldInfo &Info = Dst.getBitFieldInfo(); |
2493 | llvm::Type *ResLTy = convertTypeForLoadStore(ASTTy: Dst.getType()); |
2494 | Address Ptr = Dst.getBitFieldAddress(); |
2495 | |
2496 | // Get the source value, truncated to the width of the bit-field. |
2497 | llvm::Value *SrcVal = Src.getScalarVal(); |
2498 | |
2499 | // Cast the source to the storage type and shift it into place. |
2500 | SrcVal = Builder.CreateIntCast(V: SrcVal, DestTy: Ptr.getElementType(), |
2501 | /*isSigned=*/false); |
2502 | llvm::Value *MaskedVal = SrcVal; |
2503 | |
2504 | const bool UseVolatile = |
2505 | CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() && |
2506 | Info.VolatileStorageSize != 0 && isAAPCS(TargetInfo: CGM.getTarget()); |
2507 | const unsigned StorageSize = |
2508 | UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; |
2509 | const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; |
  // See if there are other bits in the bitfield's storage that we'll need to
  // load and mask together with the source before storing.
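  // As an illustrative sketch (values invented for this comment): for a 3-bit
  // field at bit offset 2 within 8-bit storage, the code below masks the
  // source with 0b111, shifts it left by 2, clears bits 2..4 of the loaded
  // storage with ~0b00011100, and ORs the two parts back together.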
2512 | if (StorageSize != Info.Size) { |
2513 | assert(StorageSize > Info.Size && "Invalid bitfield size." ); |
2514 | llvm::Value *Val = |
2515 | Builder.CreateLoad(Addr: Ptr, IsVolatile: Dst.isVolatileQualified(), Name: "bf.load" ); |
2516 | |
2517 | // Mask the source value as needed. |
2518 | if (!hasBooleanRepresentation(Ty: Dst.getType())) |
2519 | SrcVal = Builder.CreateAnd( |
2520 | LHS: SrcVal, RHS: llvm::APInt::getLowBitsSet(numBits: StorageSize, loBitsSet: Info.Size), |
2521 | Name: "bf.value" ); |
2522 | MaskedVal = SrcVal; |
2523 | if (Offset) |
2524 | SrcVal = Builder.CreateShl(LHS: SrcVal, RHS: Offset, Name: "bf.shl" ); |
2525 | |
2526 | // Mask out the original value. |
2527 | Val = Builder.CreateAnd( |
2528 | LHS: Val, RHS: ~llvm::APInt::getBitsSet(numBits: StorageSize, loBit: Offset, hiBit: Offset + Info.Size), |
2529 | Name: "bf.clear" ); |
2530 | |
2531 | // Or together the unchanged values and the source value. |
2532 | SrcVal = Builder.CreateOr(LHS: Val, RHS: SrcVal, Name: "bf.set" ); |
2533 | } else { |
2534 | assert(Offset == 0); |
    // According to the AAPCS:
2536 | // When a volatile bit-field is written, and its container does not overlap |
2537 | // with any non-bit-field member, its container must be read exactly once |
2538 | // and written exactly once using the access width appropriate to the type |
2539 | // of the container. The two accesses are not atomic. |
2540 | if (Dst.isVolatileQualified() && isAAPCS(TargetInfo: CGM.getTarget()) && |
2541 | CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) |
2542 | Builder.CreateLoad(Addr: Ptr, IsVolatile: true, Name: "bf.load" ); |
2543 | } |
2544 | |
2545 | // Write the new value back out. |
2546 | Builder.CreateStore(Val: SrcVal, Addr: Ptr, IsVolatile: Dst.isVolatileQualified()); |
2547 | |
2548 | // Return the new value of the bit-field, if requested. |
2549 | if (Result) { |
2550 | llvm::Value *ResultVal = MaskedVal; |
2551 | |
2552 | // Sign extend the value if needed. |
2553 | if (Info.IsSigned) { |
2554 | assert(Info.Size <= StorageSize); |
2555 | unsigned HighBits = StorageSize - Info.Size; |
2556 | if (HighBits) { |
2557 | ResultVal = Builder.CreateShl(LHS: ResultVal, RHS: HighBits, Name: "bf.result.shl" ); |
2558 | ResultVal = Builder.CreateAShr(LHS: ResultVal, RHS: HighBits, Name: "bf.result.ashr" ); |
2559 | } |
2560 | } |
2561 | |
2562 | ResultVal = Builder.CreateIntCast(V: ResultVal, DestTy: ResLTy, isSigned: Info.IsSigned, |
2563 | Name: "bf.result.cast" ); |
2564 | *Result = EmitFromMemory(Value: ResultVal, Ty: Dst.getType()); |
2565 | } |
2566 | } |
2567 | |
2568 | void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, |
2569 | LValue Dst) { |
2570 | // HLSL allows storing to scalar values through ExtVector component LValues. |
2571 | // To support this we need to handle the case where the destination address is |
2572 | // a scalar. |
2573 | Address DstAddr = Dst.getExtVectorAddress(); |
2574 | if (!DstAddr.getElementType()->isVectorTy()) { |
2575 | assert(!Dst.getType()->isVectorType() && |
2576 | "this should only occur for non-vector l-values" ); |
2577 | Builder.CreateStore(Val: Src.getScalarVal(), Addr: DstAddr, IsVolatile: Dst.isVolatileQualified()); |
2578 | return; |
2579 | } |
2580 | |
2581 | // This access turns into a read/modify/write of the vector. Load the input |
2582 | // value now. |
2583 | llvm::Value *Vec = Builder.CreateLoad(Addr: DstAddr, IsVolatile: Dst.isVolatileQualified()); |
2584 | const llvm::Constant *Elts = Dst.getExtVectorElts(); |
2585 | |
2586 | llvm::Value *SrcVal = Src.getScalarVal(); |
2587 | |
2588 | if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) { |
2589 | unsigned NumSrcElts = VTy->getNumElements(); |
2590 | unsigned NumDstElts = |
2591 | cast<llvm::FixedVectorType>(Val: Vec->getType())->getNumElements(); |
2592 | if (NumDstElts == NumSrcElts) { |
      // Use a shuffle vector when the source and destination have the same
      // number of elements; invert the vector mask, since it is applied on
      // the side being stored.
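      // A small worked example (illustrative syntax): for 'V.yx = Src' on
      // two-element vectors, Elts is [1, 0], so the loop below computes
      // Mask = [1, 0] and the shuffle stores Src with its elements swapped.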
2596 | SmallVector<int, 4> Mask(NumDstElts); |
2597 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2598 | Mask[getAccessedFieldNo(Idx: i, Elts)] = i; |
2599 | |
2600 | Vec = Builder.CreateShuffleVector(V: SrcVal, Mask); |
2601 | } else if (NumDstElts > NumSrcElts) { |
      // Extend the source vector to the destination's length, then shuffle it
      // into the destination.
2604 | // FIXME: since we're shuffling with undef, can we just use the indices |
2605 | // into that? This could be simpler. |
2606 | SmallVector<int, 4> ExtMask; |
2607 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2608 | ExtMask.push_back(Elt: i); |
2609 | ExtMask.resize(N: NumDstElts, NV: -1); |
2610 | llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(V: SrcVal, Mask: ExtMask); |
      // Build an identity shuffle mask for the destination vector.
2612 | SmallVector<int, 4> Mask; |
2613 | for (unsigned i = 0; i != NumDstElts; ++i) |
2614 | Mask.push_back(Elt: i); |
2615 | |
2616 | // When the vector size is odd and .odd or .hi is used, the last element |
2617 | // of the Elts constant array will be one past the size of the vector. |
2618 | // Ignore the last element here, if it is greater than the mask size. |
2619 | if (getAccessedFieldNo(Idx: NumSrcElts - 1, Elts) == Mask.size()) |
2620 | NumSrcElts--; |
2621 | |
      // Modify the mask entries for the elements that get shuffled in.
2623 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2624 | Mask[getAccessedFieldNo(Idx: i, Elts)] = i + NumDstElts; |
2625 | Vec = Builder.CreateShuffleVector(V1: Vec, V2: ExtSrcVal, Mask); |
2626 | } else { |
2627 | // We should never shorten the vector |
2628 | llvm_unreachable("unexpected shorten vector length" ); |
2629 | } |
2630 | } else { |
    // If the Src is a scalar (not a vector) and the target is a vector, it
    // must be updating one element.
2633 | unsigned InIdx = getAccessedFieldNo(Idx: 0, Elts); |
2634 | llvm::Value *Elt = llvm::ConstantInt::get(Ty: SizeTy, V: InIdx); |
2635 | Vec = Builder.CreateInsertElement(Vec, NewElt: SrcVal, Idx: Elt); |
2636 | } |
2637 | |
2638 | Builder.CreateStore(Val: Vec, Addr: Dst.getExtVectorAddress(), |
2639 | IsVolatile: Dst.isVolatileQualified()); |
2640 | } |
2641 | |
/// Stores to global named registers are always calls to intrinsics.
2643 | void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) { |
2644 | assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) && |
2645 | "Bad type for register variable" ); |
2646 | llvm::MDNode *RegName = cast<llvm::MDNode>( |
2647 | Val: cast<llvm::MetadataAsValue>(Val: Dst.getGlobalReg())->getMetadata()); |
2648 | assert(RegName && "Register LValue is not metadata" ); |
2649 | |
2650 | // We accept integer and pointer types only |
2651 | llvm::Type *OrigTy = CGM.getTypes().ConvertType(T: Dst.getType()); |
2652 | llvm::Type *Ty = OrigTy; |
2653 | if (OrigTy->isPointerTy()) |
2654 | Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); |
2655 | llvm::Type *Types[] = { Ty }; |
2656 | |
2657 | llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::write_register, Tys: Types); |
2658 | llvm::Value *Value = Src.getScalarVal(); |
2659 | if (OrigTy->isPointerTy()) |
2660 | Value = Builder.CreatePtrToInt(V: Value, DestTy: Ty); |
2661 | Builder.CreateCall( |
2662 | Callee: F, Args: {llvm::MetadataAsValue::get(Context&: Ty->getContext(), MD: RegName), Value}); |
2663 | } |
2664 | |
// setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
// selecting the write-barrier API. The class is currently a global, an ivar,
// or neither.
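// For example, under -fobjc-gc an assignment through a global, such as
// 'static id g; ... g = x;', should use the global write-barrier
// (objc_assign_global), while 'self->ivar = x;' should use the ivar
// write-barrier (objc_assign_ivar); the flags set here drive that choice.
// (Runtime entry-point names are mentioned for illustration.)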
2668 | static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, |
2669 | LValue &LV, |
2670 | bool IsMemberAccess=false) { |
2671 | if (Ctx.getLangOpts().getGC() == LangOptions::NonGC) |
2672 | return; |
2673 | |
2674 | if (isa<ObjCIvarRefExpr>(Val: E)) { |
2675 | QualType ExpTy = E->getType(); |
2676 | if (IsMemberAccess && ExpTy->isPointerType()) { |
      // If the ivar is a structure pointer, assigning to a field of that
      // struct follows gcc's behavior and conservatively makes it a non-ivar
      // write-barrier.
2680 | ExpTy = ExpTy->castAs<PointerType>()->getPointeeType(); |
2681 | if (ExpTy->isRecordType()) { |
2682 | LV.setObjCIvar(false); |
2683 | return; |
2684 | } |
2685 | } |
2686 | LV.setObjCIvar(true); |
2687 | auto *Exp = cast<ObjCIvarRefExpr>(Val: const_cast<Expr *>(E)); |
2688 | LV.setBaseIvarExp(Exp->getBase()); |
2689 | LV.setObjCArray(E->getType()->isArrayType()); |
2690 | return; |
2691 | } |
2692 | |
2693 | if (const auto *Exp = dyn_cast<DeclRefExpr>(Val: E)) { |
2694 | if (const auto *VD = dyn_cast<VarDecl>(Val: Exp->getDecl())) { |
2695 | if (VD->hasGlobalStorage()) { |
2696 | LV.setGlobalObjCRef(true); |
2697 | LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None); |
2698 | } |
2699 | } |
2700 | LV.setObjCArray(E->getType()->isArrayType()); |
2701 | return; |
2702 | } |
2703 | |
2704 | if (const auto *Exp = dyn_cast<UnaryOperator>(Val: E)) { |
2705 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2706 | return; |
2707 | } |
2708 | |
2709 | if (const auto *Exp = dyn_cast<ParenExpr>(Val: E)) { |
2710 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2711 | if (LV.isObjCIvar()) { |
2712 | // If cast is to a structure pointer, follow gcc's behavior and make it |
2713 | // a non-ivar write-barrier. |
2714 | QualType ExpTy = E->getType(); |
2715 | if (ExpTy->isPointerType()) |
2716 | ExpTy = ExpTy->castAs<PointerType>()->getPointeeType(); |
2717 | if (ExpTy->isRecordType()) |
2718 | LV.setObjCIvar(false); |
2719 | } |
2720 | return; |
2721 | } |
2722 | |
2723 | if (const auto *Exp = dyn_cast<GenericSelectionExpr>(Val: E)) { |
2724 | setObjCGCLValueClass(Ctx, E: Exp->getResultExpr(), LV); |
2725 | return; |
2726 | } |
2727 | |
2728 | if (const auto *Exp = dyn_cast<ImplicitCastExpr>(Val: E)) { |
2729 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2730 | return; |
2731 | } |
2732 | |
2733 | if (const auto *Exp = dyn_cast<CStyleCastExpr>(Val: E)) { |
2734 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2735 | return; |
2736 | } |
2737 | |
2738 | if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(Val: E)) { |
2739 | setObjCGCLValueClass(Ctx, E: Exp->getSubExpr(), LV, IsMemberAccess); |
2740 | return; |
2741 | } |
2742 | |
2743 | if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(Val: E)) { |
2744 | setObjCGCLValueClass(Ctx, E: Exp->getBase(), LV); |
2745 | if (LV.isObjCIvar() && !LV.isObjCArray()) |
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2748 | LV.setObjCIvar(false); |
2749 | else if (LV.isGlobalObjCRef() && !LV.isObjCArray()) |
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
2752 | LV.setGlobalObjCRef(false); |
2753 | return; |
2754 | } |
2755 | |
2756 | if (const auto *Exp = dyn_cast<MemberExpr>(Val: E)) { |
2757 | setObjCGCLValueClass(Ctx, E: Exp->getBase(), LV, IsMemberAccess: true); |
    // We don't know if the member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
2760 | LV.setObjCArray(E->getType()->isArrayType()); |
2761 | return; |
2762 | } |
2763 | } |
2764 | |
2765 | static LValue EmitThreadPrivateVarDeclLValue( |
2766 | CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, |
2767 | llvm::Type *RealVarTy, SourceLocation Loc) { |
2768 | if (CGF.CGM.getLangOpts().OpenMPIRBuilder) |
2769 | Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate( |
2770 | CGF, VD, VDAddr: Addr, Loc); |
2771 | else |
2772 | Addr = |
2773 | CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, VDAddr: Addr, Loc); |
2774 | |
2775 | Addr = Addr.withElementType(ElemTy: RealVarTy); |
2776 | return CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
2777 | } |
2778 | |
2779 | static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, |
2780 | const VarDecl *VD, QualType T) { |
2781 | std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res = |
2782 | OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD); |
  // Return an invalid address if the variable is MT_To (or MT_Enter starting
  // with OpenMP 5.2) and unified memory is not enabled. For all other cases
  // (MT_Link, or MT_To/MT_Enter with unified memory), return a valid address.
2786 | if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
2787 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
2788 | !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) |
2789 | return Address::invalid(); |
2790 | assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) || |
2791 | ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
2792 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
2793 | CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) && |
2794 | "Expected link clause OR to clause with unified memory enabled." ); |
2795 | QualType PtrTy = CGF.getContext().getPointerType(T: VD->getType()); |
2796 | Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD); |
2797 | return CGF.EmitLoadOfPointer(Ptr: Addr, PtrTy: PtrTy->castAs<PointerType>()); |
2798 | } |
2799 | |
2800 | Address |
2801 | CodeGenFunction::EmitLoadOfReference(LValue RefLVal, |
2802 | LValueBaseInfo *PointeeBaseInfo, |
2803 | TBAAAccessInfo *PointeeTBAAInfo) { |
2804 | llvm::LoadInst *Load = |
2805 | Builder.CreateLoad(Addr: RefLVal.getAddress(), IsVolatile: RefLVal.isVolatile()); |
2806 | CGM.DecorateInstructionWithTBAA(Inst: Load, TBAAInfo: RefLVal.getTBAAInfo()); |
2807 | return makeNaturalAddressForPointer(Ptr: Load, T: RefLVal.getType()->getPointeeType(), |
2808 | Alignment: CharUnits(), /*ForPointeeType=*/true, |
2809 | BaseInfo: PointeeBaseInfo, TBAAInfo: PointeeTBAAInfo); |
2810 | } |
2811 | |
2812 | LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) { |
2813 | LValueBaseInfo PointeeBaseInfo; |
2814 | TBAAAccessInfo PointeeTBAAInfo; |
2815 | Address PointeeAddr = EmitLoadOfReference(RefLVal, PointeeBaseInfo: &PointeeBaseInfo, |
2816 | PointeeTBAAInfo: &PointeeTBAAInfo); |
2817 | return MakeAddrLValue(Addr: PointeeAddr, T: RefLVal.getType()->getPointeeType(), |
2818 | BaseInfo: PointeeBaseInfo, TBAAInfo: PointeeTBAAInfo); |
2819 | } |
2820 | |
2821 | Address CodeGenFunction::EmitLoadOfPointer(Address Ptr, |
2822 | const PointerType *PtrTy, |
2823 | LValueBaseInfo *BaseInfo, |
2824 | TBAAAccessInfo *TBAAInfo) { |
2825 | llvm::Value *Addr = Builder.CreateLoad(Addr: Ptr); |
2826 | return makeNaturalAddressForPointer(Ptr: Addr, T: PtrTy->getPointeeType(), |
2827 | Alignment: CharUnits(), /*ForPointeeType=*/true, |
2828 | BaseInfo, TBAAInfo); |
2829 | } |
2830 | |
2831 | LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr, |
2832 | const PointerType *PtrTy) { |
2833 | LValueBaseInfo BaseInfo; |
2834 | TBAAAccessInfo TBAAInfo; |
2835 | Address Addr = EmitLoadOfPointer(Ptr: PtrAddr, PtrTy, BaseInfo: &BaseInfo, TBAAInfo: &TBAAInfo); |
2836 | return MakeAddrLValue(Addr, T: PtrTy->getPointeeType(), BaseInfo, TBAAInfo); |
2837 | } |
2838 | |
2839 | static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, |
2840 | const Expr *E, const VarDecl *VD) { |
2841 | QualType T = E->getType(); |
2842 | |
2843 | // If it's thread_local, emit a call to its wrapper function instead. |
2844 | if (VD->getTLSKind() == VarDecl::TLS_Dynamic && |
2845 | CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD)) |
2846 | return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, LValType: T); |
2847 | // Check if the variable is marked as declare target with link clause in |
2848 | // device codegen. |
2849 | if (CGF.getLangOpts().OpenMPIsTargetDevice) { |
2850 | Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T); |
2851 | if (Addr.isValid()) |
2852 | return CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
2853 | } |
2854 | |
2855 | llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(D: VD); |
2856 | |
2857 | if (VD->getTLSKind() != VarDecl::TLS_None) |
2858 | V = CGF.Builder.CreateThreadLocalAddress(Ptr: V); |
2859 | |
2860 | llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(T: VD->getType()); |
2861 | CharUnits Alignment = CGF.getContext().getDeclAlign(D: VD); |
2862 | Address Addr(V, RealVarTy, Alignment); |
2863 | // Emit reference to the private copy of the variable if it is an OpenMP |
2864 | // threadprivate variable. |
2865 | if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd && |
2866 | VD->hasAttr<OMPThreadPrivateDeclAttr>()) { |
2867 | return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy, |
2868 | Loc: E->getExprLoc()); |
2869 | } |
2870 | LValue LV = VD->getType()->isReferenceType() ? |
2871 | CGF.EmitLoadOfReferenceLValue(RefAddr: Addr, RefTy: VD->getType(), |
2872 | Source: AlignmentSource::Decl) : |
2873 | CGF.MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
2874 | setObjCGCLValueClass(Ctx: CGF.getContext(), E, LV); |
2875 | return LV; |
2876 | } |
2877 | |
2878 | llvm::Constant *CodeGenModule::getRawFunctionPointer(GlobalDecl GD, |
2879 | llvm::Type *Ty) { |
2880 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
2881 | if (FD->hasAttr<WeakRefAttr>()) { |
2882 | ConstantAddress aliasee = GetWeakRefReference(VD: FD); |
2883 | return aliasee.getPointer(); |
2884 | } |
2885 | |
2886 | llvm::Constant *V = GetAddrOfFunction(GD, Ty); |
2887 | return V; |
2888 | } |
2889 | |
2890 | static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, |
2891 | GlobalDecl GD) { |
2892 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
2893 | llvm::Constant *V = CGF.CGM.getFunctionPointer(GD); |
2894 | CharUnits Alignment = CGF.getContext().getDeclAlign(D: FD); |
2895 | return CGF.MakeAddrLValue(V, T: E->getType(), Alignment, |
2896 | Source: AlignmentSource::Decl); |
2897 | } |
2898 | |
2899 | static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, |
2900 | llvm::Value *ThisValue) { |
2901 | |
2902 | return CGF.EmitLValueForLambdaField(Field: FD, ThisValue); |
2903 | } |
2904 | |
/// Named registers are named metadata pointing to the register name,
/// which will be read from/written to as an argument to the intrinsics
/// @llvm.read_register/@llvm.write_register.
2908 | /// So far, only the name is being passed down, but other options such as |
2909 | /// register type, allocation type or even optimization options could be |
2910 | /// passed down via the metadata node. |
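///
/// For example (illustrative only), a global register variable declared as
///   register unsigned long current_sp asm("sp");
/// yields the named metadata !llvm.named.register.sp = !{!0} with
/// !0 = !{!"sp"}, and accesses to the variable become calls to
/// @llvm.read_register/@llvm.write_register with !0 as an argument.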
2911 | static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) { |
2912 | SmallString<64> Name("llvm.named.register." ); |
2913 | AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>(); |
2914 | assert(Asm->getLabel().size() < 64-Name.size() && |
2915 | "Register name too big" ); |
2916 | Name.append(RHS: Asm->getLabel()); |
2917 | llvm::NamedMDNode *M = |
2918 | CGM.getModule().getOrInsertNamedMetadata(Name); |
2919 | if (M->getNumOperands() == 0) { |
2920 | llvm::MDString *Str = llvm::MDString::get(Context&: CGM.getLLVMContext(), |
2921 | Str: Asm->getLabel()); |
2922 | llvm::Metadata *Ops[] = {Str}; |
2923 | M->addOperand(M: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: Ops)); |
2924 | } |
2925 | |
2926 | CharUnits Alignment = CGM.getContext().getDeclAlign(D: VD); |
2927 | |
2928 | llvm::Value *Ptr = |
2929 | llvm::MetadataAsValue::get(Context&: CGM.getLLVMContext(), MD: M->getOperand(i: 0)); |
2930 | return LValue::MakeGlobalReg(V: Ptr, alignment: Alignment, type: VD->getType()); |
2931 | } |
2932 | |
2933 | /// Determine whether we can emit a reference to \p VD from the current |
2934 | /// context, despite not necessarily having seen an odr-use of the variable in |
2935 | /// this context. |
2936 | static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, |
2937 | const DeclRefExpr *E, |
2938 | const VarDecl *VD) { |
2939 | // For a variable declared in an enclosing scope, do not emit a spurious |
2940 | // reference even if we have a capture, as that will emit an unwarranted |
2941 | // reference to our capture state, and will likely generate worse code than |
2942 | // emitting a local copy. |
2943 | if (E->refersToEnclosingVariableOrCapture()) |
2944 | return false; |
2945 | |
2946 | // For a local declaration declared in this function, we can always reference |
2947 | // it even if we don't have an odr-use. |
2948 | if (VD->hasLocalStorage()) { |
2949 | return VD->getDeclContext() == |
2950 | dyn_cast_or_null<DeclContext>(Val: CGF.CurCodeDecl); |
2951 | } |
2952 | |
2953 | // For a global declaration, we can emit a reference to it if we know |
2954 | // for sure that we are able to emit a definition of it. |
2955 | VD = VD->getDefinition(C&: CGF.getContext()); |
2956 | if (!VD) |
2957 | return false; |
2958 | |
2959 | // Don't emit a spurious reference if it might be to a variable that only |
2960 | // exists on a different device / target. |
2961 | // FIXME: This is unnecessarily broad. Check whether this would actually be a |
2962 | // cross-target reference. |
2963 | if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA || |
2964 | CGF.getLangOpts().OpenCL) { |
2965 | return false; |
2966 | } |
2967 | |
2968 | // We can emit a spurious reference only if the linkage implies that we'll |
2969 | // be emitting a non-interposable symbol that will be retained until link |
2970 | // time. |
2971 | switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) { |
2972 | case llvm::GlobalValue::ExternalLinkage: |
2973 | case llvm::GlobalValue::LinkOnceODRLinkage: |
2974 | case llvm::GlobalValue::WeakODRLinkage: |
2975 | case llvm::GlobalValue::InternalLinkage: |
2976 | case llvm::GlobalValue::PrivateLinkage: |
2977 | return true; |
2978 | default: |
2979 | return false; |
2980 | } |
2981 | } |
2982 | |
2983 | LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { |
2984 | const NamedDecl *ND = E->getDecl(); |
2985 | QualType T = E->getType(); |
2986 | |
2987 | assert(E->isNonOdrUse() != NOUR_Unevaluated && |
2988 | "should not emit an unevaluated operand" ); |
2989 | |
2990 | if (const auto *VD = dyn_cast<VarDecl>(Val: ND)) { |
    // Global named registers are accessed via intrinsics only.
2992 | if (VD->getStorageClass() == SC_Register && |
2993 | VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl()) |
2994 | return EmitGlobalNamedRegister(VD, CGM); |
2995 | |
2996 | // If this DeclRefExpr does not constitute an odr-use of the variable, |
2997 | // we're not permitted to emit a reference to it in general, and it might |
2998 | // not be captured if capture would be necessary for a use. Emit the |
2999 | // constant value directly instead. |
3000 | if (E->isNonOdrUse() == NOUR_Constant && |
3001 | (VD->getType()->isReferenceType() || |
3002 | !canEmitSpuriousReferenceToVariable(CGF&: *this, E, VD))) { |
3003 | VD->getAnyInitializer(D&: VD); |
3004 | llvm::Constant *Val = ConstantEmitter(*this).emitAbstract( |
3005 | loc: E->getLocation(), value: *VD->evaluateValue(), T: VD->getType()); |
3006 | assert(Val && "failed to emit constant expression" ); |
3007 | |
3008 | Address Addr = Address::invalid(); |
3009 | if (!VD->getType()->isReferenceType()) { |
3010 | // Spill the constant value to a global. |
3011 | Addr = CGM.createUnnamedGlobalFrom(D: *VD, Constant: Val, |
3012 | Align: getContext().getDeclAlign(D: VD)); |
3013 | llvm::Type *VarTy = getTypes().ConvertTypeForMem(T: VD->getType()); |
3014 | auto *PTy = llvm::PointerType::get( |
3015 | ElementType: VarTy, AddressSpace: getTypes().getTargetAddressSpace(T: VD->getType())); |
3016 | Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty: PTy, ElementTy: VarTy); |
3017 | } else { |
3018 | // Should we be using the alignment of the constant pointer we emitted? |
3019 | CharUnits Alignment = |
3020 | CGM.getNaturalTypeAlignment(T: E->getType(), |
3021 | /* BaseInfo= */ nullptr, |
3022 | /* TBAAInfo= */ nullptr, |
3023 | /* forPointeeType= */ true); |
3024 | Addr = makeNaturalAddressForPointer(Ptr: Val, T, Alignment); |
3025 | } |
3026 | return MakeAddrLValue(Addr, T, Source: AlignmentSource::Decl); |
3027 | } |
3028 | |
3029 | // FIXME: Handle other kinds of non-odr-use DeclRefExprs. |
3030 | |
3031 | // Check for captured variables. |
3032 | if (E->refersToEnclosingVariableOrCapture()) { |
3033 | VD = VD->getCanonicalDecl(); |
3034 | if (auto *FD = LambdaCaptureFields.lookup(Val: VD)) |
3035 | return EmitCapturedFieldLValue(CGF&: *this, FD, ThisValue: CXXABIThisValue); |
3036 | if (CapturedStmtInfo) { |
3037 | auto I = LocalDeclMap.find(Val: VD); |
3038 | if (I != LocalDeclMap.end()) { |
3039 | LValue CapLVal; |
3040 | if (VD->getType()->isReferenceType()) |
3041 | CapLVal = EmitLoadOfReferenceLValue(RefAddr: I->second, RefTy: VD->getType(), |
3042 | Source: AlignmentSource::Decl); |
3043 | else |
3044 | CapLVal = MakeAddrLValue(Addr: I->second, T); |
3045 | // Mark lvalue as nontemporal if the variable is marked as nontemporal |
3046 | // in simd context. |
3047 | if (getLangOpts().OpenMP && |
3048 | CGM.getOpenMPRuntime().isNontemporalDecl(VD)) |
3049 | CapLVal.setNontemporal(/*Value=*/true); |
3050 | return CapLVal; |
3051 | } |
3052 | LValue CapLVal = |
3053 | EmitCapturedFieldLValue(CGF&: *this, FD: CapturedStmtInfo->lookup(VD), |
3054 | ThisValue: CapturedStmtInfo->getContextValue()); |
3055 | Address LValueAddress = CapLVal.getAddress(); |
3056 | CapLVal = MakeAddrLValue(Addr: Address(LValueAddress.emitRawPointer(CGF&: *this), |
3057 | LValueAddress.getElementType(), |
3058 | getContext().getDeclAlign(D: VD)), |
3059 | T: CapLVal.getType(), |
3060 | BaseInfo: LValueBaseInfo(AlignmentSource::Decl), |
3061 | TBAAInfo: CapLVal.getTBAAInfo()); |
3062 | // Mark lvalue as nontemporal if the variable is marked as nontemporal |
3063 | // in simd context. |
3064 | if (getLangOpts().OpenMP && |
3065 | CGM.getOpenMPRuntime().isNontemporalDecl(VD)) |
3066 | CapLVal.setNontemporal(/*Value=*/true); |
3067 | return CapLVal; |
3068 | } |
3069 | |
3070 | assert(isa<BlockDecl>(CurCodeDecl)); |
3071 | Address addr = GetAddrOfBlockDecl(var: VD); |
3072 | return MakeAddrLValue(Addr: addr, T, Source: AlignmentSource::Decl); |
3073 | } |
3074 | } |
3075 | |
3076 | // FIXME: We should be able to assert this for FunctionDecls as well! |
3077 | // FIXME: We should be able to assert this for all DeclRefExprs, not just |
3078 | // those with a valid source location. |
3079 | assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() || |
3080 | !E->getLocation().isValid()) && |
3081 | "Should not use decl without marking it used!" ); |
3082 | |
3083 | if (ND->hasAttr<WeakRefAttr>()) { |
3084 | const auto *VD = cast<ValueDecl>(Val: ND); |
3085 | ConstantAddress Aliasee = CGM.GetWeakRefReference(VD); |
3086 | return MakeAddrLValue(Addr: Aliasee, T, Source: AlignmentSource::Decl); |
3087 | } |
3088 | |
3089 | if (const auto *VD = dyn_cast<VarDecl>(Val: ND)) { |
3090 | // Check if this is a global variable. |
3091 | if (VD->hasLinkage() || VD->isStaticDataMember()) |
3092 | return EmitGlobalVarDeclLValue(CGF&: *this, E, VD); |
3093 | |
3094 | Address addr = Address::invalid(); |
3095 | |
3096 | // The variable should generally be present in the local decl map. |
3097 | auto iter = LocalDeclMap.find(Val: VD); |
3098 | if (iter != LocalDeclMap.end()) { |
3099 | addr = iter->second; |
3100 | |
      // Otherwise, it might be a static local we haven't emitted yet for
      // some reason; most likely because it's in an outer function.
3103 | } else if (VD->isStaticLocal()) { |
3104 | llvm::Constant *var = CGM.getOrCreateStaticVarDecl( |
3105 | D: *VD, Linkage: CGM.getLLVMLinkageVarDefinition(VD)); |
3106 | addr = Address( |
3107 | var, ConvertTypeForMem(T: VD->getType()), getContext().getDeclAlign(D: VD)); |
3108 | |
3109 | // No other cases for now. |
3110 | } else { |
3111 | llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?" ); |
3112 | } |
3113 | |
3114 | // Handle threadlocal function locals. |
3115 | if (VD->getTLSKind() != VarDecl::TLS_None) |
3116 | addr = addr.withPointer( |
3117 | NewPointer: Builder.CreateThreadLocalAddress(Ptr: addr.getBasePointer()), |
3118 | IsKnownNonNull: NotKnownNonNull); |
3119 | |
3120 | // Check for OpenMP threadprivate variables. |
3121 | if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && |
3122 | VD->hasAttr<OMPThreadPrivateDeclAttr>()) { |
3123 | return EmitThreadPrivateVarDeclLValue( |
3124 | CGF&: *this, VD, T, Addr: addr, RealVarTy: getTypes().ConvertTypeForMem(T: VD->getType()), |
3125 | Loc: E->getExprLoc()); |
3126 | } |
3127 | |
3128 | // Drill into block byref variables. |
3129 | bool isBlockByref = VD->isEscapingByref(); |
3130 | if (isBlockByref) { |
3131 | addr = emitBlockByrefAddress(baseAddr: addr, V: VD); |
3132 | } |
3133 | |
3134 | // Drill into reference types. |
3135 | LValue LV = VD->getType()->isReferenceType() ? |
3136 | EmitLoadOfReferenceLValue(RefAddr: addr, RefTy: VD->getType(), Source: AlignmentSource::Decl) : |
3137 | MakeAddrLValue(Addr: addr, T, Source: AlignmentSource::Decl); |
3138 | |
3139 | bool isLocalStorage = VD->hasLocalStorage(); |
3140 | |
3141 | bool NonGCable = isLocalStorage && |
3142 | !VD->getType()->isReferenceType() && |
3143 | !isBlockByref; |
3144 | if (NonGCable) { |
3145 | LV.getQuals().removeObjCGCAttr(); |
3146 | LV.setNonGC(true); |
3147 | } |
3148 | |
3149 | bool isImpreciseLifetime = |
3150 | (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>()); |
3151 | if (isImpreciseLifetime) |
3152 | LV.setARCPreciseLifetime(ARCImpreciseLifetime); |
3153 | setObjCGCLValueClass(Ctx: getContext(), E, LV); |
3154 | return LV; |
3155 | } |
3156 | |
3157 | if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) |
3158 | return EmitFunctionDeclLValue(CGF&: *this, E, GD: FD); |
3159 | |
3160 | // FIXME: While we're emitting a binding from an enclosing scope, all other |
3161 | // DeclRefExprs we see should be implicitly treated as if they also refer to |
3162 | // an enclosing scope. |
3163 | if (const auto *BD = dyn_cast<BindingDecl>(Val: ND)) { |
3164 | if (E->refersToEnclosingVariableOrCapture()) { |
3165 | auto *FD = LambdaCaptureFields.lookup(Val: BD); |
3166 | return EmitCapturedFieldLValue(CGF&: *this, FD, ThisValue: CXXABIThisValue); |
3167 | } |
3168 | return EmitLValue(E: BD->getBinding()); |
3169 | } |
3170 | |
3171 | // We can form DeclRefExprs naming GUID declarations when reconstituting |
3172 | // non-type template parameters into expressions. |
3173 | if (const auto *GD = dyn_cast<MSGuidDecl>(Val: ND)) |
3174 | return MakeAddrLValue(Addr: CGM.GetAddrOfMSGuidDecl(GD), T, |
3175 | Source: AlignmentSource::Decl); |
3176 | |
3177 | if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(Val: ND)) { |
3178 | auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO); |
3179 | auto AS = getLangASFromTargetAS(TargetAS: ATPO.getAddressSpace()); |
3180 | |
3181 | if (AS != T.getAddressSpace()) { |
3182 | auto TargetAS = getContext().getTargetAddressSpace(AS: T.getAddressSpace()); |
3183 | auto PtrTy = ATPO.getElementType()->getPointerTo(AddrSpace: TargetAS); |
3184 | auto ASC = getTargetHooks().performAddrSpaceCast( |
3185 | CGM, V: ATPO.getPointer(), SrcAddr: AS, DestAddr: T.getAddressSpace(), DestTy: PtrTy); |
3186 | ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment()); |
3187 | } |
3188 | |
3189 | return MakeAddrLValue(Addr: ATPO, T, Source: AlignmentSource::Decl); |
3190 | } |
3191 | |
3192 | llvm_unreachable("Unhandled DeclRefExpr" ); |
3193 | } |
3194 | |
3195 | LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { |
3196 | // __extension__ doesn't affect lvalue-ness. |
3197 | if (E->getOpcode() == UO_Extension) |
3198 | return EmitLValue(E: E->getSubExpr()); |
3199 | |
3200 | QualType ExprTy = getContext().getCanonicalType(T: E->getSubExpr()->getType()); |
3201 | switch (E->getOpcode()) { |
3202 | default: llvm_unreachable("Unknown unary operator lvalue!" ); |
3203 | case UO_Deref: { |
3204 | QualType T = E->getSubExpr()->getType()->getPointeeType(); |
3205 | assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type" ); |
3206 | |
3207 | LValueBaseInfo BaseInfo; |
3208 | TBAAAccessInfo TBAAInfo; |
3209 | Address Addr = EmitPointerWithAlignment(E: E->getSubExpr(), BaseInfo: &BaseInfo, |
3210 | TBAAInfo: &TBAAInfo); |
3211 | LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo); |
3212 | LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); |
3213 | |
    // We should not generate a __weak write barrier on an indirect reference
    // to a pointer to object, as in: void foo(__weak id *param); *param = 0;
    // But we do continue to generate a __strong write barrier on an indirect
    // write into a pointer to object.
3218 | if (getLangOpts().ObjC && |
3219 | getLangOpts().getGC() != LangOptions::NonGC && |
3220 | LV.isObjCWeak()) |
3221 | LV.setNonGC(!E->isOBJCGCCandidate(Ctx&: getContext())); |
3222 | return LV; |
3223 | } |
3224 | case UO_Real: |
3225 | case UO_Imag: { |
3226 | LValue LV = EmitLValue(E: E->getSubExpr()); |
3227 | assert(LV.isSimple() && "real/imag on non-ordinary l-value" ); |
3228 | |
3229 | // __real is valid on scalars. This is a faster way of testing that. |
3230 | // __imag can only produce an rvalue on scalars. |
3231 | if (E->getOpcode() == UO_Real && |
3232 | !LV.getAddress().getElementType()->isStructTy()) { |
3233 | assert(E->getSubExpr()->getType()->isArithmeticType()); |
3234 | return LV; |
3235 | } |
3236 | |
3237 | QualType T = ExprTy->castAs<ComplexType>()->getElementType(); |
3238 | |
3239 | Address Component = |
3240 | (E->getOpcode() == UO_Real |
3241 | ? emitAddrOfRealComponent(complex: LV.getAddress(), complexType: LV.getType()) |
3242 | : emitAddrOfImagComponent(complex: LV.getAddress(), complexType: LV.getType())); |
3243 | LValue ElemLV = MakeAddrLValue(Addr: Component, T, BaseInfo: LV.getBaseInfo(), |
3244 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: T)); |
3245 | ElemLV.getQuals().addQualifiers(Q: LV.getQuals()); |
3246 | return ElemLV; |
3247 | } |
3248 | case UO_PreInc: |
3249 | case UO_PreDec: { |
3250 | LValue LV = EmitLValue(E: E->getSubExpr()); |
3251 | bool isInc = E->getOpcode() == UO_PreInc; |
3252 | |
3253 | if (E->getType()->isAnyComplexType()) |
3254 | EmitComplexPrePostIncDec(E, LV, isInc, isPre: true/*isPre*/); |
3255 | else |
3256 | EmitScalarPrePostIncDec(E, LV, isInc, isPre: true/*isPre*/); |
3257 | return LV; |
3258 | } |
3259 | } |
3260 | } |
3261 | |
3262 | LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { |
3263 | return MakeAddrLValue(Addr: CGM.GetAddrOfConstantStringFromLiteral(S: E), |
3264 | T: E->getType(), Source: AlignmentSource::Decl); |
3265 | } |
3266 | |
3267 | LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { |
3268 | return MakeAddrLValue(Addr: CGM.GetAddrOfConstantStringFromObjCEncode(E), |
3269 | T: E->getType(), Source: AlignmentSource::Decl); |
3270 | } |
3271 | |
3272 | LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { |
3273 | auto SL = E->getFunctionName(); |
3274 | assert(SL != nullptr && "No StringLiteral name in PredefinedExpr" ); |
3275 | StringRef FnName = CurFn->getName(); |
3276 | if (FnName.starts_with(Prefix: "\01" )) |
3277 | FnName = FnName.substr(Start: 1); |
3278 | StringRef NameItems[] = { |
3279 | PredefinedExpr::getIdentKindName(IK: E->getIdentKind()), FnName}; |
3280 | std::string GVName = llvm::join(Begin: NameItems, End: NameItems + 2, Separator: "." ); |
3281 | if (auto *BD = dyn_cast_or_null<BlockDecl>(Val: CurCodeDecl)) { |
3282 | std::string Name = std::string(SL->getString()); |
3283 | if (!Name.empty()) { |
3284 | unsigned Discriminator = |
3285 | CGM.getCXXABI().getMangleContext().getBlockId(BD, Local: true); |
3286 | if (Discriminator) |
3287 | Name += "_" + Twine(Discriminator + 1).str(); |
3288 | auto C = CGM.GetAddrOfConstantCString(Str: Name, GlobalName: GVName.c_str()); |
3289 | return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl); |
3290 | } else { |
3291 | auto C = |
3292 | CGM.GetAddrOfConstantCString(Str: std::string(FnName), GlobalName: GVName.c_str()); |
3293 | return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl); |
3294 | } |
3295 | } |
3296 | auto C = CGM.GetAddrOfConstantStringFromLiteral(S: SL, Name: GVName); |
3297 | return MakeAddrLValue(Addr: C, T: E->getType(), Source: AlignmentSource::Decl); |
3298 | } |
3299 | |
3300 | /// Emit a type description suitable for use by a runtime sanitizer library. The |
3301 | /// format of a type descriptor is |
3302 | /// |
3303 | /// \code |
3304 | /// { i16 TypeKind, i16 TypeInfo } |
3305 | /// \endcode |
3306 | /// |
3307 | /// followed by an array of i8 containing the type name. TypeKind is 0 for an |
3308 | /// integer, 1 for a floating point value, and -1 for anything else. |
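///
/// For example, a 32-bit signed 'int' would typically be described as
/// { 0, (5 << 1) | 1 }, i.e. { 0, 11 }, followed by the string "'int'".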
3309 | llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { |
3310 | // Only emit each type's descriptor once. |
3311 | if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(Ty: T)) |
3312 | return C; |
3313 | |
3314 | uint16_t TypeKind = -1; |
3315 | uint16_t TypeInfo = 0; |
3316 | |
3317 | if (T->isIntegerType()) { |
3318 | TypeKind = 0; |
3319 | TypeInfo = (llvm::Log2_32(Value: getContext().getTypeSize(T)) << 1) | |
3320 | (T->isSignedIntegerType() ? 1 : 0); |
3321 | } else if (T->isFloatingType()) { |
3322 | TypeKind = 1; |
3323 | TypeInfo = getContext().getTypeSize(T); |
3324 | } |
3325 | |
3326 | // Format the type name as if for a diagnostic, including quotes and |
3327 | // optionally an 'aka'. |
3328 | SmallString<32> Buffer; |
3329 | CGM.getDiags().ConvertArgToString( |
3330 | Kind: DiagnosticsEngine::ak_qualtype, Val: (intptr_t)T.getAsOpaquePtr(), Modifier: StringRef(), |
3331 | Argument: StringRef(), PrevArgs: std::nullopt, Output&: Buffer, QualTypeVals: std::nullopt); |
3332 | |
3333 | llvm::Constant *Components[] = { |
3334 | Builder.getInt16(C: TypeKind), Builder.getInt16(C: TypeInfo), |
3335 | llvm::ConstantDataArray::getString(Context&: getLLVMContext(), Initializer: Buffer) |
3336 | }; |
3337 | llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(V: Components); |
3338 | |
3339 | auto *GV = new llvm::GlobalVariable( |
3340 | CGM.getModule(), Descriptor->getType(), |
3341 | /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); |
3342 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3343 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); |
3344 | |
3345 | // Remember the descriptor for this type. |
3346 | CGM.setTypeDescriptorInMap(Ty: T, C: GV); |
3347 | |
3348 | return GV; |
3349 | } |
3350 | |
3351 | llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { |
3352 | llvm::Type *TargetTy = IntPtrTy; |
3353 | |
3354 | if (V->getType() == TargetTy) |
3355 | return V; |
3356 | |
3357 | // Floating-point types which fit into intptr_t are bitcast to integers |
3358 | // and then passed directly (after zero-extension, if necessary). |
3359 | if (V->getType()->isFloatingPointTy()) { |
3360 | unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue(); |
3361 | if (Bits <= TargetTy->getIntegerBitWidth()) |
3362 | V = Builder.CreateBitCast(V, DestTy: llvm::Type::getIntNTy(C&: getLLVMContext(), |
3363 | N: Bits)); |
3364 | } |
3365 | |
3366 | // Integers which fit in intptr_t are zero-extended and passed directly. |
3367 | if (V->getType()->isIntegerTy() && |
3368 | V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) |
3369 | return Builder.CreateZExt(V, DestTy: TargetTy); |
3370 | |
3371 | // Pointers are passed directly, everything else is passed by address. |
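  // (For example, an x86 80-bit 'long double' does not fit in a 64-bit
  // intptr_t, so it would be spilled to a temporary below and its address
  // passed instead.)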
3372 | if (!V->getType()->isPointerTy()) { |
3373 | RawAddress Ptr = CreateDefaultAlignTempAlloca(Ty: V->getType()); |
3374 | Builder.CreateStore(Val: V, Addr: Ptr); |
3375 | V = Ptr.getPointer(); |
3376 | } |
3377 | return Builder.CreatePtrToInt(V, DestTy: TargetTy); |
3378 | } |
3379 | |
3380 | /// Emit a representation of a SourceLocation for passing to a handler |
3381 | /// in a sanitizer runtime library. The format for this data is: |
3382 | /// \code |
3383 | /// struct SourceLocation { |
3384 | /// const char *Filename; |
3385 | /// int32_t Line, Column; |
3386 | /// }; |
3387 | /// \endcode |
3388 | /// For an invalid SourceLocation, the Filename pointer is null. |
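///
/// For example, a check at line 10, column 3 of a hypothetical file "foo.c"
/// is encoded as { "foo.c", 10, 3 }.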
3389 | llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { |
3390 | llvm::Constant *Filename; |
3391 | int Line, Column; |
3392 | |
3393 | PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); |
3394 | if (PLoc.isValid()) { |
3395 | StringRef FilenameString = PLoc.getFilename(); |
3396 | |
3397 | int PathComponentsToStrip = |
3398 | CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip; |
3399 | if (PathComponentsToStrip < 0) { |
3400 | assert(PathComponentsToStrip != INT_MIN); |
3401 | int PathComponentsToKeep = -PathComponentsToStrip; |
3402 | auto I = llvm::sys::path::rbegin(path: FilenameString); |
3403 | auto E = llvm::sys::path::rend(path: FilenameString); |
3404 | while (I != E && --PathComponentsToKeep) |
3405 | ++I; |
3406 | |
3407 | FilenameString = FilenameString.substr(Start: I - E); |
3408 | } else if (PathComponentsToStrip > 0) { |
3409 | auto I = llvm::sys::path::begin(path: FilenameString); |
3410 | auto E = llvm::sys::path::end(path: FilenameString); |
3411 | while (I != E && PathComponentsToStrip--) |
3412 | ++I; |
3413 | |
3414 | if (I != E) |
3415 | FilenameString = |
3416 | FilenameString.substr(Start: I - llvm::sys::path::begin(path: FilenameString)); |
3417 | else |
3418 | FilenameString = llvm::sys::path::filename(path: FilenameString); |
3419 | } |
3420 | |
3421 | auto FilenameGV = |
3422 | CGM.GetAddrOfConstantCString(Str: std::string(FilenameString), GlobalName: ".src" ); |
3423 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal( |
3424 | GV: cast<llvm::GlobalVariable>( |
3425 | Val: FilenameGV.getPointer()->stripPointerCasts())); |
3426 | Filename = FilenameGV.getPointer(); |
3427 | Line = PLoc.getLine(); |
3428 | Column = PLoc.getColumn(); |
3429 | } else { |
3430 | Filename = llvm::Constant::getNullValue(Ty: Int8PtrTy); |
3431 | Line = Column = 0; |
3432 | } |
3433 | |
3434 | llvm::Constant *Data[] = {Filename, Builder.getInt32(C: Line), |
3435 | Builder.getInt32(C: Column)}; |
3436 | |
3437 | return llvm::ConstantStruct::getAnon(V: Data); |
3438 | } |
3439 | |
3440 | namespace { |
3441 | /// Specify under what conditions this check can be recovered |
3442 | enum class CheckRecoverableKind { |
3443 | /// Always terminate program execution if this check fails. |
3444 | Unrecoverable, |
3445 | /// Check supports recovering, runtime has both fatal (noreturn) and |
3446 | /// non-fatal handlers for this check. |
3447 | Recoverable, |
  /// The runtime conditionally aborts; recovery must always be supported.
3449 | AlwaysRecoverable |
3450 | }; |
3451 | } |
3452 | |
3453 | static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) { |
3454 | assert(Kind.countPopulation() == 1); |
3455 | if (Kind == SanitizerKind::Vptr) |
3456 | return CheckRecoverableKind::AlwaysRecoverable; |
3457 | else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable) |
3458 | return CheckRecoverableKind::Unrecoverable; |
3459 | else |
3460 | return CheckRecoverableKind::Recoverable; |
3461 | } |
3462 | |
3463 | namespace { |
3464 | struct SanitizerHandlerInfo { |
3465 | char const *const Name; |
3466 | unsigned Version; |
3467 | }; |
3468 | } |
3469 | |
3470 | const SanitizerHandlerInfo SanitizerHandlers[] = { |
3471 | #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version}, |
3472 | LIST_SANITIZER_CHECKS |
3473 | #undef SANITIZER_CHECK |
3474 | }; |
3475 | |
3476 | static void emitCheckHandlerCall(CodeGenFunction &CGF, |
3477 | llvm::FunctionType *FnType, |
3478 | ArrayRef<llvm::Value *> FnArgs, |
3479 | SanitizerHandler CheckHandler, |
3480 | CheckRecoverableKind RecoverKind, bool IsFatal, |
3481 | llvm::BasicBlock *ContBB) { |
3482 | assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable); |
3483 | std::optional<ApplyDebugLocation> DL; |
3484 | if (!CGF.Builder.getCurrentDebugLocation()) { |
3485 | // Ensure that the call has at least an artificial debug location. |
3486 | DL.emplace(args&: CGF, args: SourceLocation()); |
3487 | } |
3488 | bool NeedsAbortSuffix = |
3489 | IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable; |
3490 | bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime; |
3491 | const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler]; |
3492 | const StringRef CheckName = CheckInfo.Name; |
3493 | std::string FnName = "__ubsan_handle_" + CheckName.str(); |
3494 | if (CheckInfo.Version && !MinimalRuntime) |
3495 | FnName += "_v" + llvm::utostr(X: CheckInfo.Version); |
3496 | if (MinimalRuntime) |
3497 | FnName += "_minimal" ; |
3498 | if (NeedsAbortSuffix) |
3499 | FnName += "_abort" ; |
3500 | bool MayReturn = |
3501 | !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable; |
3502 | |
3503 | llvm::AttrBuilder B(CGF.getLLVMContext()); |
3504 | if (!MayReturn) { |
3505 | B.addAttribute(Val: llvm::Attribute::NoReturn) |
3506 | .addAttribute(Val: llvm::Attribute::NoUnwind); |
3507 | } |
3508 | B.addUWTableAttr(Kind: llvm::UWTableKind::Default); |
3509 | |
3510 | llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction( |
3511 | Ty: FnType, Name: FnName, |
3512 | ExtraAttrs: llvm::AttributeList::get(C&: CGF.getLLVMContext(), |
3513 | Index: llvm::AttributeList::FunctionIndex, B), |
3514 | /*Local=*/true); |
3515 | llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(callee: Fn, args: FnArgs); |
3516 | if (!MayReturn) { |
3517 | HandlerCall->setDoesNotReturn(); |
3518 | CGF.Builder.CreateUnreachable(); |
3519 | } else { |
3520 | CGF.Builder.CreateBr(Dest: ContBB); |
3521 | } |
3522 | } |
3523 | |
3524 | void CodeGenFunction::EmitCheck( |
3525 | ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked, |
3526 | SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs, |
3527 | ArrayRef<llvm::Value *> DynamicArgs) { |
3528 | assert(IsSanitizerScope); |
3529 | assert(Checked.size() > 0); |
3530 | assert(CheckHandler >= 0 && |
3531 | size_t(CheckHandler) < std::size(SanitizerHandlers)); |
3532 | const StringRef CheckName = SanitizerHandlers[CheckHandler].Name; |
3533 | |
3534 | llvm::Value *FatalCond = nullptr; |
3535 | llvm::Value *RecoverableCond = nullptr; |
3536 | llvm::Value *TrapCond = nullptr; |
3537 | for (int i = 0, n = Checked.size(); i < n; ++i) { |
3538 | llvm::Value *Check = Checked[i].first; |
3539 | // -fsanitize-trap= overrides -fsanitize-recover=. |
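    // (For example, with '-fsanitize=null -fsanitize-trap=null
    // -fsanitize-recover=null', the null check still folds into TrapCond.)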
3540 | llvm::Value *&Cond = |
3541 | CGM.getCodeGenOpts().SanitizeTrap.has(K: Checked[i].second) |
3542 | ? TrapCond |
3543 | : CGM.getCodeGenOpts().SanitizeRecover.has(K: Checked[i].second) |
3544 | ? RecoverableCond |
3545 | : FatalCond; |
3546 | Cond = Cond ? Builder.CreateAnd(LHS: Cond, RHS: Check) : Check; |
3547 | } |
3548 | |
3549 | if (ClSanitizeGuardChecks) { |
3550 | llvm::Value *Allow = |
3551 | Builder.CreateCall(Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::allow_ubsan_check), |
3552 | Args: llvm::ConstantInt::get(Ty: CGM.Int8Ty, V: CheckHandler)); |
3553 | |
3554 | for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) { |
3555 | if (*Cond) |
3556 | *Cond = Builder.CreateOr(LHS: *Cond, RHS: Builder.CreateNot(V: Allow)); |
3557 | } |
3558 | } |
3559 | |
3560 | if (TrapCond) |
3561 | EmitTrapCheck(Checked: TrapCond, CheckHandlerID: CheckHandler); |
3562 | if (!FatalCond && !RecoverableCond) |
3563 | return; |
3564 | |
3565 | llvm::Value *JointCond; |
3566 | if (FatalCond && RecoverableCond) |
3567 | JointCond = Builder.CreateAnd(LHS: FatalCond, RHS: RecoverableCond); |
3568 | else |
3569 | JointCond = FatalCond ? FatalCond : RecoverableCond; |
3570 | assert(JointCond); |
3571 | |
3572 | CheckRecoverableKind RecoverKind = getRecoverableKind(Kind: Checked[0].second); |
3573 | assert(SanOpts.has(Checked[0].second)); |
3574 | #ifndef NDEBUG |
3575 | for (int i = 1, n = Checked.size(); i < n; ++i) { |
3576 | assert(RecoverKind == getRecoverableKind(Checked[i].second) && |
3577 | "All recoverable kinds in a single check must be same!" ); |
3578 | assert(SanOpts.has(Checked[i].second)); |
3579 | } |
3580 | #endif |
3581 | |
3582 | llvm::BasicBlock *Cont = createBasicBlock(name: "cont" ); |
3583 | llvm::BasicBlock *Handlers = createBasicBlock(name: "handler." + CheckName); |
3584 | llvm::Instruction *Branch = Builder.CreateCondBr(Cond: JointCond, True: Cont, False: Handlers); |
  // Give a hint that we very much don't expect to execute the handler.
3586 | llvm::MDBuilder MDHelper(getLLVMContext()); |
3587 | llvm::MDNode *Node = MDHelper.createLikelyBranchWeights(); |
3588 | Branch->setMetadata(KindID: llvm::LLVMContext::MD_prof, Node); |
3589 | EmitBlock(BB: Handlers); |
3590 | |
3591 | // Handler functions take an i8* pointing to the (handler-specific) static |
3592 | // information block, followed by a sequence of intptr_t arguments |
3593 | // representing operand values. |
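  // For instance (handler name shown for illustration), an out-of-bounds
  // check ends up calling something like
  //   __ubsan_handle_out_of_bounds(&StaticData, IndexValue)
  // where StaticData is the private global emitted below and IndexValue is
  // the offending index, passed as an intptr_t.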
3594 | SmallVector<llvm::Value *, 4> Args; |
3595 | SmallVector<llvm::Type *, 4> ArgTypes; |
3596 | if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) { |
3597 | Args.reserve(N: DynamicArgs.size() + 1); |
3598 | ArgTypes.reserve(N: DynamicArgs.size() + 1); |
3599 | |
3600 | // Emit handler arguments and create handler function type. |
3601 | if (!StaticArgs.empty()) { |
3602 | llvm::Constant *Info = llvm::ConstantStruct::getAnon(V: StaticArgs); |
3603 | auto *InfoPtr = new llvm::GlobalVariable( |
3604 | CGM.getModule(), Info->getType(), false, |
3605 | llvm::GlobalVariable::PrivateLinkage, Info, "" , nullptr, |
3606 | llvm::GlobalVariable::NotThreadLocal, |
3607 | CGM.getDataLayout().getDefaultGlobalsAddressSpace()); |
3608 | InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3609 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV: InfoPtr); |
3610 | Args.push_back(Elt: InfoPtr); |
3611 | ArgTypes.push_back(Elt: Args.back()->getType()); |
3612 | } |
3613 | |
3614 | for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) { |
3615 | Args.push_back(Elt: EmitCheckValue(V: DynamicArgs[i])); |
3616 | ArgTypes.push_back(Elt: IntPtrTy); |
3617 | } |
3618 | } |
3619 | |
3620 | llvm::FunctionType *FnType = |
3621 | llvm::FunctionType::get(Result: CGM.VoidTy, Params: ArgTypes, isVarArg: false); |
3622 | |
3623 | if (!FatalCond || !RecoverableCond) { |
3624 | // Simple case: we need to generate a single handler call, either |
3625 | // fatal, or non-fatal. |
3626 | emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind, |
3627 | IsFatal: (FatalCond != nullptr), ContBB: Cont); |
3628 | } else { |
    // Emit two handler calls: one for the set of unrecoverable checks and
    // another for the recoverable ones.
3631 | llvm::BasicBlock *NonFatalHandlerBB = |
3632 | createBasicBlock(name: "non_fatal." + CheckName); |
3633 | llvm::BasicBlock *FatalHandlerBB = createBasicBlock(name: "fatal." + CheckName); |
3634 | Builder.CreateCondBr(Cond: FatalCond, True: NonFatalHandlerBB, False: FatalHandlerBB); |
3635 | EmitBlock(BB: FatalHandlerBB); |
3636 | emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind, IsFatal: true, |
3637 | ContBB: NonFatalHandlerBB); |
3638 | EmitBlock(BB: NonFatalHandlerBB); |
3639 | emitCheckHandlerCall(CGF&: *this, FnType, FnArgs: Args, CheckHandler, RecoverKind, IsFatal: false, |
3640 | ContBB: Cont); |
3641 | } |
3642 | |
3643 | EmitBlock(BB: Cont); |
3644 | } |
3645 | |
3646 | void CodeGenFunction::EmitCfiSlowPathCheck( |
3647 | SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, |
3648 | llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) { |
3649 | llvm::BasicBlock *Cont = createBasicBlock(name: "cfi.cont" ); |
3650 | |
3651 | llvm::BasicBlock *CheckBB = createBasicBlock(name: "cfi.slowpath" ); |
3652 | llvm::BranchInst *BI = Builder.CreateCondBr(Cond, True: Cont, False: CheckBB); |
3653 | |
3654 | llvm::MDBuilder MDHelper(getLLVMContext()); |
3655 | llvm::MDNode *Node = MDHelper.createLikelyBranchWeights(); |
3656 | BI->setMetadata(KindID: llvm::LLVMContext::MD_prof, Node); |
3657 | |
3658 | EmitBlock(BB: CheckBB); |
3659 | |
3660 | bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(K: Kind); |
3661 | |
3662 | llvm::CallInst *CheckCall; |
3663 | llvm::FunctionCallee SlowPathFn; |
3664 | if (WithDiag) { |
3665 | llvm::Constant *Info = llvm::ConstantStruct::getAnon(V: StaticArgs); |
3666 | auto *InfoPtr = |
3667 | new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, |
3668 | llvm::GlobalVariable::PrivateLinkage, Info); |
3669 | InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3670 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV: InfoPtr); |
3671 | |
3672 | SlowPathFn = CGM.getModule().getOrInsertFunction( |
3673 | Name: "__cfi_slowpath_diag" , |
3674 | T: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, Int8PtrTy, Int8PtrTy}, |
3675 | isVarArg: false)); |
3676 | CheckCall = Builder.CreateCall(Callee: SlowPathFn, Args: {TypeId, Ptr, InfoPtr}); |
3677 | } else { |
3678 | SlowPathFn = CGM.getModule().getOrInsertFunction( |
3679 | Name: "__cfi_slowpath" , |
3680 | T: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, Int8PtrTy}, isVarArg: false)); |
3681 | CheckCall = Builder.CreateCall(Callee: SlowPathFn, Args: {TypeId, Ptr}); |
3682 | } |
3683 | |
3684 | CGM.setDSOLocal( |
3685 | cast<llvm::GlobalValue>(Val: SlowPathFn.getCallee()->stripPointerCasts())); |
3686 | CheckCall->setDoesNotThrow(); |
3687 | |
3688 | EmitBlock(BB: Cont); |
3689 | } |
3690 | |
// Emit a stub for the __cfi_check function so that the linker knows about this
3692 | // symbol in LTO mode. |
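// In C terms, the stub's expected signature is:
//   void __cfi_check(uint64_t CallSiteTypeId, void *Addr,
//                    void *CFICheckFailData);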
3693 | void CodeGenFunction::EmitCfiCheckStub() { |
3694 | llvm::Module *M = &CGM.getModule(); |
3695 | ASTContext &C = getContext(); |
3696 | QualType QInt64Ty = C.getIntTypeForBitwidth(DestWidth: 64, Signed: false); |
3697 | |
3698 | FunctionArgList FnArgs; |
3699 | ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other); |
3700 | ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other); |
3701 | ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy, |
3702 | ImplicitParamKind::Other); |
3703 | FnArgs.push_back(Elt: &ArgCallsiteTypeId); |
3704 | FnArgs.push_back(Elt: &ArgAddr); |
3705 | FnArgs.push_back(Elt: &ArgCFICheckFailData); |
3706 | const CGFunctionInfo &FI = |
3707 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: C.VoidTy, args: FnArgs); |
3708 | |
3709 | llvm::Function *F = llvm::Function::Create( |
3710 | Ty: llvm::FunctionType::get(Result: VoidTy, Params: {Int64Ty, VoidPtrTy, VoidPtrTy}, isVarArg: false), |
3711 | Linkage: llvm::GlobalValue::WeakAnyLinkage, N: "__cfi_check" , M); |
3712 | CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F, /*IsThunk=*/false); |
3713 | CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F); |
3714 | F->setAlignment(llvm::Align(4096)); |
3715 | CGM.setDSOLocal(F); |
3716 | |
3717 | llvm::LLVMContext &Ctx = M->getContext(); |
3718 | llvm::BasicBlock *BB = llvm::BasicBlock::Create(Context&: Ctx, Name: "entry" , Parent: F); |
  // The CrossDSOCFI pass is not executed if there is no executable code.
3720 | SmallVector<llvm::Value*> Args{F->getArg(i: 2), F->getArg(i: 1)}; |
3721 | llvm::CallInst::Create(Func: M->getFunction(Name: "__cfi_check_fail" ), Args, NameStr: "" , InsertBefore: BB); |
3722 | llvm::ReturnInst::Create(C&: Ctx, retVal: nullptr, InsertBefore: BB); |
3723 | } |
3724 | |
3725 | // This function is basically a switch over the CFI failure kind, which is |
3726 | // extracted from CFICheckFailData (1st function argument). Each case is either |
3727 | // llvm.trap or a call to one of the two runtime handlers, based on |
// -fsanitize-trap and -fsanitize-recover settings. The default case (an
// invalid failure kind) traps, but this should really never happen.
// CFICheckFailData can be nullptr if the calling module has -fsanitize-trap
// behavior for this check kind; in this case __cfi_check_fail traps as well.
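// Roughly, the data this function expects (mirrored by CfiCheckFailDataTy
// below) looks like:
//   struct CFICheckFailData { uint8_t CheckKind; SourceLocation Loc;
//                             void *TypeDescriptor; };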
3732 | void CodeGenFunction::EmitCfiCheckFail() { |
3733 | SanitizerScope SanScope(this); |
3734 | FunctionArgList Args; |
3735 | ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy, |
3736 | ImplicitParamKind::Other); |
3737 | ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy, |
3738 | ImplicitParamKind::Other); |
3739 | Args.push_back(Elt: &ArgData); |
3740 | Args.push_back(Elt: &ArgAddr); |
3741 | |
3742 | const CGFunctionInfo &FI = |
3743 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: getContext().VoidTy, args: Args); |
3744 | |
3745 | llvm::Function *F = llvm::Function::Create( |
3746 | Ty: llvm::FunctionType::get(Result: VoidTy, Params: {VoidPtrTy, VoidPtrTy}, isVarArg: false), |
3747 | Linkage: llvm::GlobalValue::WeakODRLinkage, N: "__cfi_check_fail" , M: &CGM.getModule()); |
3748 | |
3749 | CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F, /*IsThunk=*/false); |
3750 | CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F); |
3751 | F->setVisibility(llvm::GlobalValue::HiddenVisibility); |
3752 | |
3753 | StartFunction(GD: GlobalDecl(), RetTy: CGM.getContext().VoidTy, Fn: F, FnInfo: FI, Args, |
3754 | Loc: SourceLocation()); |
3755 | |
3756 | // This function is not affected by NoSanitizeList. This function does |
3757 | // not have a source location, but "src:*" would still apply. Revert any |
3758 | // changes to SanOpts made in StartFunction. |
3759 | SanOpts = CGM.getLangOpts().Sanitize; |
3760 | |
3761 | llvm::Value *Data = |
3762 | EmitLoadOfScalar(Addr: GetAddrOfLocalVar(VD: &ArgData), /*Volatile=*/false, |
3763 | Ty: CGM.getContext().VoidPtrTy, Loc: ArgData.getLocation()); |
3764 | llvm::Value *Addr = |
3765 | EmitLoadOfScalar(Addr: GetAddrOfLocalVar(VD: &ArgAddr), /*Volatile=*/false, |
3766 | Ty: CGM.getContext().VoidPtrTy, Loc: ArgAddr.getLocation()); |
3767 | |
3768 | // Data == nullptr means the calling module has trap behaviour for this check. |
3769 | llvm::Value *DataIsNotNullPtr = |
3770 | Builder.CreateICmpNE(LHS: Data, RHS: llvm::ConstantPointerNull::get(T: Int8PtrTy)); |
3771 | EmitTrapCheck(Checked: DataIsNotNullPtr, CheckHandlerID: SanitizerHandler::CFICheckFail); |
3772 | |
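  // These layouts mirror the static data emitted for the CFICheckFail
  // handler: a check-kind byte, an embedded source location (filename
  // pointer plus 32-bit line and column), and a type descriptor pointer.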
  llvm::StructType *SourceLocationTy =
      llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
  llvm::StructType *CfiCheckFailDataTy =
      llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);

  llvm::Value *V = Builder.CreateConstGEP2_32(
      CfiCheckFailDataTy,
      Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
      0);

  Address CheckKindAddr(V, Int8Ty, getIntAlign());
  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);

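  // Test whether Addr points into any known vtable; the runtime handler can
  // use this to decide whether it is safe to inspect the vtable when
  // producing a diagnostic.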
  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
      CGM.getLLVMContext(),
      llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
  llvm::Value *ValidVtable = Builder.CreateZExt(
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                         {Addr, AllVtables}),
      IntPtrTy);

  const std::pair<int, SanitizerMask> CheckKinds[] = {
      {CFITCK_VCall, SanitizerKind::CFIVCall},
      {CFITCK_NVCall, SanitizerKind::CFINVCall},
      {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
      {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
      {CFITCK_ICall, SanitizerKind::CFIICall}};

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
  for (auto CheckKindMaskPair : CheckKinds) {
    int Kind = CheckKindMaskPair.first;
    SanitizerMask Mask = CheckKindMaskPair.second;
    llvm::Value *Cond =
        Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
    if (CGM.getLangOpts().Sanitize.has(Mask))
      EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
                {Data, Addr, ValidVtable});
    else
      EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
  }

  FinishFunction();
  // The only reference to this function will be created during LTO link.
  // Make sure it survives until then.
  CGM.addUsedGlobal(F);
}

void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
  if (SanOpts.has(SanitizerKind::Unreachable)) {
    SanitizerScope SanScope(this);
    EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
                             SanitizerKind::Unreachable),
              SanitizerHandler::BuiltinUnreachable,
              EmitCheckSourceLocation(Loc), std::nullopt);
  }
  Builder.CreateUnreachable();
}

void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
                                    SanitizerHandler CheckHandlerID) {
  llvm::BasicBlock *Cont = createBasicBlock("cont");

  // If we're optimizing, collapse all calls to trap down to just one per
  // check-type per function to save on code size.
  if ((int)TrapBBs.size() <= CheckHandlerID)
    TrapBBs.resize(CheckHandlerID + 1);

  llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];

  if (!ClSanitizeDebugDeoptimization &&
      CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&
      (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {
    auto Call = TrapBB->begin();
    assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");

    Call->applyMergedLocation(Call->getDebugLoc(),
                              Builder.getCurrentDebugLocation());
    Builder.CreateCondBr(Checked, Cont, TrapBB);
  } else {
    TrapBB = createBasicBlock("trap");
    Builder.CreateCondBr(Checked, Cont, TrapBB);
    EmitBlock(TrapBB);

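    // llvm.ubsantrap carries an i8 immediate that is encoded into the trap
    // instruction itself, so distinct check kinds stay distinguishable in a
    // debugger. Under the -ubsan-unique-traps experiment the current block
    // count of the function is used instead, giving each check its own trap.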
    llvm::CallInst *TrapCall = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
        llvm::ConstantInt::get(CGM.Int8Ty,
                               ClSanitizeDebugDeoptimization
                                   ? TrapBB->getParent()->size()
                                   : static_cast<uint64_t>(CheckHandlerID)));

    if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
      auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                    CGM.getCodeGenOpts().TrapFuncName);
      TrapCall->addFnAttr(A);
    }
    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    Builder.CreateUnreachable();
  }

  EmitBlock(Cont);
}

llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
  llvm::CallInst *TrapCall =
      Builder.CreateCall(CGM.getIntrinsic(IntrID));

  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
    auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                  CGM.getCodeGenOpts().TrapFuncName);
    TrapCall->addFnAttr(A);
  }

  return TrapCall;
}

Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo) {
  assert(E->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue LV = EmitLValue(E);
  Address Addr = LV.getAddress();

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  llvm::Type *NewTy = ConvertType(E->getType());
  Addr = Addr.withElementType(NewTy);

  // Note that VLA pointers are always decayed, so we don't need to do
  // anything here.
  if (!E->getType()->isVariableArrayType()) {
    assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
           "Expected pointer to array");
    Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
  }

  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent
  // accesses to the pointee object as if it had no base lvalue specified.
  // TODO: Support TBAA for member arrays.
  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);

  return Addr.withElementType(ConvertTypeForMem(EltType));
}

/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const auto *CE = dyn_cast<CastExpr>(E);
  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
    return nullptr;

  // If this is a decay from a variable-length array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return nullptr;

  return SubExpr;
}

static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
                                          llvm::Type *elemType,
                                          llvm::Value *ptr,
                                          ArrayRef<llvm::Value*> indices,
                                          bool inbounds,
                                          bool signedIndices,
                                          SourceLocation loc,
                                          const llvm::Twine &name = "arrayidx") {
  if (inbounds) {
    return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
                                      CodeGenFunction::NotSubtraction, loc,
                                      name);
  } else {
    return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
  }
}

static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
                                     ArrayRef<llvm::Value *> indices,
                                     llvm::Type *elementType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     CharUnits align,
                                     const llvm::Twine &name = "arrayidx") {
  if (inbounds) {
    return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
                                      CodeGenFunction::NotSubtraction, loc,
                                      align, name);
  } else {
    return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
  }
}

static CharUnits getArrayElementAlign(CharUnits arrayAlign,
                                      llvm::Value *idx,
                                      CharUnits eltSize) {
  // If we have a constant index, we can use the exact offset of the
  // element we're accessing.
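  // For example, with a 16-byte-aligned array of 4-byte elements, index 2 is
  // known to be 8-byte aligned, whereas an unknown index only guarantees the
  // 4-byte worst case computed below.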
  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
    CharUnits offset = constantIdx->getZExtValue() * eltSize;
    return arrayAlign.alignmentAtOffset(offset);

  // Otherwise, use the worst-case alignment for any element.
  } else {
    return arrayAlign.alignmentOfArrayElement(eltSize);
  }
}

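// Peel off nested VLA levels to find the innermost fixed-size element type;
// e.g. for 'int a[n][m][10]' (a VLA of VLAs of int[10]) this returns int[10].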
static QualType getFixedSizeElementType(const ASTContext &ctx,
                                        const VariableArrayType *vla) {
  QualType eltType;
  do {
    eltType = vla->getElementType();
  } while ((vla = ctx.getAsVariableArrayType(eltType)));
  return eltType;
}

static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
  return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
}

static bool hasBPFPreserveStaticOffset(const Expr *E) {
  if (!E)
    return false;
  QualType PointeeType = E->getType()->getPointeeType();
  if (PointeeType.isNull())
    return false;
  if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
    return hasBPFPreserveStaticOffset(BaseDecl);
  return false;
}

// Wraps Addr with a call to the llvm.preserve.static.offset intrinsic.
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
                                               Address &Addr) {
  if (!CGF.getTarget().getTriple().isBPF())
    return Addr;

  llvm::Function *Fn =
      CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
  llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
  return Address(Call, Addr.getElementType(), Addr.getAlignment());
}

/// Given an array base, check whether its member access belongs to a record
/// with the preserve_access_index attribute or not.
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
  if (!ArrayBase || !CGF.getDebugInfo())
    return false;

  // Only support base as either a MemberExpr or DeclRefExpr.
  // DeclRefExpr to cover cases like:
  //    struct s { int a; int b[10]; };
  //    struct s *p;
  //    p[1].a
  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
  // p->b[5] is a MemberExpr example.
  const Expr *E = ArrayBase->IgnoreImpCasts();
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();

  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
    const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
    if (!VarDef)
      return false;

    const auto *PtrT = VarDef->getType()->getAs<PointerType>();
    if (!PtrT)
      return false;

    const auto *PointeeT = PtrT->getPointeeType()
                               ->getUnqualifiedDesugaredType();
    if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
      return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
    return false;
  }

  return false;
}

static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
                                     ArrayRef<llvm::Value *> indices,
                                     QualType eltType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     QualType *arrayType = nullptr,
                                     const Expr *Base = nullptr,
                                     const llvm::Twine &name = "arrayidx") {
  // All the indices except the last must be zero.
#ifndef NDEBUG
  for (auto *idx : indices.drop_back())
    assert(isa<llvm::ConstantInt>(idx) &&
           cast<llvm::ConstantInt>(idx)->isZero());
#endif

  // Determine the element size of the statically-sized base.  This is
  // the thing that the indices are expressed in terms of.
  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
    eltType = getFixedSizeElementType(CGF.getContext(), vla);
  }

  // We can use that to compute the best alignment of the element.
  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
  CharUnits eltAlign =
      getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);

  if (hasBPFPreserveStaticOffset(Base))
    addr = wrapWithBPFPreserveStaticOffset(CGF, addr);

  llvm::Value *eltPtr;
  auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
  if (!LastIndex ||
      (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
    addr = emitArraySubscriptGEP(CGF, addr, indices,
                                 CGF.ConvertTypeForMem(eltType), inbounds,
                                 signedIndices, loc, eltAlign, name);
    return addr;
  } else {
    // Remember the original array subscript for the BPF target.
    unsigned idx = LastIndex->getZExtValue();
    llvm::DIType *DbgInfo = nullptr;
    if (arrayType)
      DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
    eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
        addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
        idx, DbgInfo);
  }

  return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
}

/// Compute the offset, in bits, of a field from the beginning of the record.
/// Returns false if the field was not found.
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *FD, int64_t &Offset) {
  ASTContext &Ctx = CGF.getContext();
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
  unsigned FieldNo = 0;

  for (const Decl *D : RD->decls()) {
    if (const auto *Record = dyn_cast<RecordDecl>(D))
      if (getFieldOffsetInBits(CGF, Record, FD, Offset)) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }

    if (const auto *Field = dyn_cast<FieldDecl>(D))
      if (FD == Field) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }

    if (isa<FieldDecl>(D))
      ++FieldNo;
  }

  return false;
}

/// Returns the relative offset difference between \p FD1 and \p FD2.
/// \code
///   offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
/// \endcode
/// Both fields must be within the same struct.
static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
                                                        const FieldDecl *FD1,
                                                        const FieldDecl *FD2) {
  const RecordDecl *FD1OuterRec =
      FD1->getParent()->getOuterLexicalRecordContext();
  const RecordDecl *FD2OuterRec =
      FD2->getParent()->getOuterLexicalRecordContext();

  if (FD1OuterRec != FD2OuterRec)
    // Fields must be within the same RecordDecl.
    return std::optional<int64_t>();

  int64_t FD1Offset = 0;
  if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
    return std::optional<int64_t>();

  int64_t FD2Offset = 0;
  if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
    return std::optional<int64_t>();

  return std::make_optional<int64_t>(FD1Offset - FD2Offset);
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
                                               bool Accessed) {
  // The index must always be an integer, which is not an aggregate.  Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  llvm::Value *IdxPre =
      (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
  bool SignedIndices = false;
  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
    auto *Idx = IdxPre;
    if (E->getLHS() != E->getIdx()) {
      assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
      Idx = EmitScalarExpr(E->getIdx());
    }

    QualType IdxTy = E->getIdx()->getType();
    bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
    SignedIndices |= IdxSigned;

    if (SanOpts.has(SanitizerKind::ArrayBounds))
      EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);

    // Extend or truncate the index type to 32 or 64 bits.
    if (Promote && Idx->getType() != IntPtrTy)
      Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");

    return Idx;
  };
  IdxPre = nullptr;

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isSubscriptableVectorType() &&
      !isa<ExtVectorElementExpr>(E->getBase())) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/false);
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType(), LHS.getBaseInfo(),
                                 TBAAAccessInfo());
  }

  // All the other cases basically behave like simple offsetting.

  // Handle the extvector case we ignored above.
  if (isa<ExtVectorElementExpr>(E->getBase())) {
    LValue LV = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    Address Addr = EmitExtVectorElementLValue(LV);

    QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
                                 SignedIndices, E->getExprLoc());
    return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, EltType));
  }

  LValueBaseInfo EltBaseInfo;
  TBAAAccessInfo EltTBAAInfo;
  Address Addr = Address::invalid();
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined()) {
      Idx = Builder.CreateMul(Idx, numElements);
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
    }

    Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc());

  } else if (const ObjCObjectType *OIT =
                 E->getType()->getAs<ObjCObjectType>()) {
    // Indexing over an interface, as in "NSString *P; P[4];"

    // Emit the base pointer.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
    llvm::Value *InterfaceSizeVal =
        llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());

    llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);

    // We don't necessarily build correct LLVM struct types for ObjC
    // interfaces, so we can't rely on GEP to do this scaling
    // correctly, so we need to cast to i8*.  FIXME: is this actually
    // true?  A lot of other things in the fragile ABI would break...
    llvm::Type *OrigBaseElemTy = Addr.getElementType();

    // Do the GEP.
    CharUnits EltAlign =
        getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
    llvm::Value *EltPtr =
        emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
                              ScaledIdx, false, SignedIndices, E->getExprLoc());
    Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    if (SanOpts.has(SanitizerKind::ArrayBounds)) {
      // If the array being accessed has a "counted_by" attribute, generate
      // bounds checking code. The "count" field is at the top level of the
      // struct, or in an anonymous struct that is also at the top level.
      // Future expansions may allow the "count" to reside at any place in the
      // struct, but the value of "counted_by" will be a "simple" path to the
      // count, i.e. "a.b.count", so we shouldn't need the full force of
      // EmitLValue or similar to emit the correct GEP.
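      // A typical qualifying declaration looks like:
      //   struct s { int count; int fam[] __attribute__((counted_by(count))); };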
      const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
          getLangOpts().getStrictFlexArraysLevel();

      if (const auto *ME = dyn_cast<MemberExpr>(Array);
          ME &&
          ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
          ME->getMemberDecl()->getType()->isCountAttributedType()) {
        const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) {
          if (std::optional<int64_t> Diff =
                  getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
            CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);

            // Create a GEP with a byte offset between the FAM and count and
            // use that to load the count value.
            Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(
                ArrayLV.getAddress(), Int8PtrTy, Int8Ty);

            llvm::Type *CountTy = ConvertType(CountFD->getType());
            llvm::Value *Res = Builder.CreateInBoundsGEP(
                Int8Ty, Addr.emitRawPointer(*this),
                Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
            Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
                                            ".counted_by.load");

            // Now emit the bounds checking.
            EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
                                Array->getType(), Accessed);
          }
        }
      }
    }

    // Propagate the alignment from the array itself to the result.
    QualType arrayType = Array->getType();
    Addr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
        E->getExprLoc(), &arrayType, E->getBase());
    EltBaseInfo = ArrayLV.getBaseInfo();
    EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
  } else {
    // The base must be a pointer; emit it with an estimate of its alignment.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    QualType ptrType = E->getBase()->getType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc(), &ptrType,
                                 E->getBase());
  }

  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);

  if (getLangOpts().ObjC &&
      getLangOpts().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
  assert(
      !E->isIncomplete() &&
      "incomplete matrix subscript expressions should be rejected during Sema");
  LValue Base = EmitLValue(E->getBase());
  llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
  llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
  llvm::Value *NumRows = Builder.getIntN(
      RowIdx->getType()->getScalarSizeInBits(),
      E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
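  // Matrix values are laid out column-major, so flatten the (row, column)
  // pair into a single element index: col * NumRows + row.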
  llvm::Value *FinalIdx =
      Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
  return LValue::MakeMatrixElt(
      MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
      E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}

static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
                                       LValueBaseInfo &BaseInfo,
                                       TBAAAccessInfo &TBAAInfo,
                                       QualType BaseTy, QualType ElTy,
                                       bool IsLowerBound) {
  LValue BaseLVal;
  if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
    BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
    if (BaseTy->isArrayType()) {
      Address Addr = BaseLVal.getAddress();
      BaseInfo = BaseLVal.getBaseInfo();

      // If the array type was an incomplete type, we need to make sure
      // the decay ends up being the right type.
      llvm::Type *NewTy = CGF.ConvertType(BaseTy);
      Addr = Addr.withElementType(NewTy);

      // Note that VLA pointers are always decayed, so we don't need to do
      // anything here.
      if (!BaseTy->isVariableArrayType()) {
        assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
               "Expected pointer to array");
        Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
      }

      return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
    }
    LValueBaseInfo TypeBaseInfo;
    TBAAAccessInfo TypeTBAAInfo;
    CharUnits Align =
        CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
    BaseInfo.mergeForCast(TypeBaseInfo);
    TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
    return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
                   CGF.ConvertTypeForMem(ElTy), Align);
  }
  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}

LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
                                             bool IsLowerBound) {

  assert(!E->isOpenACCArraySection() &&
         "OpenACC Array section codegen not implemented");

  QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
  QualType ResultExprTy;
  if (auto *AT = getContext().getAsArrayType(BaseTy))
    ResultExprTy = AT->getElementType();
  else
    ResultExprTy = BaseTy->getPointeeType();
  llvm::Value *Idx = nullptr;
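  // An OpenMP array section has the form base[lower-bound : length]. Depending
  // on IsLowerBound we compute the index of either its first element (the
  // lower bound) or its last element (lower bound + length - 1).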
  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
    // Requesting lower bound or upper bound, but without a provided length and
    // without a ':' symbol for the default length -> length = 1.
    // Idx = LowerBound ?: 0;
    if (auto *LowerBound = E->getLowerBound()) {
      Idx = Builder.CreateIntCast(
          EmitScalarExpr(LowerBound), IntPtrTy,
          LowerBound->getType()->hasSignedIntegerRepresentation());
    } else
      Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
  } else {
    // Try to emit the length or lower bound as a constant. If this is
    // possible, 1 is subtracted from the constant length or lower bound.
    // Otherwise, emit LLVM IR for (LB + Len) - 1.
    auto &C = CGM.getContext();
    auto *Length = E->getLength();
    llvm::APSInt ConstLength;
    if (Length) {
      // Idx = LowerBound + Length - 1;
      if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
        ConstLength = CL->zextOrTrunc(PointerWidthInBits);
        Length = nullptr;
      }
      auto *LowerBound = E->getLowerBound();
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
      if (LowerBound) {
        if (std::optional<llvm::APSInt> LB =
                LowerBound->getIntegerConstantExpr(C)) {
          ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
          LowerBound = nullptr;
        }
      }
      if (!Length)
        --ConstLength;
      else if (!LowerBound)
        --ConstLowerBound;

      if (Length || LowerBound) {
        auto *LowerBoundVal =
            LowerBound
                ? Builder.CreateIntCast(
                      EmitScalarExpr(LowerBound), IntPtrTy,
                      LowerBound->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
        auto *LengthVal =
            Length
                ? Builder.CreateIntCast(
                      EmitScalarExpr(Length), IntPtrTy,
                      Length->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
                                /*HasNUW=*/false,
                                !getLangOpts().isSignedOverflowDefined());
        if (Length && LowerBound) {
          Idx = Builder.CreateSub(
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
              /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
        }
      } else
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
    } else {
      // Idx = ArraySize - 1;
      QualType ArrayTy = BaseTy->isPointerType()
                             ? E->getBase()->IgnoreParenImpCasts()->getType()
                             : BaseTy;
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
        Length = VAT->getSizeExpr();
        if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
          ConstLength = *L;
          Length = nullptr;
        }
      } else {
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
        assert(CAT && "unexpected type for array initializer");
        ConstLength = CAT->getSize();
      }
      if (Length) {
        auto *LengthVal = Builder.CreateIntCast(
            EmitScalarExpr(Length), IntPtrTy,
            Length->getType()->hasSignedIntegerRepresentation());
        Idx = Builder.CreateSub(
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
            /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
      } else {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        --ConstLength;
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
      }
    }
  }
  assert(Idx);

  Address EltPtr = Address::invalid();
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
                                BaseTy, VLA->getElementType(), IsLowerBound);
    // The element count here is the total number of non-VLA elements.
    llvm::Value *NumElements = getVLASize(VLA).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined())
      Idx = Builder.CreateMul(Idx, NumElements);
    else
      Idx = Builder.CreateNSWMul(Idx, NumElements);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);

    // Propagate the alignment from the array itself to the result.
    EltPtr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
        /*signedIndices=*/false, E->getExprLoc());
    BaseInfo = ArrayLV.getBaseInfo();
    TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
  } else {
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
                                ResultExprTy, IsLowerBound);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  }

  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
    const auto *PT = E->getBase()->getType()->castAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    Address VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
                          AlignmentSource::Decl);
  }

  QualType type =
      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  SmallVector<uint32_t, 4> Indices;
  E->getEncodedElementAccess(Indices);
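  // For example, 'V.zyx' on a four-element vector encodes Indices = {2, 1, 0}.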

  if (Base.isSimple()) {
    llvm::Constant *CV =
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
                                    Base.getBaseInfo(), TBAAAccessInfo());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
                                  Base.getBaseInfo(), TBAAAccessInfo());
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
    EmitIgnoredExpr(E->getBase());
    return EmitDeclRefLValue(DRE);
  }

  Expr *BaseExpr = E->getBase();
  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  LValue BaseLV;
  if (E->isArrow()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
    QualType PtrTy = BaseExpr->getType()->getPointeeType();
    SanitizerSet SkippedChecks;
    bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
    if (IsBaseCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
      SkippedChecks.set(SanitizerKind::Null, true);
    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
    BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
  } else
    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);

  NamedDecl *ND = E->getMemberDecl();
  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseLV, Field);
    setObjCGCLValueClass(getContext(), E, LV);
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      if ((IsWrappedCXXThis(BaseExpr) &&
           CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
          BaseLV.isNontemporal())
        LV.setNontemporal(/*Value=*/true);
    }
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}

/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
///
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
                                                 llvm::Value *ThisValue) {
  bool HasExplicitObjectParameter = false;
  const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
  if (MD) {
    HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
    assert(MD->getParent()->isLambda());
    assert(MD->getParent() == Field->getParent());
  }
  LValue LambdaLV;
  if (HasExplicitObjectParameter) {
    const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
    auto It = LocalDeclMap.find(D);
    assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
    Address AddrOfExplicitObject = It->getSecond();
    if (D->getType()->isReferenceType())
      LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
                                           AlignmentSource::Decl);
    else
      LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
                                D->getType().getNonReferenceType());

    // Make sure we have an lvalue to the lambda itself and not a derived
    // class.
    auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
    auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
    if (ThisTy != LambdaTy) {
      const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
      Address Base = GetAddressOfBaseClass(
          LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
          BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
      LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
    }
  } else {
    QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
    LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
  }
  return EmitLValueForField(LambdaLV, Field);
}

LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
  return EmitLValueForLambdaField(Field, CXXABIThisValue);
}

/// Get the field index in the debug info. Unnamed bitfields are not emitted
/// into the debug info for a structure or union, so they are skipped when
/// computing the index.
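/// For example, in 'struct S { int a; int : 0; int b; };' the field 'b' has
/// FieldIndex 2 in the AST but index 1 in the debug info.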
4733 | unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec, |
4734 | unsigned FieldIndex) { |
4735 | unsigned I = 0, Skipped = 0; |
4736 | |
4737 | for (auto *F : Rec->getDefinition()->fields()) { |
4738 | if (I == FieldIndex) |
4739 | break; |
4740 | if (F->isUnnamedBitField()) |
4741 | Skipped++; |
4742 | I++; |
4743 | } |
4744 | |
4745 | return FieldIndex - Skipped; |
4746 | } |
4747 | |
4748 | /// Get the address of a zero-sized field within a record. The resulting |
4749 | /// address doesn't necessarily have the right type. |
4750 | static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, |
4751 | const FieldDecl *Field) { |
4752 | CharUnits Offset = CGF.getContext().toCharUnitsFromBits( |
4753 | BitSize: CGF.getContext().getFieldOffset(FD: Field)); |
4754 | if (Offset.isZero()) |
4755 | return Base; |
4756 | Base = Base.withElementType(ElemTy: CGF.Int8Ty); |
4757 | return CGF.Builder.CreateConstInBoundsByteGEP(Addr: Base, Offset); |
4758 | } |
4759 | |
4760 | /// Drill down to the storage of a field without walking into |
4761 | /// reference types. |
4762 | /// |
4763 | /// The resulting address doesn't necessarily have the right type. |
4764 | static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, |
4765 | const FieldDecl *field) { |
4766 | if (isEmptyFieldForLayout(Context: CGF.getContext(), FD: field)) |
4767 | return emitAddrOfZeroSizeField(CGF, Base: base, Field: field); |
4768 | |
4769 | const RecordDecl *rec = field->getParent(); |
4770 | |
4771 | unsigned idx = |
4772 | CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(FD: field); |
4773 | |
4774 | return CGF.Builder.CreateStructGEP(Addr: base, Index: idx, Name: field->getName()); |
4775 | } |
4776 | |
4777 | static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, |
4778 | Address addr, const FieldDecl *field) { |
4779 | const RecordDecl *rec = field->getParent(); |
4780 | llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType( |
4781 | Ty: base.getType(), Loc: rec->getLocation()); |
4782 | |
4783 | unsigned idx = |
4784 | CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(FD: field); |
4785 | |
4786 | return CGF.Builder.CreatePreserveStructAccessIndex( |
4787 | Addr: addr, Index: idx, FieldIndex: CGF.getDebugInfoFIndex(Rec: rec, FieldIndex: field->getFieldIndex()), DbgInfo); |
4788 | } |
4789 | |
4790 | static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { |
4791 | const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl(); |
4792 | if (!RD) |
4793 | return false; |
4794 | |
4795 | if (RD->isDynamicClass()) |
4796 | return true; |
4797 | |
4798 | for (const auto &Base : RD->bases()) |
4799 | if (hasAnyVptr(Type: Base.getType(), Context)) |
4800 | return true; |
4801 | |
4802 | for (const FieldDecl *Field : RD->fields()) |
4803 | if (hasAnyVptr(Type: Field->getType(), Context)) |
4804 | return true; |
4805 | |
4806 | return false; |
4807 | } |
4808 | |
4809 | LValue CodeGenFunction::EmitLValueForField(LValue base, |
4810 | const FieldDecl *field) { |
4811 | LValueBaseInfo BaseInfo = base.getBaseInfo(); |
4812 | |
4813 | if (field->isBitField()) { |
4814 | const CGRecordLayout &RL = |
4815 | CGM.getTypes().getCGRecordLayout(field->getParent()); |
4816 | const CGBitFieldInfo &Info = RL.getBitFieldInfo(FD: field); |
4817 | const bool UseVolatile = isAAPCS(TargetInfo: CGM.getTarget()) && |
4818 | CGM.getCodeGenOpts().AAPCSBitfieldWidth && |
4819 | Info.VolatileStorageSize != 0 && |
4820 | field->getType() |
4821 | .withCVRQualifiers(CVR: base.getVRQualifiers()) |
4822 | .isVolatileQualified(); |
4823 | Address Addr = base.getAddress(); |
4824 | unsigned Idx = RL.getLLVMFieldNo(FD: field); |
4825 | const RecordDecl *rec = field->getParent(); |
4826 | if (hasBPFPreserveStaticOffset(D: rec)) |
4827 | Addr = wrapWithBPFPreserveStaticOffset(CGF&: *this, Addr); |
4828 | if (!UseVolatile) { |
4829 | if (!IsInPreservedAIRegion && |
4830 | (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) { |
4831 | if (Idx != 0) |
4832 | // For structs, we GEP to the field that the record layout suggests. |
4833 | Addr = Builder.CreateStructGEP(Addr, Index: Idx, Name: field->getName()); |
4834 | } else { |
4835 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType( |
4836 | Ty: getContext().getRecordType(Decl: rec), L: rec->getLocation()); |
4837 | Addr = Builder.CreatePreserveStructAccessIndex( |
4838 | Addr, Index: Idx, FieldIndex: getDebugInfoFIndex(Rec: rec, FieldIndex: field->getFieldIndex()), |
4839 | DbgInfo); |
4840 | } |
4841 | } |
4842 | const unsigned SS = |
4843 | UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; |
4844 | // Get the access type. |
4845 | llvm::Type *FieldIntTy = llvm::Type::getIntNTy(C&: getLLVMContext(), N: SS); |
4846 | Addr = Addr.withElementType(ElemTy: FieldIntTy); |
4847 | if (UseVolatile) { |
4848 | const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity(); |
4849 | if (VolatileOffset) |
4850 | Addr = Builder.CreateConstInBoundsGEP(Addr, Index: VolatileOffset); |
4851 | } |
4852 | |
4853 | QualType fieldType = |
4854 | field->getType().withCVRQualifiers(CVR: base.getVRQualifiers()); |
4855 | // TODO: Support TBAA for bit fields. |
4856 | LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); |
4857 | return LValue::MakeBitfield(Addr, Info, type: fieldType, BaseInfo: FieldBaseInfo, |
4858 | TBAAInfo: TBAAAccessInfo()); |
4859 | } |
4860 | |
4861 | // Fields of may-alias structures are may-alias themselves. |
4862 | // FIXME: this should get propagated down through anonymous structs |
4863 | // and unions. |
4864 | QualType FieldType = field->getType(); |
4865 | const RecordDecl *rec = field->getParent(); |
4866 | AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource(); |
4867 | LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(Source: BaseAlignSource)); |
4868 | TBAAAccessInfo FieldTBAAInfo; |
4869 | if (base.getTBAAInfo().isMayAlias() || |
4870 | rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) { |
4871 | FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); |
4872 | } else if (rec->isUnion()) { |
4873 | // TODO: Support TBAA for unions. |
4874 | FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); |
4875 | } else { |
4876 | // If no base type been assigned for the base access, then try to generate |
4877 | // one for this base lvalue. |
4878 | FieldTBAAInfo = base.getTBAAInfo(); |
4879 | if (!FieldTBAAInfo.BaseType) { |
4880 | FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(QTy: base.getType()); |
4881 | assert(!FieldTBAAInfo.Offset && |
4882 | "Nonzero offset for an access with no base type!" ); |
4883 | } |
4884 | |
4885 | // Adjust offset to be relative to the base type. |
4886 | const ASTRecordLayout &Layout = |
4887 | getContext().getASTRecordLayout(D: field->getParent()); |
4888 | unsigned CharWidth = getContext().getCharWidth(); |
4889 | if (FieldTBAAInfo.BaseType) |
4890 | FieldTBAAInfo.Offset += |
4891 | Layout.getFieldOffset(FieldNo: field->getFieldIndex()) / CharWidth; |
4892 | |
4893 | // Update the final access type and size. |
4894 | FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(QTy: FieldType); |
4895 | FieldTBAAInfo.Size = |
4896 | getContext().getTypeSizeInChars(T: FieldType).getQuantity(); |
4897 | } |
4898 | |
4899 | Address addr = base.getAddress(); |
4900 | if (hasBPFPreserveStaticOffset(D: rec)) |
4901 | addr = wrapWithBPFPreserveStaticOffset(CGF&: *this, Addr&: addr); |
4902 | if (auto *ClassDef = dyn_cast<CXXRecordDecl>(Val: rec)) { |
4903 | if (CGM.getCodeGenOpts().StrictVTablePointers && |
4904 | ClassDef->isDynamicClass()) { |
4905 | // Getting to any field of dynamic object requires stripping dynamic |
4906 | // information provided by invariant.group. This is because accessing |
4907 | // fields may leak the real address of dynamic object, which could result |
4908 | // in miscompilation when leaked pointer would be compared. |
4909 | auto *stripped = |
4910 | Builder.CreateStripInvariantGroup(Ptr: addr.emitRawPointer(CGF&: *this)); |
4911 | addr = Address(stripped, addr.getElementType(), addr.getAlignment()); |
4912 | } |
4913 | } |
4914 | |
4915 | unsigned RecordCVR = base.getVRQualifiers(); |
4916 | if (rec->isUnion()) { |
4917 | // For unions, there is no pointer adjustment. |
4918 | if (CGM.getCodeGenOpts().StrictVTablePointers && |
4919 | hasAnyVptr(Type: FieldType, Context: getContext())) |
4920 | // Because unions can easily skip invariant.barriers, we need to add |
4921 | // a barrier every time CXXRecord field with vptr is referenced. |
4922 | addr = Builder.CreateLaunderInvariantGroup(Addr: addr); |
4923 | |
4924 | if (IsInPreservedAIRegion || |
4925 | (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) { |
4926 | // Remember the original union field index |
4927 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(Ty: base.getType(), |
4928 | Loc: rec->getLocation()); |
4929 | addr = |
4930 | Address(Builder.CreatePreserveUnionAccessIndex( |
4931 | Base: addr.emitRawPointer(CGF&: *this), |
4932 | FieldIndex: getDebugInfoFIndex(Rec: rec, FieldIndex: field->getFieldIndex()), DbgInfo), |
4933 | addr.getElementType(), addr.getAlignment()); |
4934 | } |
4935 | |
4936 | if (FieldType->isReferenceType()) |
4937 | addr = addr.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T: FieldType)); |
4938 | } else { |
4939 | if (!IsInPreservedAIRegion && |
4940 | (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) |
4941 | // For structs, we GEP to the field that the record layout suggests. |
4942 | addr = emitAddrOfFieldStorage(CGF&: *this, base: addr, field); |
4943 | else |
4944 | // Remember the original struct field index |
4945 | addr = emitPreserveStructAccess(CGF&: *this, base, addr, field); |
4946 | } |
4947 | |
4948 | // If this is a reference field, load the reference right now. |
4949 | if (FieldType->isReferenceType()) { |
4950 | LValue RefLVal = |
4951 | MakeAddrLValue(Addr: addr, T: FieldType, BaseInfo: FieldBaseInfo, TBAAInfo: FieldTBAAInfo); |
4952 | if (RecordCVR & Qualifiers::Volatile) |
4953 | RefLVal.getQuals().addVolatile(); |
4954 | addr = EmitLoadOfReference(RefLVal, PointeeBaseInfo: &FieldBaseInfo, PointeeTBAAInfo: &FieldTBAAInfo); |
4955 | |
4956 | // Qualifiers on the struct don't apply to the referencee. |
4957 | RecordCVR = 0; |
4958 | FieldType = FieldType->getPointeeType(); |
4959 | } |
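     | // Illustrative example (hypothetical names): for
     | //   struct S { int &r; };
     | // an access to s.r first loads the stored reference, and the resulting
     | // lvalue denotes the referenced int; cv-qualifiers on S don't carry over.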
4960 | |
4961 | // Make sure that the address is pointing to the right type. This is critical |
4962 | // for both unions and structs. |
4963 | addr = addr.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T: FieldType)); |
4964 | |
4965 | if (field->hasAttr<AnnotateAttr>()) |
4966 | addr = EmitFieldAnnotations(D: field, V: addr); |
4967 | |
4968 | LValue LV = MakeAddrLValue(Addr: addr, T: FieldType, BaseInfo: FieldBaseInfo, TBAAInfo: FieldTBAAInfo); |
4969 | LV.getQuals().addCVRQualifiers(mask: RecordCVR); |
4970 | |
4971 | // __weak attribute on a field is ignored. |
4972 | if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) |
4973 | LV.getQuals().removeObjCGCAttr(); |
4974 | |
4975 | return LV; |
4976 | } |
4977 | |
4978 | LValue |
4979 | CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, |
4980 | const FieldDecl *Field) { |
4981 | QualType FieldType = Field->getType(); |
4982 | |
4983 | if (!FieldType->isReferenceType()) |
4984 | return EmitLValueForField(base: Base, field: Field); |
4985 | |
4986 | Address V = emitAddrOfFieldStorage(CGF&: *this, base: Base.getAddress(), field: Field); |
4987 | |
4988 | // Make sure that the address is pointing to the right type. |
4989 | llvm::Type *llvmType = ConvertTypeForMem(T: FieldType); |
4990 | V = V.withElementType(ElemTy: llvmType); |
4991 | |
4992 | // TODO: Generate TBAA information that describes this access as a structure |
4993 | // member access and not just an access to an object of the field's type. This |
4994 | // should be similar to what we do in EmitLValueForField(). |
4995 | LValueBaseInfo BaseInfo = Base.getBaseInfo(); |
4996 | AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource(); |
4997 | LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(Source: FieldAlignSource)); |
4998 | return MakeAddrLValue(Addr: V, T: FieldType, BaseInfo: FieldBaseInfo, |
4999 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base, AccessType: FieldType)); |
5000 | } |
5001 | |
5002 | LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ |
5003 | if (E->isFileScope()) { |
5004 | ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); |
5005 | return MakeAddrLValue(Addr: GlobalPtr, T: E->getType(), Source: AlignmentSource::Decl); |
5006 | } |
5007 | if (E->getType()->isVariablyModifiedType()) |
5008 | // make sure to emit the VLA size. |
5009 | EmitVariablyModifiedType(Ty: E->getType()); |
5010 | |
5011 | Address DeclPtr = CreateMemTemp(Ty: E->getType(), Name: ".compoundliteral" ); |
5012 | const Expr *InitExpr = E->getInitializer(); |
5013 | LValue Result = MakeAddrLValue(Addr: DeclPtr, T: E->getType(), Source: AlignmentSource::Decl); |
5014 | |
5015 | EmitAnyExprToMem(E: InitExpr, Location: DeclPtr, Quals: E->getType().getQualifiers(), |
5016 | /*Init*/ IsInit: true); |
5017 | |
5018 | // Block-scope compound literals are destroyed at the end of the enclosing |
5019 | // scope in C. |
5020 | if (!getLangOpts().CPlusPlus) |
5021 | if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) |
5022 | pushLifetimeExtendedDestroy(kind: getCleanupKind(kind: DtorKind), addr: DeclPtr, |
5023 | type: E->getType(), destroyer: getDestroyer(destructionKind: DtorKind), |
5024 | useEHCleanupForArray: DtorKind & EHCleanup); |
5025 | |
5026 | return Result; |
5027 | } |
5028 | |
5029 | LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { |
5030 | if (!E->isGLValue()) |
5031 | // Initializing an aggregate temporary in C++11: T{...}. |
5032 | return EmitAggExprToLValue(E); |
5033 | |
5034 | // An lvalue initializer list must be initializing a reference. |
5035 | assert(E->isTransparent() && "non-transparent glvalue init list" ); |
5036 | return EmitLValue(E: E->getInit(Init: 0)); |
5037 | } |
5038 | |
5039 | /// Emit the operand of a glvalue conditional operator. This is either a glvalue |
5040 | /// or a (possibly-parenthesized) throw-expression. If this is a throw, no |
5041 | /// LValue is returned and the current block has been terminated. |
5042 | static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF, |
5043 | const Expr *Operand) { |
5044 | if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Val: Operand->IgnoreParens())) { |
5045 | CGF.EmitCXXThrowExpr(E: ThrowExpr, /*KeepInsertionPoint*/false); |
5046 | return std::nullopt; |
5047 | } |
5048 | |
5049 | return CGF.EmitLValue(E: Operand); |
5050 | } |
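     | // For example (illustrative), in `cond ? x : throw Oops()` the throw operand
     | // terminates its block and yields no LValue, so only the other arm's result
     | // is used.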
5051 | |
5052 | namespace { |
5053 | // Handle the case where the condition constant-folds to a simple integer,
5054 | // which means we don't have to separately handle the true/false blocks.
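     | // For example (illustrative), `true ? a : b` folds to the lvalue of `a`
     | // without emitting branches, provided the dead arm contains no labels.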
5055 | std::optional<LValue> HandleConditionalOperatorLValueSimpleCase( |
5056 | CodeGenFunction &CGF, const AbstractConditionalOperator *E) { |
5057 | const Expr *condExpr = E->getCond(); |
5058 | bool CondExprBool; |
5059 | if (CGF.ConstantFoldsToSimpleInteger(Cond: condExpr, Result&: CondExprBool)) { |
5060 | const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr(); |
5061 | if (!CondExprBool) |
5062 | std::swap(a&: Live, b&: Dead); |
5063 | |
5064 | if (!CGF.ContainsLabel(S: Dead)) { |
5065 | // If the true case is live, we need to track its region. |
5066 | if (CondExprBool) |
5067 | CGF.incrementProfileCounter(S: E); |
5068 | // If the live operand is a throw expression, emit it and return an
5069 | // undefined lvalue because the result can't be used.
5070 | if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Val: Live->IgnoreParens())) { |
5071 | CGF.EmitCXXThrowExpr(E: ThrowExpr); |
5072 | llvm::Type *ElemTy = CGF.ConvertType(T: Dead->getType()); |
5073 | llvm::Type *Ty = CGF.UnqualPtrTy; |
5074 | return CGF.MakeAddrLValue( |
5075 | Addr: Address(llvm::UndefValue::get(T: Ty), ElemTy, CharUnits::One()), |
5076 | T: Dead->getType()); |
5077 | } |
5078 | return CGF.EmitLValue(E: Live); |
5079 | } |
5080 | } |
5081 | return std::nullopt; |
5082 | } |
5083 | struct ConditionalInfo { |
5084 | llvm::BasicBlock *lhsBlock, *rhsBlock; |
5085 | std::optional<LValue> LHS, RHS; |
5086 | }; |
5087 | |
5088 | // Create and generate the 3 blocks for a conditional operator. |
5089 | // Leaves the 'current block' in the continuation basic block. |
5090 | template<typename FuncTy> |
5091 | ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF, |
5092 | const AbstractConditionalOperator *E, |
5093 | const FuncTy &BranchGenFunc) { |
5094 | ConditionalInfo Info{.lhsBlock: CGF.createBasicBlock(name: "cond.true" ), |
5095 | .rhsBlock: CGF.createBasicBlock(name: "cond.false" ), .LHS: std::nullopt, |
5096 | .RHS: std::nullopt}; |
5097 | llvm::BasicBlock *endBlock = CGF.createBasicBlock(name: "cond.end" ); |
5098 | |
5099 | CodeGenFunction::ConditionalEvaluation eval(CGF); |
5100 | CGF.EmitBranchOnBoolExpr(Cond: E->getCond(), TrueBlock: Info.lhsBlock, FalseBlock: Info.rhsBlock, |
5101 | TrueCount: CGF.getProfileCount(S: E)); |
5102 | |
5103 | // Any temporaries created here are conditional. |
5104 | CGF.EmitBlock(BB: Info.lhsBlock); |
5105 | CGF.incrementProfileCounter(S: E); |
5106 | eval.begin(CGF); |
5107 | Info.LHS = BranchGenFunc(CGF, E->getTrueExpr()); |
5108 | eval.end(CGF); |
5109 | Info.lhsBlock = CGF.Builder.GetInsertBlock(); |
5110 | |
5111 | if (Info.LHS) |
5112 | CGF.Builder.CreateBr(Dest: endBlock); |
5113 | |
5114 | // Any temporaries created here are conditional. |
5115 | CGF.EmitBlock(BB: Info.rhsBlock); |
5116 | eval.begin(CGF); |
5117 | Info.RHS = BranchGenFunc(CGF, E->getFalseExpr()); |
5118 | eval.end(CGF); |
5119 | Info.rhsBlock = CGF.Builder.GetInsertBlock(); |
5120 | CGF.EmitBlock(BB: endBlock); |
5121 | |
5122 | return Info; |
5123 | } |
5124 | } // namespace |
5125 | |
5126 | void CodeGenFunction::EmitIgnoredConditionalOperator( |
5127 | const AbstractConditionalOperator *E) { |
5128 | if (!E->isGLValue()) { |
5129 | // ?: here should be an aggregate. |
5130 | assert(hasAggregateEvaluationKind(E->getType()) && |
5131 | "Unexpected conditional operator!" ); |
5132 | return (void)EmitAggExprToLValue(E); |
5133 | } |
5134 | |
5135 | OpaqueValueMapping binding(*this, E); |
5136 | if (HandleConditionalOperatorLValueSimpleCase(CGF&: *this, E)) |
5137 | return; |
5138 | |
5139 | EmitConditionalBlocks(CGF&: *this, E, BranchGenFunc: [](CodeGenFunction &CGF, const Expr *E) { |
5140 | CGF.EmitIgnoredExpr(E); |
5141 | return LValue{}; |
5142 | }); |
5143 | } |
5144 | LValue CodeGenFunction::EmitConditionalOperatorLValue( |
5145 | const AbstractConditionalOperator *expr) { |
5146 | if (!expr->isGLValue()) { |
5147 | // ?: here should be an aggregate. |
5148 | assert(hasAggregateEvaluationKind(expr->getType()) && |
5149 | "Unexpected conditional operator!" ); |
5150 | return EmitAggExprToLValue(E: expr); |
5151 | } |
5152 | |
5153 | OpaqueValueMapping binding(*this, expr); |
5154 | if (std::optional<LValue> Res = |
5155 | HandleConditionalOperatorLValueSimpleCase(CGF&: *this, E: expr)) |
5156 | return *Res; |
5157 | |
5158 | ConditionalInfo Info = EmitConditionalBlocks( |
5159 | CGF&: *this, E: expr, BranchGenFunc: [](CodeGenFunction &CGF, const Expr *E) { |
5160 | return EmitLValueOrThrowExpression(CGF, Operand: E); |
5161 | }); |
5162 | |
5163 | if ((Info.LHS && !Info.LHS->isSimple()) || |
5164 | (Info.RHS && !Info.RHS->isSimple())) |
5165 | return EmitUnsupportedLValue(E: expr, Name: "conditional operator" ); |
5166 | |
5167 | if (Info.LHS && Info.RHS) { |
5168 | Address lhsAddr = Info.LHS->getAddress(); |
5169 | Address rhsAddr = Info.RHS->getAddress(); |
5170 | Address result = mergeAddressesInConditionalExpr( |
5171 | LHS: lhsAddr, RHS: rhsAddr, LHSBlock: Info.lhsBlock, RHSBlock: Info.rhsBlock, |
5172 | MergeBlock: Builder.GetInsertBlock(), MergedType: expr->getType()); |
5173 | AlignmentSource alignSource = |
5174 | std::max(a: Info.LHS->getBaseInfo().getAlignmentSource(), |
5175 | b: Info.RHS->getBaseInfo().getAlignmentSource()); |
5176 | TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator( |
5177 | InfoA: Info.LHS->getTBAAInfo(), InfoB: Info.RHS->getTBAAInfo()); |
5178 | return MakeAddrLValue(Addr: result, T: expr->getType(), BaseInfo: LValueBaseInfo(alignSource), |
5179 | TBAAInfo); |
5180 | } else { |
5181 | assert((Info.LHS || Info.RHS) && |
5182 | "both operands of glvalue conditional are throw-expressions?" ); |
5183 | return Info.LHS ? *Info.LHS : *Info.RHS; |
5184 | } |
5185 | } |
5186 | |
5187 | /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference |
5188 | /// type. If the cast is to a reference, we can have the usual lvalue result, |
5189 | /// otherwise if a cast is needed by the code generator in an lvalue context, |
5190 | /// then it must mean that we need the address of an aggregate in order to |
5191 | /// access one of its members. This can happen for all the reasons that casts |
5192 | /// are permitted with aggregate result, including noop aggregate casts, and
5193 | /// casts from scalar to union.
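     | /// Illustrative example (GNU cast-to-union, hypothetical names):
     | ///   union U { int i; float f; };
     | ///   int y = ((union U)x).i;  // member access needs a U temporary's address.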
5194 | LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { |
5195 | switch (E->getCastKind()) { |
5196 | case CK_ToVoid: |
5197 | case CK_BitCast: |
5198 | case CK_LValueToRValueBitCast: |
5199 | case CK_ArrayToPointerDecay: |
5200 | case CK_FunctionToPointerDecay: |
5201 | case CK_NullToMemberPointer: |
5202 | case CK_NullToPointer: |
5203 | case CK_IntegralToPointer: |
5204 | case CK_PointerToIntegral: |
5205 | case CK_PointerToBoolean: |
5206 | case CK_IntegralCast: |
5207 | case CK_BooleanToSignedIntegral: |
5208 | case CK_IntegralToBoolean: |
5209 | case CK_IntegralToFloating: |
5210 | case CK_FloatingToIntegral: |
5211 | case CK_FloatingToBoolean: |
5212 | case CK_FloatingCast: |
5213 | case CK_FloatingRealToComplex: |
5214 | case CK_FloatingComplexToReal: |
5215 | case CK_FloatingComplexToBoolean: |
5216 | case CK_FloatingComplexCast: |
5217 | case CK_FloatingComplexToIntegralComplex: |
5218 | case CK_IntegralRealToComplex: |
5219 | case CK_IntegralComplexToReal: |
5220 | case CK_IntegralComplexToBoolean: |
5221 | case CK_IntegralComplexCast: |
5222 | case CK_IntegralComplexToFloatingComplex: |
5223 | case CK_DerivedToBaseMemberPointer: |
5224 | case CK_BaseToDerivedMemberPointer: |
5225 | case CK_MemberPointerToBoolean: |
5226 | case CK_ReinterpretMemberPointer: |
5227 | case CK_AnyPointerToBlockPointerCast: |
5228 | case CK_ARCProduceObject: |
5229 | case CK_ARCConsumeObject: |
5230 | case CK_ARCReclaimReturnedObject: |
5231 | case CK_ARCExtendBlockObject: |
5232 | case CK_CopyAndAutoreleaseBlockObject: |
5233 | case CK_IntToOCLSampler: |
5234 | case CK_FloatingToFixedPoint: |
5235 | case CK_FixedPointToFloating: |
5236 | case CK_FixedPointCast: |
5237 | case CK_FixedPointToBoolean: |
5238 | case CK_FixedPointToIntegral: |
5239 | case CK_IntegralToFixedPoint: |
5240 | case CK_MatrixCast: |
5241 | case CK_HLSLVectorTruncation: |
5242 | case CK_HLSLArrayRValue: |
5243 | return EmitUnsupportedLValue(E, Name: "unexpected cast lvalue" ); |
5244 | |
5245 | case CK_Dependent: |
5246 | llvm_unreachable("dependent cast kind in IR gen!" ); |
5247 | |
5248 | case CK_BuiltinFnToFnPtr: |
5249 | llvm_unreachable("builtin functions are handled elsewhere" ); |
5250 | |
5251 | // These are never l-values; just use the aggregate emission code. |
5252 | case CK_NonAtomicToAtomic: |
5253 | case CK_AtomicToNonAtomic: |
5254 | return EmitAggExprToLValue(E); |
5255 | |
5256 | case CK_Dynamic: { |
5257 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5258 | Address V = LV.getAddress(); |
5259 | const auto *DCE = cast<CXXDynamicCastExpr>(Val: E); |
5260 | return MakeNaturalAlignRawAddrLValue(V: EmitDynamicCast(V, DCE), T: E->getType()); |
5261 | } |
5262 | |
5263 | case CK_ConstructorConversion: |
5264 | case CK_UserDefinedConversion: |
5265 | case CK_CPointerToObjCPointerCast: |
5266 | case CK_BlockPointerToObjCPointerCast: |
5267 | case CK_LValueToRValue: |
5268 | return EmitLValue(E: E->getSubExpr()); |
5269 | |
5270 | case CK_NoOp: { |
5271 | // CK_NoOp can model a qualification conversion, which can remove an array |
5272 | // bound and change the IR type. |
5273 | // FIXME: Once pointee types are removed from IR, remove this. |
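     | // For instance (illustrative), binding `int (&)[]` to an `int[4]` array
     | // (C++20) is modeled as a no-op cast that drops the bound and changes the
     | // IR element type.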
5274 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5275 | // Propagate the volatile qualifier to the LValue, if it exists in E.
5276 | if (E->changesVolatileQualification()) |
5277 | LV.getQuals() = E->getType().getQualifiers(); |
5278 | if (LV.isSimple()) { |
5279 | Address V = LV.getAddress(); |
5280 | if (V.isValid()) { |
5281 | llvm::Type *T = ConvertTypeForMem(T: E->getType()); |
5282 | if (V.getElementType() != T) |
5283 | LV.setAddress(V.withElementType(ElemTy: T)); |
5284 | } |
5285 | } |
5286 | return LV; |
5287 | } |
5288 | |
5289 | case CK_UncheckedDerivedToBase: |
5290 | case CK_DerivedToBase: { |
5291 | const auto *DerivedClassTy = |
5292 | E->getSubExpr()->getType()->castAs<RecordType>(); |
5293 | auto *DerivedClassDecl = cast<CXXRecordDecl>(Val: DerivedClassTy->getDecl()); |
5294 | |
5295 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5296 | Address This = LV.getAddress(); |
5297 | |
5298 | // Perform the derived-to-base conversion |
5299 | Address Base = GetAddressOfBaseClass( |
5300 | Value: This, Derived: DerivedClassDecl, PathBegin: E->path_begin(), PathEnd: E->path_end(), |
5301 | /*NullCheckValue=*/false, Loc: E->getExprLoc()); |
5302 | |
5303 | // TODO: Support accesses to members of base classes in TBAA. For now, we |
5304 | // conservatively pretend that the complete object is of the base class |
5305 | // type. |
5306 | return MakeAddrLValue(Addr: Base, T: E->getType(), BaseInfo: LV.getBaseInfo(), |
5307 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType())); |
5308 | } |
5309 | case CK_ToUnion: |
5310 | return EmitAggExprToLValue(E); |
5311 | case CK_BaseToDerived: { |
5312 | const auto *DerivedClassTy = E->getType()->castAs<RecordType>(); |
5313 | auto *DerivedClassDecl = cast<CXXRecordDecl>(Val: DerivedClassTy->getDecl()); |
5314 | |
5315 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5316 | |
5317 | // Perform the base-to-derived conversion |
5318 | Address Derived = GetAddressOfDerivedClass( |
5319 | Value: LV.getAddress(), Derived: DerivedClassDecl, PathBegin: E->path_begin(), PathEnd: E->path_end(), |
5320 | /*NullCheckValue=*/false); |
5321 | |
5322 | // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is |
5323 | // performed and the object is not of the derived type. |
5324 | if (sanitizePerformTypeCheck()) |
5325 | EmitTypeCheck(TCK: TCK_DowncastReference, Loc: E->getExprLoc(), Addr: Derived, |
5326 | Type: E->getType()); |
5327 | |
5328 | if (SanOpts.has(K: SanitizerKind::CFIDerivedCast)) |
5329 | EmitVTablePtrCheckForCast(T: E->getType(), Derived, |
5330 | /*MayBeNull=*/false, TCK: CFITCK_DerivedCast, |
5331 | Loc: E->getBeginLoc()); |
5332 | |
5333 | return MakeAddrLValue(Addr: Derived, T: E->getType(), BaseInfo: LV.getBaseInfo(), |
5334 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType())); |
5335 | } |
5336 | case CK_LValueBitCast: { |
5337 | // This must be a reinterpret_cast (or c-style equivalent). |
5338 | const auto *CE = cast<ExplicitCastExpr>(Val: E); |
5339 | |
5340 | CGM.EmitExplicitCastExprType(E: CE, CGF: this); |
5341 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5342 | Address V = LV.getAddress().withElementType( |
5343 | ElemTy: ConvertTypeForMem(T: CE->getTypeAsWritten()->getPointeeType())); |
5344 | |
5345 | if (SanOpts.has(K: SanitizerKind::CFIUnrelatedCast)) |
5346 | EmitVTablePtrCheckForCast(T: E->getType(), Derived: V, |
5347 | /*MayBeNull=*/false, TCK: CFITCK_UnrelatedCast, |
5348 | Loc: E->getBeginLoc()); |
5349 | |
5350 | return MakeAddrLValue(Addr: V, T: E->getType(), BaseInfo: LV.getBaseInfo(), |
5351 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType())); |
5352 | } |
5353 | case CK_AddressSpaceConversion: { |
5354 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5355 | QualType DestTy = getContext().getPointerType(T: E->getType()); |
5356 | llvm::Value *V = getTargetHooks().performAddrSpaceCast( |
5357 | CGF&: *this, V: LV.getPointer(CGF&: *this), |
5358 | SrcAddr: E->getSubExpr()->getType().getAddressSpace(), |
5359 | DestAddr: E->getType().getAddressSpace(), DestTy: ConvertType(T: DestTy)); |
5360 | return MakeAddrLValue(Addr: Address(V, ConvertTypeForMem(T: E->getType()), |
5361 | LV.getAddress().getAlignment()), |
5362 | T: E->getType(), BaseInfo: LV.getBaseInfo(), TBAAInfo: LV.getTBAAInfo()); |
5363 | } |
5364 | case CK_ObjCObjectLValueCast: { |
5365 | LValue LV = EmitLValue(E: E->getSubExpr()); |
5366 | Address V = LV.getAddress().withElementType(ElemTy: ConvertType(T: E->getType())); |
5367 | return MakeAddrLValue(Addr: V, T: E->getType(), BaseInfo: LV.getBaseInfo(), |
5368 | TBAAInfo: CGM.getTBAAInfoForSubobject(Base: LV, AccessType: E->getType())); |
5369 | } |
5370 | case CK_ZeroToOCLOpaqueType: |
5371 | llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid" ); |
5372 | |
5373 | case CK_VectorSplat: { |
5374 | // LValue results of vector splats are only supported in HLSL. |
5375 | if (!getLangOpts().HLSL) |
5376 | return EmitUnsupportedLValue(E, Name: "unexpected cast lvalue" ); |
5377 | return EmitLValue(E: E->getSubExpr()); |
5378 | } |
5379 | } |
5380 | |
5381 | llvm_unreachable("Unhandled lvalue cast kind?" ); |
5382 | } |
5383 | |
5384 | LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { |
5385 | assert(OpaqueValueMappingData::shouldBindAsLValue(e)); |
5386 | return getOrCreateOpaqueLValueMapping(e); |
5387 | } |
5388 | |
5389 | LValue |
5390 | CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) { |
5391 | assert(OpaqueValueMapping::shouldBindAsLValue(e)); |
5392 | |
5393 | llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator |
5394 | it = OpaqueLValues.find(Val: e); |
5395 | |
5396 | if (it != OpaqueLValues.end()) |
5397 | return it->second; |
5398 | |
5399 | assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted" ); |
5400 | return EmitLValue(E: e->getSourceExpr()); |
5401 | } |
5402 | |
5403 | RValue |
5404 | CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) { |
5405 | assert(!OpaqueValueMapping::shouldBindAsLValue(e)); |
5406 | |
5407 | llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator |
5408 | it = OpaqueRValues.find(Val: e); |
5409 | |
5410 | if (it != OpaqueRValues.end()) |
5411 | return it->second; |
5412 | |
5413 | assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted" ); |
5414 | return EmitAnyExpr(E: e->getSourceExpr()); |
5415 | } |
5416 | |
5417 | RValue CodeGenFunction::EmitRValueForField(LValue LV, |
5418 | const FieldDecl *FD, |
5419 | SourceLocation Loc) { |
5420 | QualType FT = FD->getType(); |
5421 | LValue FieldLV = EmitLValueForField(base: LV, field: FD); |
5422 | switch (getEvaluationKind(T: FT)) { |
5423 | case TEK_Complex: |
5424 | return RValue::getComplex(C: EmitLoadOfComplex(src: FieldLV, loc: Loc)); |
5425 | case TEK_Aggregate: |
5426 | return FieldLV.asAggregateRValue(); |
5427 | case TEK_Scalar: |
5428 | // This routine is used to load fields one-by-one to perform a copy, so |
5429 | // don't load reference fields. |
5430 | if (FD->getType()->isReferenceType()) |
5431 | return RValue::get(V: FieldLV.getPointer(CGF&: *this)); |
5432 | // Call EmitLoadOfScalar to emit a primitive load, except when the lvalue
5433 | // is a bitfield, which needs EmitLoadOfLValue.
5434 | if (FieldLV.isBitField()) |
5435 | return EmitLoadOfLValue(LV: FieldLV, Loc); |
5436 | return RValue::get(V: EmitLoadOfScalar(lvalue: FieldLV, Loc)); |
5437 | } |
5438 | llvm_unreachable("bad evaluation kind" ); |
5439 | } |
5440 | |
5441 | //===--------------------------------------------------------------------===// |
5442 | // Expression Emission |
5443 | //===--------------------------------------------------------------------===// |
5444 | |
5445 | RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, |
5446 | ReturnValueSlot ReturnValue) { |
5447 | // Builtins never have block type. |
5448 | if (E->getCallee()->getType()->isBlockPointerType()) |
5449 | return EmitBlockCallExpr(E, ReturnValue); |
5450 | |
5451 | if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Val: E)) |
5452 | return EmitCXXMemberCallExpr(E: CE, ReturnValue); |
5453 | |
5454 | if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(Val: E)) |
5455 | return EmitCUDAKernelCallExpr(E: CE, ReturnValue); |
5456 | |
5457 | // A CXXOperatorCallExpr is created even for explicit object methods, but
5458 | // these should be treated like static function calls.
5459 | if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(Val: E)) |
5460 | if (const auto *MD = |
5461 | dyn_cast_if_present<CXXMethodDecl>(Val: CE->getCalleeDecl()); |
5462 | MD && MD->isImplicitObjectMemberFunction()) |
5463 | return EmitCXXOperatorMemberCallExpr(E: CE, MD, ReturnValue); |
5464 | |
5465 | CGCallee callee = EmitCallee(E: E->getCallee()); |
5466 | |
5467 | if (callee.isBuiltin()) { |
5468 | return EmitBuiltinExpr(GD: callee.getBuiltinDecl(), BuiltinID: callee.getBuiltinID(), |
5469 | E, ReturnValue); |
5470 | } |
5471 | |
5472 | if (callee.isPseudoDestructor()) { |
5473 | return EmitCXXPseudoDestructorExpr(E: callee.getPseudoDestructorExpr()); |
5474 | } |
5475 | |
5476 | return EmitCall(FnType: E->getCallee()->getType(), Callee: callee, E, ReturnValue); |
5477 | } |
5478 | |
5479 | /// Emit a CallExpr without considering whether it might be a subclass. |
5480 | RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E, |
5481 | ReturnValueSlot ReturnValue) { |
5482 | CGCallee Callee = EmitCallee(E: E->getCallee()); |
5483 | return EmitCall(FnType: E->getCallee()->getType(), Callee, E, ReturnValue); |
5484 | } |
5485 | |
5486 | // Detect the unusual situation where an inline version is shadowed by a |
5487 | // non-inline version. In that case we should pick the external one |
5488 | // everywhere. That's GCC behavior too. |
5489 | static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) { |
5490 | for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl()) |
5491 | if (!PD->isInlineBuiltinDeclaration()) |
5492 | return false; |
5493 | return true; |
5494 | } |
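     | // Illustrative sketch of the pattern this detects (hypothetical code):
     | //   extern inline __attribute__((gnu_inline, always_inline)) void *
     | //   memcpy(void *d, const void *s, size_t n) { /* custom body */ }
     | // Here every visible declaration of memcpy is an inline builtin declaration;
     | // a later non-inline redeclaration makes this function return false.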
5495 | |
5496 | static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) { |
5497 | const FunctionDecl *FD = cast<FunctionDecl>(Val: GD.getDecl()); |
5498 | |
5499 | if (auto builtinID = FD->getBuiltinID()) { |
5500 | std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str(); |
5501 | std::string NoBuiltins = "no-builtins" ; |
5502 | |
5503 | StringRef Ident = CGF.CGM.getMangledName(GD); |
5504 | std::string FDInlineName = (Ident + ".inline" ).str(); |
5505 | |
5506 | bool IsPredefinedLibFunction = |
5507 | CGF.getContext().BuiltinInfo.isPredefinedLibFunction(ID: builtinID); |
5508 | bool HasAttributeNoBuiltin = |
5509 | CGF.CurFn->getAttributes().hasFnAttr(Kind: NoBuiltinFD) || |
5510 | CGF.CurFn->getAttributes().hasFnAttr(Kind: NoBuiltins); |
5511 | |
5512 | // When directly calling an inline builtin, call it through its mangled
5513 | // name to make it clear it's not the actual builtin.
5514 | if (CGF.CurFn->getName() != FDInlineName && |
5515 | OnlyHasInlineBuiltinDeclaration(FD)) { |
5516 | llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD); |
5517 | llvm::Function *Fn = llvm::cast<llvm::Function>(Val: CalleePtr); |
5518 | llvm::Module *M = Fn->getParent(); |
5519 | llvm::Function *Clone = M->getFunction(Name: FDInlineName); |
5520 | if (!Clone) { |
5521 | Clone = llvm::Function::Create(Ty: Fn->getFunctionType(), |
5522 | Linkage: llvm::GlobalValue::InternalLinkage, |
5523 | AddrSpace: Fn->getAddressSpace(), N: FDInlineName, M); |
5524 | Clone->addFnAttr(Kind: llvm::Attribute::AlwaysInline); |
5525 | } |
5526 | return CGCallee::forDirect(functionPtr: Clone, abstractInfo: GD); |
5527 | } |
5528 | |
5529 | // Replaceable builtins provide their own implementation of a builtin. If we
5530 | // are in an inline builtin implementation, avoid trivial infinite
5531 | // recursion. Honor __attribute__((no_builtin("foo"))) or
5532 | // __attribute__((no_builtin)) on the current function, unless foo is not a
5533 | // predefined library function, in which case we must generate the builtin
5534 | // no matter what.
5535 | else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin) |
5536 | return CGCallee::forBuiltin(builtinID, builtinDecl: FD); |
5537 | } |
5538 | |
5539 | llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD); |
5540 | if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice && |
5541 | FD->hasAttr<CUDAGlobalAttr>()) |
5542 | CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub( |
5543 | Handle: cast<llvm::GlobalValue>(Val: CalleePtr->stripPointerCasts())); |
5544 | |
5545 | return CGCallee::forDirect(functionPtr: CalleePtr, abstractInfo: GD); |
5546 | } |
5547 | |
5548 | CGCallee CodeGenFunction::EmitCallee(const Expr *E) { |
5549 | E = E->IgnoreParens(); |
5550 | |
5551 | // Look through function-to-pointer decay. |
5552 | if (auto ICE = dyn_cast<ImplicitCastExpr>(Val: E)) { |
5553 | if (ICE->getCastKind() == CK_FunctionToPointerDecay || |
5554 | ICE->getCastKind() == CK_BuiltinFnToFnPtr) { |
5555 | return EmitCallee(E: ICE->getSubExpr()); |
5556 | } |
5557 | |
5558 | // Resolve direct calls. |
5559 | } else if (auto DRE = dyn_cast<DeclRefExpr>(Val: E)) { |
5560 | if (auto FD = dyn_cast<FunctionDecl>(Val: DRE->getDecl())) { |
5561 | return EmitDirectCallee(CGF&: *this, GD: FD); |
5562 | } |
5563 | } else if (auto ME = dyn_cast<MemberExpr>(Val: E)) { |
5564 | if (auto FD = dyn_cast<FunctionDecl>(Val: ME->getMemberDecl())) { |
5565 | EmitIgnoredExpr(E: ME->getBase()); |
5566 | return EmitDirectCallee(CGF&: *this, GD: FD); |
5567 | } |
5568 | |
5569 | // Look through template substitutions. |
5570 | } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(Val: E)) { |
5571 | return EmitCallee(E: NTTP->getReplacement()); |
5572 | |
5573 | // Treat pseudo-destructor calls differently. |
5574 | } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(Val: E)) { |
5575 | return CGCallee::forPseudoDestructor(E: PDE); |
5576 | } |
5577 | |
5578 | // Otherwise, we have an indirect reference. |
5579 | llvm::Value *calleePtr; |
5580 | QualType functionType; |
5581 | if (auto ptrType = E->getType()->getAs<PointerType>()) { |
5582 | calleePtr = EmitScalarExpr(E); |
5583 | functionType = ptrType->getPointeeType(); |
5584 | } else { |
5585 | functionType = E->getType(); |
5586 | calleePtr = EmitLValue(E, IsKnownNonNull: KnownNonNull).getPointer(CGF&: *this); |
5587 | } |
5588 | assert(functionType->isFunctionType()); |
5589 | |
5590 | GlobalDecl GD; |
5591 | if (const auto *VD = |
5592 | dyn_cast_or_null<VarDecl>(Val: E->getReferencedDeclOfCallee())) |
5593 | GD = GlobalDecl(VD); |
5594 | |
5595 | CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD); |
5596 | CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(T: functionType); |
5597 | CGCallee callee(calleeInfo, calleePtr, pointerAuth); |
5598 | return callee; |
5599 | } |
5600 | |
5601 | LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { |
5602 | // Comma expressions just emit their LHS then their RHS as an l-value. |
5603 | if (E->getOpcode() == BO_Comma) { |
5604 | EmitIgnoredExpr(E: E->getLHS()); |
5605 | EnsureInsertPoint(); |
5606 | return EmitLValue(E: E->getRHS()); |
5607 | } |
5608 | |
5609 | if (E->getOpcode() == BO_PtrMemD || |
5610 | E->getOpcode() == BO_PtrMemI) |
5611 | return EmitPointerToDataMemberBinaryExpr(E); |
5612 | |
5613 | assert(E->getOpcode() == BO_Assign && "unexpected binary l-value" ); |
5614 | |
5615 | // Note that in all of these cases, __block variables need the RHS |
5616 | // evaluated first just in case the variable gets moved by the RHS. |
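     | // For example (illustrative): in `blockVar = makeAndCopyBlock()`, copying a
     | // block that captures blockVar can move it to the heap, so the RHS runs
     | // before the l-value for blockVar is formed.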
5617 | |
5618 | switch (getEvaluationKind(T: E->getType())) { |
5619 | case TEK_Scalar: { |
5620 | switch (E->getLHS()->getType().getObjCLifetime()) { |
5621 | case Qualifiers::OCL_Strong: |
5622 | return EmitARCStoreStrong(e: E, /*ignored*/ false).first; |
5623 | |
5624 | case Qualifiers::OCL_Autoreleasing: |
5625 | return EmitARCStoreAutoreleasing(e: E).first; |
5626 | |
5627 | // No reason to do any of these differently. |
5628 | case Qualifiers::OCL_None: |
5629 | case Qualifiers::OCL_ExplicitNone: |
5630 | case Qualifiers::OCL_Weak: |
5631 | break; |
5632 | } |
5633 | |
5634 | // TODO: Can we de-duplicate this code with the corresponding code in |
5635 | // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works? |
5636 | RValue RV; |
5637 | llvm::Value *Previous = nullptr; |
5638 | QualType SrcType = E->getRHS()->getType(); |
5639 | // Check if the LHS is a bitfield. If the RHS contains an implicit cast
5640 | // expression, we want to extract that value and potentially (if the bitfield
5641 | // sanitizer is enabled) use it to check for an implicit conversion.
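     | // Illustrative example (hypothetical names):
     | //   struct S { unsigned u : 3; };
     | //   s.u = wideValue;  // keep the pre-truncation value of wideValue so the
     | //                     // sanitizer can flag a lossy implicit conversion.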
5642 | if (E->getLHS()->refersToBitField()) { |
5643 | llvm::Value *RHS = |
5644 | EmitWithOriginalRHSBitfieldAssignment(E, Previous: &Previous, SrcType: &SrcType); |
5645 | RV = RValue::get(V: RHS); |
5646 | } else |
5647 | RV = EmitAnyExpr(E: E->getRHS()); |
5648 | |
5649 | LValue LV = EmitCheckedLValue(E: E->getLHS(), TCK: TCK_Store); |
5650 | |
5651 | if (RV.isScalar()) |
5652 | EmitNullabilityCheck(LHS: LV, RHS: RV.getScalarVal(), Loc: E->getExprLoc()); |
5653 | |
5654 | if (LV.isBitField()) { |
5655 | llvm::Value *Result = nullptr; |
5656 | // If bitfield sanitizers are enabled we want to use the result |
5657 | // to check whether a truncation or sign change has occurred. |
5658 | if (SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion)) |
5659 | EmitStoreThroughBitfieldLValue(Src: RV, Dst: LV, Result: &Result); |
5660 | else |
5661 | EmitStoreThroughBitfieldLValue(Src: RV, Dst: LV); |
5662 | |
5663 | // If the expression contained an implicit conversion, make sure |
5664 | // to use the value before the scalar conversion. |
5665 | llvm::Value *Src = Previous ? Previous : RV.getScalarVal(); |
5666 | QualType DstType = E->getLHS()->getType(); |
5667 | EmitBitfieldConversionCheck(Src, SrcType, Dst: Result, DstType, |
5668 | Info: LV.getBitFieldInfo(), Loc: E->getExprLoc()); |
5669 | } else |
5670 | EmitStoreThroughLValue(Src: RV, Dst: LV); |
5671 | |
5672 | if (getLangOpts().OpenMP) |
5673 | CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF&: *this, |
5674 | LHS: E->getLHS()); |
5675 | return LV; |
5676 | } |
5677 | |
5678 | case TEK_Complex: |
5679 | return EmitComplexAssignmentLValue(E); |
5680 | |
5681 | case TEK_Aggregate: |
5682 | return EmitAggExprToLValue(E); |
5683 | } |
5684 | llvm_unreachable("bad evaluation kind" ); |
5685 | } |
5686 | |
5687 | LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { |
5688 | RValue RV = EmitCallExpr(E); |
5689 | |
5690 | if (!RV.isScalar()) |
5691 | return MakeAddrLValue(Addr: RV.getAggregateAddress(), T: E->getType(), |
5692 | Source: AlignmentSource::Decl); |
5693 | |
5694 | assert(E->getCallReturnType(getContext())->isReferenceType() && |
5695 | "Can't have a scalar return unless the return type is a " |
5696 | "reference type!" ); |
5697 | |
5698 | return MakeNaturalAlignPointeeAddrLValue(V: RV.getScalarVal(), T: E->getType()); |
5699 | } |
5700 | |
5701 | LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { |
5702 | // FIXME: This shouldn't require another copy. |
5703 | return EmitAggExprToLValue(E); |
5704 | } |
5705 | |
5706 | LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { |
5707 | assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() |
5708 | && "binding l-value to type which needs a temporary" ); |
5709 | AggValueSlot Slot = CreateAggTemp(T: E->getType()); |
5710 | EmitCXXConstructExpr(E, Dest: Slot); |
5711 | return MakeAddrLValue(Addr: Slot.getAddress(), T: E->getType(), Source: AlignmentSource::Decl); |
5712 | } |
5713 | |
5714 | LValue |
5715 | CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { |
5716 | return MakeNaturalAlignRawAddrLValue(V: EmitCXXTypeidExpr(E), T: E->getType()); |
5717 | } |
5718 | |
5719 | Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) { |
5720 | return CGM.GetAddrOfMSGuidDecl(GD: E->getGuidDecl()) |
5721 | .withElementType(ElemTy: ConvertType(T: E->getType())); |
5722 | } |
5723 | |
5724 | LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) { |
5725 | return MakeAddrLValue(Addr: EmitCXXUuidofExpr(E), T: E->getType(), |
5726 | Source: AlignmentSource::Decl); |
5727 | } |
5728 | |
5729 | LValue |
5730 | CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { |
5731 | AggValueSlot Slot = CreateAggTemp(T: E->getType(), Name: "temp.lvalue" ); |
5732 | Slot.setExternallyDestructed(); |
5733 | EmitAggExpr(E: E->getSubExpr(), AS: Slot); |
5734 | EmitCXXTemporary(Temporary: E->getTemporary(), TempType: E->getType(), Ptr: Slot.getAddress()); |
5735 | return MakeAddrLValue(Addr: Slot.getAddress(), T: E->getType(), Source: AlignmentSource::Decl); |
5736 | } |
5737 | |
5738 | LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { |
5739 | RValue RV = EmitObjCMessageExpr(E); |
5740 | |
5741 | if (!RV.isScalar()) |
5742 | return MakeAddrLValue(Addr: RV.getAggregateAddress(), T: E->getType(), |
5743 | Source: AlignmentSource::Decl); |
5744 | |
5745 | assert(E->getMethodDecl()->getReturnType()->isReferenceType() && |
5746 | "Can't have a scalar return unless the return type is a " |
5747 | "reference type!" ); |
5748 | |
5749 | return MakeNaturalAlignPointeeAddrLValue(V: RV.getScalarVal(), T: E->getType()); |
5750 | } |
5751 | |
5752 | LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { |
5753 | Address V = |
5754 | CGM.getObjCRuntime().GetAddrOfSelector(CGF&: *this, Sel: E->getSelector()); |
5755 | return MakeAddrLValue(Addr: V, T: E->getType(), Source: AlignmentSource::Decl); |
5756 | } |
5757 | |
5758 | llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, |
5759 | const ObjCIvarDecl *Ivar) { |
5760 | return CGM.getObjCRuntime().EmitIvarOffset(CGF&: *this, Interface, Ivar); |
5761 | } |
5762 | |
5763 | llvm::Value * |
5764 | CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, |
5765 | const ObjCIvarDecl *Ivar) { |
5766 | llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar); |
5767 | QualType PointerDiffType = getContext().getPointerDiffType(); |
5768 | return Builder.CreateZExtOrTrunc(V: OffsetValue, |
5769 | DestTy: getTypes().ConvertType(T: PointerDiffType)); |
5770 | } |
5771 | |
5772 | LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, |
5773 | llvm::Value *BaseValue, |
5774 | const ObjCIvarDecl *Ivar, |
5775 | unsigned CVRQualifiers) { |
5776 | return CGM.getObjCRuntime().EmitObjCValueForIvar(CGF&: *this, ObjectTy, BaseValue, |
5777 | Ivar, CVRQualifiers); |
5778 | } |
5779 | |
5780 | LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { |
5781 | // FIXME: A lot of the code below could be shared with EmitMemberExpr. |
5782 | llvm::Value *BaseValue = nullptr; |
5783 | const Expr *BaseExpr = E->getBase(); |
5784 | Qualifiers BaseQuals; |
5785 | QualType ObjectTy; |
5786 | if (E->isArrow()) { |
5787 | BaseValue = EmitScalarExpr(E: BaseExpr); |
5788 | ObjectTy = BaseExpr->getType()->getPointeeType(); |
5789 | BaseQuals = ObjectTy.getQualifiers(); |
5790 | } else { |
5791 | LValue BaseLV = EmitLValue(E: BaseExpr); |
5792 | BaseValue = BaseLV.getPointer(CGF&: *this); |
5793 | ObjectTy = BaseExpr->getType(); |
5794 | BaseQuals = ObjectTy.getQualifiers(); |
5795 | } |
5796 | |
5797 | LValue LV = |
5798 | EmitLValueForIvar(ObjectTy, BaseValue, Ivar: E->getDecl(), |
5799 | CVRQualifiers: BaseQuals.getCVRQualifiers()); |
5800 | setObjCGCLValueClass(Ctx: getContext(), E, LV); |
5801 | return LV; |
5802 | } |
5803 | |
5804 | LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { |
5805 | // We can only get an l-value for a statement expression returning an aggregate type.
5806 | RValue RV = EmitAnyExprToTemp(E); |
5807 | return MakeAddrLValue(Addr: RV.getAggregateAddress(), T: E->getType(), |
5808 | Source: AlignmentSource::Decl); |
5809 | } |
5810 | |
5811 | RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee, |
5812 | const CallExpr *E, ReturnValueSlot ReturnValue, |
5813 | llvm::Value *Chain) { |
5814 | // Get the actual function type. The callee type will always be a pointer to |
5815 | // function type or a block pointer type. |
5816 | assert(CalleeType->isFunctionPointerType() && |
5817 | "Call must have function pointer type!" ); |
5818 | |
5819 | const Decl *TargetDecl = |
5820 | OrigCallee.getAbstractInfo().getCalleeDecl().getDecl(); |
5821 | |
5822 | assert((!isa_and_present<FunctionDecl>(TargetDecl) || |
5823 | !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) && |
5824 | "trying to emit a call to an immediate function" ); |
5825 | |
5826 | CalleeType = getContext().getCanonicalType(T: CalleeType); |
5827 | |
5828 | auto PointeeType = cast<PointerType>(Val&: CalleeType)->getPointeeType(); |
5829 | |
5830 | CGCallee Callee = OrigCallee; |
5831 | |
5832 | if (SanOpts.has(K: SanitizerKind::Function) && |
5833 | (!TargetDecl || !isa<FunctionDecl>(Val: TargetDecl)) && |
5834 | !isa<FunctionNoProtoType>(Val: PointeeType)) { |
5835 | if (llvm::Constant *PrefixSig = |
5836 | CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) { |
5837 | SanitizerScope SanScope(this); |
5838 | auto *TypeHash = getUBSanFunctionTypeHash(T: PointeeType); |
5839 | |
5840 | llvm::Type *PrefixSigType = PrefixSig->getType(); |
5841 | llvm::StructType *PrefixStructTy = llvm::StructType::get( |
5842 | Context&: CGM.getLLVMContext(), Elements: {PrefixSigType, Int32Ty}, /*isPacked=*/true); |
5843 | |
5844 | llvm::Value *CalleePtr = Callee.getFunctionPointer(); |
5845 | if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) { |
5846 | // Use raw pointer since we are using the callee pointer as data here. |
5847 | Address Addr = |
5848 | Address(CalleePtr, CalleePtr->getType(), |
5849 | CharUnits::fromQuantity( |
5850 | Quantity: CalleePtr->getPointerAlignment(DL: CGM.getDataLayout())), |
5851 | Callee.getPointerAuthInfo(), nullptr); |
5852 | CalleePtr = Addr.emitRawPointer(CGF&: *this); |
5853 | } |
5854 | |
5855 | // On 32-bit Arm, the low bit of a function pointer indicates whether |
5856 | // it's using the Arm or Thumb instruction set. The actual first |
5857 | // instruction lives at the same address either way, so we must clear |
5858 | // that low bit before using the function address to find the prefix |
5859 | // structure. |
5860 | // |
5861 | // This applies to both Arm and Thumb target triples, because |
5862 | // either one could be used in an interworking context where it |
5863 | // might be passed function pointers of both types. |
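     | // Conceptually: AlignedCalleePtr = (uintptr_t)CalleePtr & ~1, done via the
     | // ptrtoint / and / inttoptr sequence below.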
5864 | llvm::Value *AlignedCalleePtr; |
5865 | if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) { |
5866 | llvm::Value *CalleeAddress = |
5867 | Builder.CreatePtrToInt(V: CalleePtr, DestTy: IntPtrTy); |
5868 | llvm::Value *Mask = llvm::ConstantInt::get(Ty: IntPtrTy, V: ~1); |
5869 | llvm::Value *AlignedCalleeAddress = |
5870 | Builder.CreateAnd(LHS: CalleeAddress, RHS: Mask); |
5871 | AlignedCalleePtr = |
5872 | Builder.CreateIntToPtr(V: AlignedCalleeAddress, DestTy: CalleePtr->getType()); |
5873 | } else { |
5874 | AlignedCalleePtr = CalleePtr; |
5875 | } |
5876 | |
5877 | llvm::Value *CalleePrefixStruct = AlignedCalleePtr; |
5878 | llvm::Value *CalleeSigPtr = |
5879 | Builder.CreateConstGEP2_32(Ty: PrefixStructTy, Ptr: CalleePrefixStruct, Idx0: -1, Idx1: 0); |
5880 | llvm::Value *CalleeSig = |
5881 | Builder.CreateAlignedLoad(Ty: PrefixSigType, Addr: CalleeSigPtr, Align: getIntAlign()); |
5882 | llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(LHS: CalleeSig, RHS: PrefixSig); |
5883 | |
5884 | llvm::BasicBlock *Cont = createBasicBlock(name: "cont" ); |
5885 | llvm::BasicBlock *TypeCheck = createBasicBlock(name: "typecheck" ); |
5886 | Builder.CreateCondBr(Cond: CalleeSigMatch, True: TypeCheck, False: Cont); |
5887 | |
5888 | EmitBlock(BB: TypeCheck); |
5889 | llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad( |
5890 | Ty: Int32Ty, |
5891 | Addr: Builder.CreateConstGEP2_32(Ty: PrefixStructTy, Ptr: CalleePrefixStruct, Idx0: -1, Idx1: 1), |
5892 | Align: getPointerAlign()); |
5893 | llvm::Value *CalleeTypeHashMatch = |
5894 | Builder.CreateICmpEQ(LHS: CalleeTypeHash, RHS: TypeHash); |
5895 | llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc: E->getBeginLoc()), |
5896 | EmitCheckTypeDescriptor(T: CalleeType)}; |
5897 | EmitCheck(Checked: std::make_pair(x&: CalleeTypeHashMatch, y: SanitizerKind::Function), |
5898 | CheckHandler: SanitizerHandler::FunctionTypeMismatch, StaticArgs: StaticData, |
5899 | DynamicArgs: {CalleePtr}); |
5900 | |
5901 | Builder.CreateBr(Dest: Cont); |
5902 | EmitBlock(BB: Cont); |
5903 | } |
5904 | } |
5905 | |
5906 | const auto *FnType = cast<FunctionType>(Val&: PointeeType); |
5907 | |
5908 | // If we are checking indirect calls and this call is indirect, check that the |
5909 | // function pointer is a member of the bit set for the function type. |
5910 | if (SanOpts.has(K: SanitizerKind::CFIICall) && |
5911 | (!TargetDecl || !isa<FunctionDecl>(Val: TargetDecl))) { |
5912 | SanitizerScope SanScope(this); |
5913 | EmitSanitizerStatReport(SSK: llvm::SanStat_CFI_ICall); |
5914 | |
5915 | llvm::Metadata *MD; |
5916 | if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers) |
5917 | MD = CGM.CreateMetadataIdentifierGeneralized(T: QualType(FnType, 0)); |
5918 | else |
5919 | MD = CGM.CreateMetadataIdentifierForType(T: QualType(FnType, 0)); |
5920 | |
5921 | llvm::Value *TypeId = llvm::MetadataAsValue::get(Context&: getLLVMContext(), MD); |
5922 | |
5923 | llvm::Value *CalleePtr = Callee.getFunctionPointer(); |
5924 | llvm::Value *TypeTest = Builder.CreateCall( |
5925 | Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::type_test), Args: {CalleePtr, TypeId}); |
5926 | |
5927 | auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD); |
5928 | llvm::Constant *StaticData[] = { |
5929 | llvm::ConstantInt::get(Ty: Int8Ty, V: CFITCK_ICall), |
5930 | EmitCheckSourceLocation(Loc: E->getBeginLoc()), |
5931 | EmitCheckTypeDescriptor(T: QualType(FnType, 0)), |
5932 | }; |
5933 | if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) { |
5934 | EmitCfiSlowPathCheck(Kind: SanitizerKind::CFIICall, Cond: TypeTest, TypeId: CrossDsoTypeId, |
5935 | Ptr: CalleePtr, StaticArgs: StaticData); |
5936 | } else { |
5937 | EmitCheck(Checked: std::make_pair(x&: TypeTest, y: SanitizerKind::CFIICall), |
5938 | CheckHandler: SanitizerHandler::CFICheckFail, StaticArgs: StaticData, |
5939 | DynamicArgs: {CalleePtr, llvm::UndefValue::get(T: IntPtrTy)}); |
5940 | } |
5941 | } |
5942 | |
5943 | CallArgList Args; |
5944 | if (Chain) |
5945 | Args.add(rvalue: RValue::get(V: Chain), type: CGM.getContext().VoidPtrTy); |
5946 | |
5947 | // C++17 requires that we evaluate arguments to a call using assignment syntax |
5948 | // right-to-left, and that we evaluate arguments to certain other operators |
5949 | // left-to-right. Note that we allow this to override the order dictated by |
5950 | // the calling convention on the MS ABI, which means that parameter |
5951 | // destruction order is not necessarily reverse construction order. |
5952 | // FIXME: Revisit this based on C++ committee response to unimplementability. |
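     | // For example (illustrative): in an overloaded `a = b`, b is evaluated
     | // before a (right-to-left); in an overloaded `a << b`, a is evaluated
     | // before b (left-to-right).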
5953 | EvaluationOrder Order = EvaluationOrder::Default; |
5954 | bool StaticOperator = false; |
5955 | if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(Val: E)) { |
5956 | if (OCE->isAssignmentOp()) |
5957 | Order = EvaluationOrder::ForceRightToLeft; |
5958 | else { |
5959 | switch (OCE->getOperator()) { |
5960 | case OO_LessLess: |
5961 | case OO_GreaterGreater: |
5962 | case OO_AmpAmp: |
5963 | case OO_PipePipe: |
5964 | case OO_Comma: |
5965 | case OO_ArrowStar: |
5966 | Order = EvaluationOrder::ForceLeftToRight; |
5967 | break; |
5968 | default: |
5969 | break; |
5970 | } |
5971 | } |
5972 | |
5973 | if (const auto *MD = |
5974 | dyn_cast_if_present<CXXMethodDecl>(Val: OCE->getCalleeDecl()); |
5975 | MD && MD->isStatic()) |
5976 | StaticOperator = true; |
5977 | } |
5978 | |
5979 | auto Arguments = E->arguments(); |
5980 | if (StaticOperator) { |
5981 | // If we're calling a static operator, we need to emit the object argument |
5982 | // and ignore it. |
5983 | EmitIgnoredExpr(E: E->getArg(Arg: 0)); |
5984 | Arguments = drop_begin(RangeOrContainer&: Arguments, N: 1); |
5985 | } |
5986 | EmitCallArgs(Args, Prototype: dyn_cast<FunctionProtoType>(Val: FnType), ArgRange: Arguments, |
5987 | AC: E->getDirectCallee(), /*ParamsToSkip=*/0, Order); |
5988 | |
5989 | const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall( |
5990 | Args, Ty: FnType, /*ChainCall=*/Chain); |
5991 | |
5992 | // C99 6.5.2.2p6: |
5993 | // If the expression that denotes the called function has a type |
5994 | // that does not include a prototype, [the default argument |
5995 | // promotions are performed]. If the number of arguments does not |
5996 | // equal the number of parameters, the behavior is undefined. If |
5997 | // the function is defined with a type that includes a prototype, |
5998 | // and either the prototype ends with an ellipsis (, ...) or the |
5999 | // types of the arguments after promotion are not compatible with |
6000 | // the types of the parameters, the behavior is undefined. If the |
6001 | // function is defined with a type that does not include a |
6002 | // prototype, and the types of the arguments after promotion are |
6003 | // not compatible with those of the parameters after promotion, |
6004 | // the behavior is undefined [except in some trivial cases]. |
6005 | // That is, in the general case, we should assume that a call |
6006 | // through an unprototyped function type works like a *non-variadic* |
6007 | // call. The way we make this work is to cast to the exact type |
6008 | // of the promoted arguments. |
6009 | // |
6010 | // Chain calls use this same code path to add the invisible chain parameter |
6011 | // to the function type. |
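     | // For example (illustrative), a C call to `void f();` written as `f(1, 2.0)`
     | // casts the callee to `void (*)(int, double)`, the exact type of the
     | // promoted arguments.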
6012 | if (isa<FunctionNoProtoType>(Val: FnType) || Chain) { |
6013 | llvm::Type *CalleeTy = getTypes().GetFunctionType(Info: FnInfo); |
6014 | int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace(); |
6015 | CalleeTy = CalleeTy->getPointerTo(AddrSpace: AS); |
6016 | |
6017 | llvm::Value *CalleePtr = Callee.getFunctionPointer(); |
6018 | CalleePtr = Builder.CreateBitCast(V: CalleePtr, DestTy: CalleeTy, Name: "callee.knr.cast" ); |
6019 | Callee.setFunctionPointer(CalleePtr); |
6020 | } |
6021 | |
6022 | // A HIP function pointer contains the kernel handle when it is used in a
6023 | // triple-chevron launch. The kernel stub needs to be loaded from the kernel
6024 | // handle and used as the callee.
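     | // For example (illustrative): `fnPtr<<<grid, block>>>(args)` on the host,
     | // where fnPtr holds a kernel handle rather than the stub itself.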
6025 | if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice && |
6026 | isa<CUDAKernelCallExpr>(Val: E) && |
6027 | (!TargetDecl || !isa<FunctionDecl>(Val: TargetDecl))) { |
6028 | llvm::Value *Handle = Callee.getFunctionPointer(); |
6029 | auto *Stub = Builder.CreateLoad( |
6030 | Addr: Address(Handle, Handle->getType(), CGM.getPointerAlign())); |
6031 | Callee.setFunctionPointer(Stub); |
6032 | } |
6033 | llvm::CallBase *CallOrInvoke = nullptr; |
6034 | RValue Call = EmitCall(CallInfo: FnInfo, Callee, ReturnValue, Args, callOrInvoke: &CallOrInvoke, |
6035 | IsMustTail: E == MustTailCall, Loc: E->getExprLoc()); |
6036 | |
6037 | // Generate a function declaration DISubprogram so that it can be used in
6038 | // debug info about call sites.
6039 | if (CGDebugInfo *DI = getDebugInfo()) { |
6040 | if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(Val: TargetDecl)) { |
6041 | FunctionArgList Args; |
6042 | QualType ResTy = BuildFunctionArgList(GD: CalleeDecl, Args); |
6043 | DI->EmitFuncDeclForCallSite(CallOrInvoke, |
6044 | CalleeType: DI->getFunctionType(FD: CalleeDecl, RetTy: ResTy, Args), |
6045 | CalleeDecl); |
6046 | } |
6047 | } |
6048 | |
6049 | return Call; |
6050 | } |
6051 | |
6052 | LValue CodeGenFunction:: |
6053 | EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { |
6054 | Address BaseAddr = Address::invalid(); |
6055 | if (E->getOpcode() == BO_PtrMemI) { |
6056 | BaseAddr = EmitPointerWithAlignment(E: E->getLHS()); |
6057 | } else { |
6058 | BaseAddr = EmitLValue(E: E->getLHS()).getAddress(); |
6059 | } |
6060 | |
6061 | llvm::Value *OffsetV = EmitScalarExpr(E: E->getRHS()); |
6062 | const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>(); |
6063 | |
6064 | LValueBaseInfo BaseInfo; |
6065 | TBAAAccessInfo TBAAInfo; |
6066 | Address MemberAddr = |
6067 | EmitCXXMemberDataPointerAddress(E, base: BaseAddr, memberPtr: OffsetV, memberPtrType: MPT, BaseInfo: &BaseInfo, |
6068 | TBAAInfo: &TBAAInfo); |
6069 | |
6070 | return MakeAddrLValue(Addr: MemberAddr, T: MPT->getPointeeType(), BaseInfo, TBAAInfo); |
6071 | } |
6072 | |
6073 | /// Given the address of a temporary variable, produce an r-value of |
6074 | /// its type. |
6075 | RValue CodeGenFunction::convertTempToRValue(Address addr, |
6076 | QualType type, |
6077 | SourceLocation loc) { |
6078 | LValue lvalue = MakeAddrLValue(Addr: addr, T: type, Source: AlignmentSource::Decl); |
6079 | switch (getEvaluationKind(T: type)) { |
6080 | case TEK_Complex: |
6081 | return RValue::getComplex(C: EmitLoadOfComplex(src: lvalue, loc)); |
6082 | case TEK_Aggregate: |
6083 | return lvalue.asAggregateRValue(); |
6084 | case TEK_Scalar: |
6085 | return RValue::get(V: EmitLoadOfScalar(lvalue, Loc: loc)); |
6086 | } |
6087 | llvm_unreachable("bad evaluation kind" ); |
6088 | } |
6089 | |
6090 | void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) { |
6091 | assert(Val->getType()->isFPOrFPVectorTy()); |
6092 | if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val)) |
6093 | return; |
6094 | |
6095 | llvm::MDBuilder MDHelper(getLLVMContext()); |
6096 | llvm::MDNode *Node = MDHelper.createFPMath(Accuracy); |
6097 | |
6098 | cast<llvm::Instruction>(Val)->setMetadata(KindID: llvm::LLVMContext::MD_fpmath, Node); |
6099 | } |
6100 | |
6101 | void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) { |
6102 | llvm::Type *EltTy = Val->getType()->getScalarType(); |
6103 | if (!EltTy->isFloatTy()) |
6104 | return; |
6105 | |
6106 | if ((getLangOpts().OpenCL && |
6107 | !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) || |
6108 | (getLangOpts().HIP && getLangOpts().CUDAIsDevice && |
6109 | !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) { |
6110 | // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp
6111 | // |
6112 | // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt |
6113 | // build option allows an application to specify that single precision |
6114 | // floating-point divide (x/y and 1/x) and sqrt used in the program |
6115 | // source are correctly rounded. |
6116 | // |
6117 | // TODO: CUDA has a prec-sqrt flag |
6118 | SetFPAccuracy(Val, Accuracy: 3.0f); |
6119 | } |
6120 | } |
6121 | |
6122 | void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) { |
6123 | llvm::Type *EltTy = Val->getType()->getScalarType(); |
6124 | if (!EltTy->isFloatTy()) |
6125 | return; |
6126 | |
6127 | if ((getLangOpts().OpenCL && |
6128 | !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) || |
6129 | (getLangOpts().HIP && getLangOpts().CUDAIsDevice && |
6130 | !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) { |
6131 | // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp |
6132 | // |
6133 | // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt |
6134 | // build option allows an application to specify that single precision |
6135 | // floating-point divide (x/y and 1/x) and sqrt used in the program |
6136 | // source are correctly rounded. |
6137 | // |
6138 | // TODO: CUDA has a prec-div flag |
6139 | SetFPAccuracy(Val, Accuracy: 2.5f); |
6140 | } |
6141 | } |
6142 | |
6143 | namespace { |
6144 | struct LValueOrRValue { |
6145 | LValue LV; |
6146 | RValue RV; |
6147 | }; |
6148 | } |
6149 | |
6150 | static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, |
6151 | const PseudoObjectExpr *E, |
6152 | bool forLValue, |
6153 | AggValueSlot slot) { |
6154 | SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; |
6155 | |
6156 | // Find the result expression, if any. |
6157 | const Expr *resultExpr = E->getResultExpr(); |
6158 | LValueOrRValue result; |
6159 | |
6160 | for (PseudoObjectExpr::const_semantics_iterator |
6161 | i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { |
6162 | const Expr *semantic = *i; |
6163 | |
6164 | // If this semantic expression is an opaque value, bind it |
6165 | // to the result of its source expression. |
6166 | if (const auto *ov = dyn_cast<OpaqueValueExpr>(Val: semantic)) { |
6167 | // Skip unique OVEs. |
6168 | if (ov->isUnique()) { |
6169 | assert(ov != resultExpr && |
6170 | "A unique OVE cannot be used as the result expression" ); |
6171 | continue; |
6172 | } |
6173 | |
6174 | // If this is the result expression, we may need to evaluate |
6175 | // directly into the slot. |
6176 | typedef CodeGenFunction::OpaqueValueMappingData OVMA; |
6177 | OVMA opaqueData; |
6178 | if (ov == resultExpr && ov->isPRValue() && !forLValue && |
6179 | CodeGenFunction::hasAggregateEvaluationKind(T: ov->getType())) { |
6180 | CGF.EmitAggExpr(E: ov->getSourceExpr(), AS: slot); |
6181 | LValue LV = CGF.MakeAddrLValue(Addr: slot.getAddress(), T: ov->getType(), |
6182 | Source: AlignmentSource::Decl); |
6183 | opaqueData = OVMA::bind(CGF, ov, lv: LV); |
6184 | result.RV = slot.asRValue(); |
6185 | |
6186 | // Otherwise, emit as normal. |
6187 | } else { |
6188 | opaqueData = OVMA::bind(CGF, ov, e: ov->getSourceExpr()); |
6189 | |
6190 | // If this is the result, also evaluate the result now. |
6191 | if (ov == resultExpr) { |
6192 | if (forLValue) |
6193 | result.LV = CGF.EmitLValue(E: ov); |
6194 | else |
6195 | result.RV = CGF.EmitAnyExpr(E: ov, aggSlot: slot); |
6196 | } |
6197 | } |
6198 | |
6199 | opaques.push_back(Elt: opaqueData); |
6200 | |
6201 | // Otherwise, if the expression is the result, evaluate it |
6202 | // and remember the result. |
6203 | } else if (semantic == resultExpr) { |
6204 | if (forLValue) |
6205 | result.LV = CGF.EmitLValue(E: semantic); |
6206 | else |
6207 | result.RV = CGF.EmitAnyExpr(E: semantic, aggSlot: slot); |
6208 | |
6209 | // Otherwise, evaluate the expression in an ignored context. |
6210 | } else { |
6211 | CGF.EmitIgnoredExpr(E: semantic); |
6212 | } |
6213 | } |
6214 | |
6215 | // Unbind all the opaques now. |
6216 | for (unsigned i = 0, e = opaques.size(); i != e; ++i) |
6217 | opaques[i].unbind(CGF); |
6218 | |
6219 | return result; |
6220 | } |
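     | // For example (illustrative), an ObjC property assignment `obj.prop = x` is
     | // a PseudoObjectExpr whose semantic form binds OpaqueValueExprs for the
     | // subexpressions and then performs the setter message send.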
6221 | |
6222 | RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E, |
6223 | AggValueSlot slot) { |
6224 | return emitPseudoObjectExpr(CGF&: *this, E, forLValue: false, slot).RV; |
6225 | } |
6226 | |
6227 | LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) { |
6228 | return emitPseudoObjectExpr(CGF&: *this, E, forLValue: true, slot: AggValueSlot::ignored()).LV; |
6229 | } |
6230 | |