1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGHLSLRuntime.h"
17#include "CGObjCRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "ConstantEmitter.h"
23#include "TargetInfo.h"
24#include "TrapReasonBuilder.h"
25#include "clang/AST/ASTContext.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/Expr.h"
29#include "clang/AST/ParentMapContext.h"
30#include "clang/AST/RecordLayout.h"
31#include "clang/AST/StmtVisitor.h"
32#include "clang/Basic/CodeGenOptions.h"
33#include "clang/Basic/DiagnosticTrap.h"
34#include "clang/Basic/TargetInfo.h"
35#include "llvm/ADT/APFixedPoint.h"
36#include "llvm/ADT/ScopeExit.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/FixedPointBuilder.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GEPNoWrapFlags.h"
45#include "llvm/IR/GetElementPtrTypeIterator.h"
46#include "llvm/IR/GlobalVariable.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/IntrinsicsPowerPC.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/Support/TypeSize.h"
52#include <cstdarg>
53#include <optional>
54
55using namespace clang;
56using namespace CodeGen;
57using llvm::Value;
58
59//===----------------------------------------------------------------------===//
60// Scalar Expression Emitter
61//===----------------------------------------------------------------------===//
62
63namespace llvm {
64extern cl::opt<bool> EnableSingleByteCoverage;
65} // namespace llvm
66
67namespace {
68
69/// Determine whether the given binary operation may overflow.
70/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
71/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
72/// the returned overflow check is precise. The returned value is 'true' for
73/// all other opcodes, to be conservative.
74bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
75 BinaryOperator::Opcode Opcode, bool Signed,
76 llvm::APInt &Result) {
77 // Assume overflow is possible, unless we can prove otherwise.
78 bool Overflow = true;
79 const auto &LHSAP = LHS->getValue();
80 const auto &RHSAP = RHS->getValue();
81 if (Opcode == BO_Add) {
82 Result = Signed ? LHSAP.sadd_ov(RHS: RHSAP, Overflow)
83 : LHSAP.uadd_ov(RHS: RHSAP, Overflow);
84 } else if (Opcode == BO_Sub) {
85 Result = Signed ? LHSAP.ssub_ov(RHS: RHSAP, Overflow)
86 : LHSAP.usub_ov(RHS: RHSAP, Overflow);
87 } else if (Opcode == BO_Mul) {
88 Result = Signed ? LHSAP.smul_ov(RHS: RHSAP, Overflow)
89 : LHSAP.umul_ov(RHS: RHSAP, Overflow);
90 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
91 if (Signed && !RHS->isZero())
92 Result = LHSAP.sdiv_ov(RHS: RHSAP, Overflow);
93 else
94 return false;
95 }
96 return Overflow;
97}
98
99struct BinOpInfo {
100 Value *LHS;
101 Value *RHS;
102 QualType Ty; // Computation Type.
103 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
104 FPOptions FPFeatures;
105 const Expr *E; // Entire expr, for error unsupported. May not be binop.
106
107 /// Check if the binop can result in integer overflow.
108 bool mayHaveIntegerOverflow() const {
109 // Without constant input, we can't rule out overflow.
110 auto *LHSCI = dyn_cast<llvm::ConstantInt>(Val: LHS);
111 auto *RHSCI = dyn_cast<llvm::ConstantInt>(Val: RHS);
112 if (!LHSCI || !RHSCI)
113 return true;
114
115 llvm::APInt Result;
116 return ::mayHaveIntegerOverflow(
117 LHS: LHSCI, RHS: RHSCI, Opcode, Signed: Ty->hasSignedIntegerRepresentation(), Result);
118 }
119
120 /// Check if the binop computes a division or a remainder.
121 bool isDivremOp() const {
122 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
123 Opcode == BO_RemAssign;
124 }
125
126 /// Check if the binop can result in an integer division by zero.
127 bool mayHaveIntegerDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CI = dyn_cast<llvm::ConstantInt>(Val: RHS))
130 return CI->isZero();
131 return true;
132 }
133
134 /// Check if the binop can result in a float division by zero.
135 bool mayHaveFloatDivisionByZero() const {
136 if (isDivremOp())
137 if (auto *CFP = dyn_cast<llvm::ConstantFP>(Val: RHS))
138 return CFP->isZero();
139 return true;
140 }
141
142 /// Check if at least one operand is a fixed point type. In such cases, this
143 /// operation did not follow usual arithmetic conversion and both operands
144 /// might not be of the same type.
145 bool isFixedPointOp() const {
146 // We cannot simply check the result type since comparison operations return
147 // an int.
148 if (const auto *BinOp = dyn_cast<BinaryOperator>(Val: E)) {
149 QualType LHSType = BinOp->getLHS()->getType();
150 QualType RHSType = BinOp->getRHS()->getType();
151 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
152 }
153 if (const auto *UnOp = dyn_cast<UnaryOperator>(Val: E))
154 return UnOp->getSubExpr()->getType()->isFixedPointType();
155 return false;
156 }
157
158 /// Check if the RHS has a signed integer representation.
159 bool rhsHasSignedIntegerRepresentation() const {
160 if (const auto *BinOp = dyn_cast<BinaryOperator>(Val: E)) {
161 QualType RHSType = BinOp->getRHS()->getType();
162 return RHSType->hasSignedIntegerRepresentation();
163 }
164 return false;
165 }
166};
167
168static bool MustVisitNullValue(const Expr *E) {
169 // If a null pointer expression's type is the C++0x nullptr_t, then
170 // it's not necessarily a simple constant and it must be evaluated
171 // for its potential side effects.
172 return E->getType()->isNullPtrType();
173}
174
175/// If \p E is a widened promoted integer, get its base (unpromoted) type.
176static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
177 const Expr *E) {
178 const Expr *Base = E->IgnoreImpCasts();
179 if (E == Base)
180 return std::nullopt;
181
182 QualType BaseTy = Base->getType();
183 if (!Ctx.isPromotableIntegerType(T: BaseTy) ||
184 Ctx.getTypeSize(T: BaseTy) >= Ctx.getTypeSize(T: E->getType()))
185 return std::nullopt;
186
187 return BaseTy;
188}
189
190/// Check if \p E is a widened promoted integer.
191static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
192 return getUnwidenedIntegerType(Ctx, E).has_value();
193}
194
195/// Consider OverflowBehaviorType and language options to calculate the final
196/// overflow behavior for an expression. There are no language options for
197/// unsigned overflow semantics so there is nothing to consider there.
198static LangOptions::OverflowBehaviorKind
199getOverflowBehaviorConsideringType(const CodeGenFunction &CGF,
200 const QualType Ty) {
201 const OverflowBehaviorType *OBT = Ty->getAs<OverflowBehaviorType>();
202 /// FIXME: Having two enums named `OverflowBehaviorKind` is not ideal, these
203 /// should be unified into one coherent enum that supports both unsigned and
204 /// signed overflow behavior semantics.
205 if (OBT) {
206 switch (OBT->getBehaviorKind()) {
207 case OverflowBehaviorType::OverflowBehaviorKind::Wrap:
208 return LangOptions::OverflowBehaviorKind::OB_Wrap;
209 case OverflowBehaviorType::OverflowBehaviorKind::Trap:
210 return LangOptions::OverflowBehaviorKind::OB_Trap;
211 }
212 llvm_unreachable("Unknown OverflowBehaviorKind");
213 }
214
215 if (Ty->isUnsignedIntegerType()) {
216 return LangOptions::OverflowBehaviorKind::OB_Unset;
217 }
218
219 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
220 case LangOptions::SignedOverflowBehaviorTy::SOB_Defined:
221 return LangOptions::OverflowBehaviorKind::OB_SignedAndDefined;
222 case LangOptions::SignedOverflowBehaviorTy::SOB_Undefined:
223 return LangOptions::OverflowBehaviorKind::OB_Unset;
224 case LangOptions::SignedOverflowBehaviorTy::SOB_Trapping:
225 return LangOptions::OverflowBehaviorKind::OB_Trap;
226 }
227 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
228}
229
230/// Check if we can skip the overflow check for \p Op.
231static bool CanElideOverflowCheck(ASTContext &Ctx, const BinOpInfo &Op) {
232 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
233 "Expected a unary or binary operator");
234
235 // If the binop has constant inputs and we can prove there is no overflow,
236 // we can elide the overflow check.
237 if (!Op.mayHaveIntegerOverflow())
238 return true;
239
240 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: Op.E);
241 if (UO && Ctx.isUnaryOverflowPatternExcluded(UO))
242 return true;
243
244 const auto *BO = dyn_cast<BinaryOperator>(Val: Op.E);
245 if (BO && BO->hasExcludedOverflowPattern())
246 return true;
247
248 if (Op.Ty.isWrapType())
249 return true;
250 if (Op.Ty.isTrapType())
251 return false;
252
253 if (Op.Ty->isSignedIntegerType() &&
254 Ctx.isTypeIgnoredBySanitizer(Mask: SanitizerKind::SignedIntegerOverflow,
255 Ty: Op.Ty)) {
256 return true;
257 }
258
259 if (Op.Ty->isUnsignedIntegerType() &&
260 Ctx.isTypeIgnoredBySanitizer(Mask: SanitizerKind::UnsignedIntegerOverflow,
261 Ty: Op.Ty)) {
262 return true;
263 }
264
265 // If a unary op has a widened operand, the op cannot overflow.
266 if (UO)
267 return !UO->canOverflow();
268
269 // We usually don't need overflow checks for binops with widened operands.
270 // Multiplication with promoted unsigned operands is a special case.
271 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, E: BO->getLHS());
272 if (!OptionalLHSTy)
273 return false;
274
275 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, E: BO->getRHS());
276 if (!OptionalRHSTy)
277 return false;
278
279 QualType LHSTy = *OptionalLHSTy;
280 QualType RHSTy = *OptionalRHSTy;
281
282 // This is the simple case: binops without unsigned multiplication, and with
283 // widened operands. No overflow check is needed here.
284 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
285 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
286 return true;
287
288 // For unsigned multiplication the overflow check can be elided if either one
289 // of the unpromoted types are less than half the size of the promoted type.
290 unsigned PromotedSize = Ctx.getTypeSize(T: Op.E->getType());
291 return (2 * Ctx.getTypeSize(T: LHSTy)) < PromotedSize ||
292 (2 * Ctx.getTypeSize(T: RHSTy)) < PromotedSize;
293}
294
295class ScalarExprEmitter
296 : public StmtVisitor<ScalarExprEmitter, Value*> {
297 CodeGenFunction &CGF;
298 CGBuilderTy &Builder;
299 bool IgnoreResultAssign;
300 llvm::LLVMContext &VMContext;
301public:
302
303 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
304 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
305 VMContext(cgf.getLLVMContext()) {
306 }
307
308 //===--------------------------------------------------------------------===//
309 // Utilities
310 //===--------------------------------------------------------------------===//
311
312 bool TestAndClearIgnoreResultAssign() {
313 bool I = IgnoreResultAssign;
314 IgnoreResultAssign = false;
315 return I;
316 }
317
318 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
319 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
320 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
321 return CGF.EmitCheckedLValue(E, TCK);
322 }
323
324 void EmitBinOpCheck(
325 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
326 const BinOpInfo &Info);
327
328 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
329 return CGF.EmitLoadOfLValue(V: LV, Loc).getScalarVal();
330 }
331
  /// Emit an alignment assumption for \p V (the emitted value of \p E) when
  /// an align_value attribute applies to it. The attribute is looked up on
  /// the referenced declaration first (for references, on a typedef of the
  /// referee type), then on a typedef of the expression's own type. Does
  /// nothing when no attribute is found.
  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        // For a reference, the attribute may live on a typedef of the
        // non-reference type being referred to.
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(Val: VD) && !CGF.SanOpts.has(K: SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    // Fall back to an attribute on a typedef of the expression's type.
    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    // The attribute's alignment argument must fold to a constant integer.
    Value *AlignmentValue = CGF.EmitScalarExpr(E: AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Val: AlignmentValue);
    CGF.emitAlignmentAssumption(PtrValue: V, E, AssumptionLoc: AVAttr->getLocation(), Alignment: AlignmentCI);
  }
365
366 /// EmitLoadOfLValue - Given an expression with complex type that represents a
367 /// value l-value, this method emits the address of the l-value, then loads
368 /// and returns the result.
369 Value *EmitLoadOfLValue(const Expr *E) {
370 Value *V = EmitLoadOfLValue(LV: EmitCheckedLValue(E, TCK: CodeGenFunction::TCK_Load),
371 Loc: E->getExprLoc());
372
373 EmitLValueAlignmentAssumption(E, V);
374 return V;
375 }
376
377 /// EmitConversionToBool - Convert the specified expression value to a
378 /// boolean (i1) truth value. This is equivalent to "Val != 0".
379 Value *EmitConversionToBool(Value *Src, QualType DstTy);
380
381 /// Emit a check that a conversion from a floating-point type does not
382 /// overflow.
383 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
384 Value *Src, QualType SrcType, QualType DstType,
385 llvm::Type *DstTy, SourceLocation Loc);
386
387 /// Known implicit conversion check kinds.
388 /// This is used for bitfield conversion checks as well.
389 /// Keep in sync with the enum of the same name in ubsan_handlers.h
390 enum ImplicitConversionCheckKind : unsigned char {
391 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
392 ICCK_UnsignedIntegerTruncation = 1,
393 ICCK_SignedIntegerTruncation = 2,
394 ICCK_IntegerSignChange = 3,
395 ICCK_SignedIntegerTruncationOrSignChange = 4,
396 };
397
398 /// Emit a check that an [implicit] truncation of an integer does not
399 /// discard any bits. It is not UB, so we use the value after truncation.
400 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
401 QualType DstType, SourceLocation Loc,
402 bool OBTrapInvolved = false);
403
404 /// Emit a check that an [implicit] conversion of an integer does not change
405 /// the sign of the value. It is not UB, so we use the value after conversion.
406 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
407 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
408 QualType DstType, SourceLocation Loc,
409 bool OBTrapInvolved = false);
410
411 /// Emit a conversion from the specified type to the specified destination
412 /// type, both of which are LLVM scalar types.
413 struct ScalarConversionOpts {
414 bool TreatBooleanAsSigned;
415 bool EmitImplicitIntegerTruncationChecks;
416 bool EmitImplicitIntegerSignChangeChecks;
417 /* Potential -fsanitize-undefined-ignore-overflow-pattern= */
418 bool PatternExcluded;
419
420 ScalarConversionOpts()
421 : TreatBooleanAsSigned(false),
422 EmitImplicitIntegerTruncationChecks(false),
423 EmitImplicitIntegerSignChangeChecks(false), PatternExcluded(false) {}
424
425 ScalarConversionOpts(clang::SanitizerSet SanOpts)
426 : TreatBooleanAsSigned(false),
427 EmitImplicitIntegerTruncationChecks(
428 SanOpts.hasOneOf(K: SanitizerKind::ImplicitIntegerTruncation)),
429 EmitImplicitIntegerSignChangeChecks(
430 SanOpts.has(K: SanitizerKind::ImplicitIntegerSignChange)),
431 PatternExcluded(false) {}
432 };
433 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
434 llvm::Type *SrcTy, llvm::Type *DstTy,
435 ScalarConversionOpts Opts);
436 Value *
437 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
438 SourceLocation Loc,
439 ScalarConversionOpts Opts = ScalarConversionOpts());
440
441 /// Convert between either a fixed point and other fixed point or fixed point
442 /// and an integer.
443 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
444 SourceLocation Loc);
445
446 /// Emit a conversion from the specified complex type to the specified
447 /// destination type, where the destination type is an LLVM scalar type.
448 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
449 QualType SrcTy, QualType DstTy,
450 SourceLocation Loc);
451
452 /// EmitNullValue - Emit a value that corresponds to null for the given type.
453 Value *EmitNullValue(QualType Ty);
454
455 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
456 Value *EmitFloatToBoolConversion(Value *V) {
457 // Compare against 0.0 for fp scalars.
458 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: V->getType());
459 return Builder.CreateFCmpUNE(LHS: V, RHS: Zero, Name: "tobool");
460 }
461
462 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
463 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
464 Value *Zero = CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(Val: V->getType()), QT);
465
466 return Builder.CreateICmpNE(LHS: V, RHS: Zero, Name: "tobool");
467 }
468
469 Value *EmitIntToBoolConversion(Value *V) {
470 // Because of the type rules of C, we often end up computing a
471 // logical value, then zero extending it to int, then wanting it
472 // as a logical value again. Optimize this common case.
473 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Val: V)) {
474 if (ZI->getOperand(i_nocapture: 0)->getType() == Builder.getInt1Ty()) {
475 Value *Result = ZI->getOperand(i_nocapture: 0);
476 // If there aren't any more uses, zap the instruction to save space.
477 // Note that there can be more uses, for example if this
478 // is the result of an assignment.
479 if (ZI->use_empty())
480 ZI->eraseFromParent();
481 return Result;
482 }
483 }
484
485 return Builder.CreateIsNotNull(Arg: V, Name: "tobool");
486 }
487
488 //===--------------------------------------------------------------------===//
489 // Visitor Methods
490 //===--------------------------------------------------------------------===//
491
492 Value *Visit(Expr *E) {
493 ApplyDebugLocation DL(CGF, E);
494 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(S: E);
495 }
496
497 Value *VisitStmt(Stmt *S) {
498 S->dump(OS&: llvm::errs(), Context: CGF.getContext());
499 llvm_unreachable("Stmt can't have complex result type!");
500 }
501 Value *VisitExpr(Expr *S);
502
  /// Emit a ConstantExpr, preferring its pre-evaluated constant value when
  /// the ConstantEmitter can produce one; otherwise fall back to emitting the
  /// sub-expression normally.
  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(CE: E)) {
      if (E->isGLValue()) {
        // This was already converted to an rvalue when it was constant
        // evaluated.
        if (E->hasAPValueResult() && !E->getAPValueResult().isLValue())
          return Result;
        // Otherwise Result is an address: load the scalar stored there.
        return CGF.EmitLoadOfScalar(
            Addr: Address(Result, CGF.convertTypeForLoadStore(ASTTy: E->getType()),
                    CGF.getContext().getTypeAlignInChars(T: E->getType())),
            /*Volatile*/ false, Ty: E->getType(), Loc: E->getExprLoc());
      }
      return Result;
    }
    // No constant available; emit the wrapped expression as ordinary code.
    return Visit(E: E->getSubExpr());
  }
524 Value *VisitParenExpr(ParenExpr *PE) {
525 return Visit(E: PE->getSubExpr());
526 }
527 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
528 return Visit(E: E->getReplacement());
529 }
530 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
531 return Visit(E: GE->getResultExpr());
532 }
533 Value *VisitCoawaitExpr(CoawaitExpr *S) {
534 return CGF.EmitCoawaitExpr(E: *S).getScalarVal();
535 }
536 Value *VisitCoyieldExpr(CoyieldExpr *S) {
537 return CGF.EmitCoyieldExpr(E: *S).getScalarVal();
538 }
539 Value *VisitUnaryCoawait(const UnaryOperator *E) {
540 return Visit(E: E->getSubExpr());
541 }
542
543 // Leaves.
544 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
545 return Builder.getInt(AI: E->getValue());
546 }
547 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
548 return Builder.getInt(AI: E->getValue());
549 }
550 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
551 return llvm::ConstantFP::get(Context&: VMContext, V: E->getValue());
552 }
553 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
554 // Character literals are always stored in an unsigned (even for signed
555 // char), so allow implicit truncation here.
556 return llvm::ConstantInt::get(Ty: ConvertType(T: E->getType()), V: E->getValue(),
557 /*IsSigned=*/false, /*ImplicitTrunc=*/true);
558 }
559 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
560 return llvm::ConstantInt::get(Ty: ConvertType(T: E->getType()), V: E->getValue());
561 }
562 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
563 return llvm::ConstantInt::get(Ty: ConvertType(T: E->getType()), V: E->getValue());
564 }
565 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
566 if (E->getType()->isVoidType())
567 return nullptr;
568
569 return EmitNullValue(Ty: E->getType());
570 }
571 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
572 return EmitNullValue(Ty: E->getType());
573 }
574 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
575 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
576 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
577 llvm::Value *V = CGF.GetAddrOfLabel(L: E->getLabel());
578 return Builder.CreateBitCast(V, DestTy: ConvertType(T: E->getType()));
579 }
580
581 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
582 return llvm::ConstantInt::get(Ty: ConvertType(T: E->getType()),V: E->getPackLength());
583 }
584
585 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
586 return CGF.EmitPseudoObjectRValue(e: E).getScalarVal();
587 }
588
589 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
590 Value *VisitEmbedExpr(EmbedExpr *E);
591
592 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
593 if (E->isGLValue())
594 return EmitLoadOfLValue(LV: CGF.getOrCreateOpaqueLValueMapping(e: E),
595 Loc: E->getExprLoc());
596
597 // Otherwise, assume the mapping is the scalar directly.
598 return CGF.getOrCreateOpaqueRValueMapping(e: E).getScalarVal();
599 }
600
601 Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
602 llvm_unreachable("Codegen for this isn't defined/implemented");
603 }
604
605 // l-values.
606 Value *VisitDeclRefExpr(DeclRefExpr *E) {
607 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(RefExpr: E))
608 return CGF.emitScalarConstant(Constant, E);
609 return EmitLoadOfLValue(E);
610 }
611
612 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
613 return CGF.EmitObjCSelectorExpr(E);
614 }
615 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
616 return CGF.EmitObjCProtocolExpr(E);
617 }
618 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
619 return EmitLoadOfLValue(E);
620 }
621 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
622 if (E->getMethodDecl() &&
623 E->getMethodDecl()->getReturnType()->isReferenceType())
624 return EmitLoadOfLValue(E);
625 return CGF.EmitObjCMessageExpr(E).getScalarVal();
626 }
627
628 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
629 LValue LV = CGF.EmitObjCIsaExpr(E);
630 Value *V = CGF.EmitLoadOfLValue(V: LV, Loc: E->getExprLoc()).getScalarVal();
631 return V;
632 }
633
634 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
635 VersionTuple Version = E->getVersion();
636
637 // If we're checking for a platform older than our minimum deployment
638 // target, we can fold the check away.
639 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
640 return llvm::ConstantInt::get(Ty: Builder.getInt1Ty(), V: 1);
641
642 return CGF.EmitBuiltinAvailable(Version);
643 }
644
645 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
646 Value *VisitMatrixSingleSubscriptExpr(MatrixSingleSubscriptExpr *E);
647 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
648 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
649 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
650 Value *VisitMemberExpr(MemberExpr *E);
651 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
652 Value *VisitMatrixElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
653 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
654 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
655 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
656 // literals aren't l-values in C++. We do so simply because that's the
657 // cleanest way to handle compound literals in C++.
658 // See the discussion here: https://reviews.llvm.org/D64464
659 return EmitLoadOfLValue(E);
660 }
661
662 Value *VisitInitListExpr(InitListExpr *E);
663
664 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
665 assert(CGF.getArrayInitIndex() &&
666 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
667 return CGF.getArrayInitIndex();
668 }
669
670 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
671 return EmitNullValue(Ty: E->getType());
672 }
673 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
674 CGF.CGM.EmitExplicitCastExprType(E, CGF: &CGF);
675 return VisitCastExpr(E);
676 }
677 Value *VisitCastExpr(CastExpr *E);
678
679 Value *VisitCallExpr(const CallExpr *E) {
680 if (E->getCallReturnType(Ctx: CGF.getContext())->isReferenceType())
681 return EmitLoadOfLValue(E);
682
683 Value *V = CGF.EmitCallExpr(E).getScalarVal();
684
685 EmitLValueAlignmentAssumption(E, V);
686 return V;
687 }
688
689 Value *VisitStmtExpr(const StmtExpr *E);
690
691 // Unary Operators.
692 Value *VisitUnaryPostDec(const UnaryOperator *E) {
693 LValue LV = EmitLValue(E: E->getSubExpr());
694 return EmitScalarPrePostIncDec(E, LV, isInc: false, isPre: false);
695 }
696 Value *VisitUnaryPostInc(const UnaryOperator *E) {
697 LValue LV = EmitLValue(E: E->getSubExpr());
698 return EmitScalarPrePostIncDec(E, LV, isInc: true, isPre: false);
699 }
700 Value *VisitUnaryPreDec(const UnaryOperator *E) {
701 LValue LV = EmitLValue(E: E->getSubExpr());
702 return EmitScalarPrePostIncDec(E, LV, isInc: false, isPre: true);
703 }
704 Value *VisitUnaryPreInc(const UnaryOperator *E) {
705 LValue LV = EmitLValue(E: E->getSubExpr());
706 return EmitScalarPrePostIncDec(E, LV, isInc: true, isPre: true);
707 }
708
709 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
710 llvm::Value *InVal,
711 bool IsInc);
712
713 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
714 bool isInc, bool isPre);
715
716
717 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
718 if (isa<MemberPointerType>(Val: E->getType())) // never sugared
719 return CGF.CGM.getMemberPointerConstant(e: E);
720
721 return EmitLValue(E: E->getSubExpr()).getPointer(CGF);
722 }
723 Value *VisitUnaryDeref(const UnaryOperator *E) {
724 if (E->getType()->isVoidType())
725 return Visit(E: E->getSubExpr()); // the actual value should be unused
726 return EmitLoadOfLValue(E);
727 }
728
729 Value *VisitUnaryPlus(const UnaryOperator *E,
730 QualType PromotionType = QualType());
731 Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
732 Value *VisitUnaryMinus(const UnaryOperator *E,
733 QualType PromotionType = QualType());
734 Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);
735
736 Value *VisitUnaryNot (const UnaryOperator *E);
737 Value *VisitUnaryLNot (const UnaryOperator *E);
738 Value *VisitUnaryReal(const UnaryOperator *E,
739 QualType PromotionType = QualType());
740 Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
741 Value *VisitUnaryImag(const UnaryOperator *E,
742 QualType PromotionType = QualType());
743 Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
744 Value *VisitUnaryExtension(const UnaryOperator *E) {
745 return Visit(E: E->getSubExpr());
746 }
747
748 // C++
749 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
750 return EmitLoadOfLValue(E);
751 }
752 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
753 auto &Ctx = CGF.getContext();
754 APValue Evaluated =
755 SLE->EvaluateInContext(Ctx, DefaultExpr: CGF.CurSourceLocExprScope.getDefaultExpr());
756 return ConstantEmitter(CGF).emitAbstract(loc: SLE->getLocation(), value: Evaluated,
757 T: SLE->getType());
758 }
759
760 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
761 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
762 return Visit(E: DAE->getExpr());
763 }
764 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
765 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
766 return Visit(E: DIE->getExpr());
767 }
768 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
769 return CGF.LoadCXXThis();
770 }
771
772 Value *VisitExprWithCleanups(ExprWithCleanups *E);
773 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
774 return CGF.EmitCXXNewExpr(E);
775 }
776 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
777 CGF.EmitCXXDeleteExpr(E);
778 return nullptr;
779 }
780
781 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
782 if (E->isStoredAsBoolean())
783 return llvm::ConstantInt::get(Ty: ConvertType(T: E->getType()),
784 V: E->getBoolValue());
785 assert(E->getAPValue().isInt() && "APValue type not supported");
786 return llvm::ConstantInt::get(Ty: ConvertType(T: E->getType()),
787 V: E->getAPValue().getInt());
788 }
789
790 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
791 return Builder.getInt1(V: E->isSatisfied());
792 }
793
794 Value *VisitRequiresExpr(const RequiresExpr *E) {
795 return Builder.getInt1(V: E->isSatisfied());
796 }
797
798 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
799 return llvm::ConstantInt::get(Ty: ConvertType(T: E->getType()), V: E->getValue());
800 }
801
802 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
803 return llvm::ConstantInt::get(Ty: Builder.getInt1Ty(), V: E->getValue());
804 }
805
806 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
807 // C++ [expr.pseudo]p1:
808 // The result shall only be used as the operand for the function call
809 // operator (), and the result of such a call has type void. The only
810 // effect is the evaluation of the postfix-expression before the dot or
811 // arrow.
812 CGF.EmitScalarExpr(E: E->getBase());
813 return nullptr;
814 }
815
816 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
817 return EmitNullValue(Ty: E->getType());
818 }
819
820 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
821 CGF.EmitCXXThrowExpr(E);
822 return nullptr;
823 }
824
825 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
826 return Builder.getInt1(V: E->getValue());
827 }
828
  // Binary Operators.
  // Emit a multiplication for integer, matrix, floating-point, or fixed-point
  // operands, honoring the type's overflow behavior and any enabled
  // integer-overflow sanitizers.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType() ||
        Ops.Ty->isUnsignedIntegerType()) {
      const bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
      // Is the overflow sanitizer matching this signedness enabled?
      const bool hasSan =
          isSigned ? CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)
                   : CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow);
      // The cases below intentionally fall through: each stricter behavior
      // subsumes the checks of the previous one once a sanitizer is active.
      switch (getOverflowBehaviorConsideringType(CGF, Ty: Ops.Ty)) {
      case LangOptions::OB_Wrap:
        // Wrapping semantics: plain two's-complement multiply, no poison
        // flags, no checks.
        return Builder.CreateMul(LHS: Ops.LHS, RHS: Ops.RHS, Name: "mul");
      case LangOptions::OB_SignedAndDefined:
        // Overflow is defined behavior; only instrument when a sanitizer
        // explicitly asks for it.
        if (!hasSan)
          return Builder.CreateMul(LHS: Ops.LHS, RHS: Ops.RHS, Name: "mul");
        [[fallthrough]];
      case LangOptions::OB_Unset:
        // Default language rules: signed overflow is undefined, so mark the
        // multiply 'nsw'; unsigned wraps and gets a plain multiply.
        if (!hasSan)
          return isSigned ? Builder.CreateNSWMul(LHS: Ops.LHS, RHS: Ops.RHS, Name: "mul")
                          : Builder.CreateMul(LHS: Ops.LHS, RHS: Ops.RHS, Name: "mul");
        [[fallthrough]];
      case LangOptions::OB_Trap:
        // Skip the runtime overflow check when the operands provably cannot
        // overflow; otherwise emit the checked multiply.
        if (CanElideOverflowCheck(Ctx&: CGF.getContext(), Op: Ops))
          return isSigned ? Builder.CreateNSWMul(LHS: Ops.LHS, RHS: Ops.RHS, Name: "mul")
                          : Builder.CreateMul(LHS: Ops.LHS, RHS: Ops.RHS, Name: "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Val: Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          Val: BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          Val: BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      // Matrix * matrix needs the row/column shape; otherwise one operand is
      // a scalar and we broadcast it across the matrix.
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(LHS: Ops.LHS, RHS: Ops.RHS, LHSRows: LHSMatTy->getNumRows(),
                                       LHSColumns: LHSMatTy->getNumColumns(),
                                       RHSColumns: RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(LHS: Ops.LHS, RHS: Ops.RHS);
    }

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(L: Ops.LHS, R: Ops.RHS, Name: "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    // Remaining cases (e.g. integer vectors): plain multiply.
    return Builder.CreateMul(LHS: Ops.LHS, RHS: Ops.RHS, Name: "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  // Out-of-line emitters for the remaining arithmetic and shift operators.
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
902 Value *EmitShr(const BinOpInfo &Ops);
903 Value *EmitAnd(const BinOpInfo &Ops) {
904 return Builder.CreateAnd(LHS: Ops.LHS, RHS: Ops.RHS, Name: "and");
905 }
906 Value *EmitXor(const BinOpInfo &Ops) {
907 return Builder.CreateXor(LHS: Ops.LHS, RHS: Ops.RHS, Name: "xor");
908 }
909 Value *EmitOr (const BinOpInfo &Ops) {
910 return Builder.CreateOr(LHS: Ops.LHS, RHS: Ops.RHS, Name: "or");
911 }
912
  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  // Evaluate both operands of \p E (optionally promoting them to
  // \p PromotionTy for excess-precision arithmetic) and package them
  // together with the result type for the Emit* helpers above.
  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  // Excess-precision support: widen a value to / narrow it back from the
  // promotion type.
  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  // Emit "LHS op= RHS" using the member-function pointer \p F for the
  // arithmetic. The LValue form also reports the stored scalar via \p Result.
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                    Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
929
930 QualType getPromotionType(QualType Ty) {
931 const auto &Ctx = CGF.getContext();
932 if (auto *CT = Ty->getAs<ComplexType>()) {
933 QualType ElementType = CT->getElementType();
934 if (ElementType.UseExcessPrecision(Ctx))
935 return Ctx.getComplexType(T: Ctx.FloatTy);
936 }
937
938 if (Ty.UseExcessPrecision(Ctx)) {
939 if (auto *VT = Ty->getAs<VectorType>()) {
940 unsigned NumElements = VT->getNumElements();
941 return Ctx.getVectorType(VectorType: Ctx.FloatTy, NumElts: NumElements, VecKind: VT->getVectorKind());
942 }
943 return Ctx.FloatTy;
944 }
945
946 return QualType();
947 }
948
  // Binary operators and binary compound assignment operators.
  // For each operator OP, HANDLEBINOP expands to two visitors:
  //  * VisitBin##OP: evaluate both operands (promoting to an excess-precision
  //    type when getPromotionType says so), emit the arithmetic via Emit##OP,
  //    then narrow the result back to the expression's declared type.
  //  * VisitBin##OP##Assign: emit the compound assignment "LHS OP= RHS",
  //    grouping the emitted instructions for debug-info key instructions.
#define HANDLEBINOP(OP)                                                        \
  Value *VisitBin##OP(const BinaryOperator *E) {                               \
    QualType promotionTy = getPromotionType(E->getType());                     \
    auto result = Emit##OP(EmitBinOps(E, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = EmitUnPromotedValue(result, E->getType());                      \
    return result;                                                             \
  }                                                                            \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {               \
    ApplyAtomGroup Grp(CGF.getDebugInfo());                                    \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);                \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP
973
  // Comparisons.
  // EmitCompare lowers a comparison, choosing between the unsigned-integer,
  // signed-integer, and floating-point predicate based on the operand types.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
  // Each visitor passes the three candidate predicates. The relational
  // operators (<, >, <=, >=) pass IsSignaling == true for the FP compare;
  // (in)equality passes false.
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP
989
  // Simple assignment "LHS = RHS".
  Value *VisitBinAssign (const BinaryOperator *E);

  // Short-circuiting logical operators and the comma operator.
  Value *VisitBinLAnd (const BinaryOperator *E);
  Value *VisitBinLOr (const BinaryOperator *E);
  Value *VisitBinComma (const BinaryOperator *E);

  // Pointer-to-member access (".*" and "->*"): load the designated member as
  // an ordinary rvalue.
  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
998
999 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
1000 return Visit(E: E->getSemanticForm());
1001 }
1002
  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  // Objective-C literal expressions are delegated to CodeGenFunction's
  // ObjC runtime support.
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  // A pack-indexing expression denotes exactly one selected pack element;
  // emit that element.
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E: E->getSelectedExpr());
  }
1025};
1026} // end anonymous namespace.
1027
1028//===----------------------------------------------------------------------===//
1029// Utilities
1030//===----------------------------------------------------------------------===//
1031
1032/// EmitConversionToBool - Convert the specified expression value to a
1033/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  // Floating point: delegate to the FP-specific "!= 0" helper.
  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(V: Src);

  // Member pointers have an ABI-specific null representation, so the C++ ABI
  // object decides how to test them for null.
  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(Val&: SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr: Src, MPT);

  // The conversion is a NOP, and will be done when CodeGening the builtin.
  if (SrcType == CGF.getContext().AMDGPUFeaturePredicateTy)
    return Src;

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  // Integers: compare against zero.
  if (isa<llvm::IntegerType>(Val: Src->getType()))
    return EmitIntToBoolConversion(V: Src);

  // Pointers: compare against the null value for the pointer's type (the
  // helper takes the QualType so the representation can depend on it).
  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(V: Src, QT: SrcType);
}
1056
/// Emit a ubsan float-cast-overflow check for converting \p Src (a value of
/// floating-point type \p SrcType; \p OrigSrc / \p OrigSrcType are the value
/// and type before any promotion, e.g. from __half) to the integer type
/// \p DstType / \p DstTy. The conversion is undefined when the source is
/// +-Inf, NaN, or (after truncation toward zero) out of the destination's
/// range, so the check computes the exclusive FP bounds of the valid range
/// and verifies MinSrc < Src < MaxSrc.
void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  // Only float -> integer conversions can overflow here; anything else needs
  // no check.
  if (!isa<llvm::IntegerType>(Val: DstTy))
    return;

  auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
  auto CheckHandler = SanitizerHandler::FloatCastOverflow;
  SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  // Range computation uses the semantics of the *original* source type so the
  // bounds are exact even when Src was promoted (e.g. __half -> float).
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(T: OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(T: DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(numBits: Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Input: Min, IsSigned: !Unsigned, RM: APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(Sem: SrcSema, Negative: true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(RHS: APFloat(SrcSema, 1), RM: APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(numBits: Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Input: Max, IsSigned: !Unsigned, RM: APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(Sem: SrcSema, Negative: false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(RHS: APFloat(SrcSema, 1), RM: APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(T: SrcType);
    bool IsInexact;
    MinSrc.convert(ToSemantics: Sema, RM: APFloat::rmTowardZero, losesInfo: &IsInexact);
    MaxSrc.convert(ToSemantics: Sema, RM: APFloat::rmTowardZero, losesInfo: &IsInexact);
  }

  // Ordered compares are 'false' on NaN, so a NaN source fails both tests
  // and is rejected along with out-of-range values and infinities.
  llvm::Value *GE =
      Builder.CreateFCmpOGT(LHS: Src, RHS: llvm::ConstantFP::get(Context&: VMContext, V: MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(LHS: Src, RHS: llvm::ConstantFP::get(Context&: VMContext, V: MaxSrc));
  Check = Builder.CreateAnd(LHS: GE, RHS: LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(T: OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(T: DstType)};
  CGF.EmitCheck(Checked: std::make_pair(x&: Check, y&: CheckOrdinal), Check: CheckHandler, StaticArgs,
                DynamicArgs: OrigSrc);
}
1126
1127// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1128// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1129static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1130 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1131EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1132 QualType DstType, CGBuilderTy &Builder) {
1133 llvm::Type *SrcTy = Src->getType();
1134 llvm::Type *DstTy = Dst->getType();
1135 (void)DstTy; // Only used in assert()
1136
1137 // This should be truncation of integral types.
1138 assert(Src != Dst);
1139 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1140 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1141 "non-integer llvm type");
1142
1143 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1144 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1145
1146 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1147 // Else, it is a signed truncation.
1148 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1149 SanitizerKind::SanitizerOrdinal Ordinal;
1150 if (!SrcSigned && !DstSigned) {
1151 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1152 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1153 } else {
1154 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1155 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1156 }
1157
1158 llvm::Value *Check = nullptr;
1159 // 1. Extend the truncated value back to the same width as the Src.
1160 Check = Builder.CreateIntCast(V: Dst, DestTy: SrcTy, isSigned: DstSigned, Name: "anyext");
1161 // 2. Equality-compare with the original source value
1162 Check = Builder.CreateICmpEQ(LHS: Check, RHS: Src, Name: "truncheck");
1163 // If the comparison result is 'i1 false', then the truncation was lossy.
1164 return std::make_pair(x&: Kind, y: std::make_pair(x&: Check, y&: Ordinal));
1165}
1166
1167static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
1168 QualType SrcType, QualType DstType) {
1169 return SrcType->isIntegerType() && DstType->isIntegerType();
1170}
1171
/// Emit a runtime check for a lossy implicit integer truncation from
/// \p Src / \p SrcType to \p Dst / \p DstType. The check is emitted either
/// through the ubsan implicit-truncation sanitizers or, when
/// \p OBTrapInvolved is set (an OverflowBehaviorType trap type is involved),
/// as an unconditional trap check even if the sanitizers are disabled.
void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc,
                                                   bool OBTrapInvolved) {
  // Nothing to do unless a truncation sanitizer is on or an __ob_trap type
  // forces the check.
  if (!CGF.SanOpts.hasOneOf(K: SanitizerKind::ImplicitIntegerTruncation) &&
      !OBTrapInvolved)
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(K: SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
      Check;

  auto CheckHandler = SanitizerHandler::ImplicitConversion;
  {
    // We don't know the check kind until we call
    // EmitIntegerTruncationCheckHelper, but we want to annotate
    // EmitIntegerTruncationCheckHelper's instructions too.
    SanitizerDebugLocation SanScope(
        &CGF,
        {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
         SanitizerKind::SO_ImplicitSignedIntegerTruncation},
        CheckHandler);
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(O: Check.second.second)) {
    // Just emit a trap check if an __ob_trap was involved but appropriate
    // sanitizer isn't enabled.
    if (OBTrapInvolved)
      CGF.EmitTrapCheck(Checked: Check.second.first, CheckHandlerID: CheckHandler);
    return;
  }

  SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);

  // Does some SSCL ignore this type?
  const bool ignoredBySanitizer = CGF.getContext().isTypeIgnoredBySanitizer(
      Mask: SanitizerMask::bitPosToMask(Pos: Check.second.second), Ty: DstType);

  // Consider OverflowBehaviorTypes which override SSCL type entries for
  // truncation sanitizers.
  if (const auto *OBT = DstType->getAs<OverflowBehaviorType>()) {
    // Wrapping destination types opt out of truncation diagnostics entirely.
    if (OBT->isWrapKind())
      return;
  }
  // An SSCL entry suppresses the check unless an __ob_trap type overrides it.
  if (ignoredBySanitizer && !OBTrapInvolved)
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(T: SrcType),
      CGF.EmitCheckTypeDescriptor(T: DstType),
      llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: Check.first),
      llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: 0)};

  CGF.EmitCheck(Checked: Check.second, Check: CheckHandler, StaticArgs, DynamicArgs: {Src, Dst});
}
1254
1255static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1256 const char *Name,
1257 CGBuilderTy &Builder) {
1258 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1259 llvm::Type *VTy = V->getType();
1260 if (!VSigned) {
1261 // If the value is unsigned, then it is never negative.
1262 return llvm::ConstantInt::getFalse(Context&: VTy->getContext());
1263 }
1264 llvm::Constant *Zero = llvm::ConstantInt::get(Ty: VTy, V: 0);
1265 return Builder.CreateICmp(P: llvm::ICmpInst::ICMP_SLT, LHS: V, RHS: Zero,
1266 Name: llvm::Twine(Name) + "." + V->getName() +
1267 ".negativitycheck");
1268}
1269
1270// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1271// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1272static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1273 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1274EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1275 QualType DstType, CGBuilderTy &Builder) {
1276 llvm::Type *SrcTy = Src->getType();
1277 llvm::Type *DstTy = Dst->getType();
1278
1279 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1280 "non-integer llvm type");
1281
1282 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1283 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1284 (void)SrcSigned; // Only used in assert()
1285 (void)DstSigned; // Only used in assert()
1286 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1287 unsigned DstBits = DstTy->getScalarSizeInBits();
1288 (void)SrcBits; // Only used in assert()
1289 (void)DstBits; // Only used in assert()
1290
1291 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1292 "either the widths should be different, or the signednesses.");
1293
1294 // 1. Was the old Value negative?
1295 llvm::Value *SrcIsNegative =
1296 EmitIsNegativeTestHelper(V: Src, VType: SrcType, Name: "src", Builder);
1297 // 2. Is the new Value negative?
1298 llvm::Value *DstIsNegative =
1299 EmitIsNegativeTestHelper(V: Dst, VType: DstType, Name: "dst", Builder);
1300 // 3. Now, was the 'negativity status' preserved during the conversion?
1301 // NOTE: conversion from negative to zero is considered to change the sign.
1302 // (We want to get 'false' when the conversion changed the sign)
1303 // So we should just equality-compare the negativity statuses.
1304 llvm::Value *Check = nullptr;
1305 Check = Builder.CreateICmpEQ(LHS: SrcIsNegative, RHS: DstIsNegative, Name: "signchangecheck");
1306 // If the comparison result is 'false', then the conversion changed the sign.
1307 return std::make_pair(
1308 x: ScalarExprEmitter::ICCK_IntegerSignChange,
1309 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitIntegerSignChange));
1310}
1311
/// Emit a runtime check that the implicit conversion \p Src / \p SrcType ->
/// \p Dst / \p DstType did not change the value's sign. Several obviously
/// sign-preserving cases are pruned statically; the remaining cases are
/// reported via the ubsan implicit-conversion handler, or via a plain trap
/// when only an OverflowBehaviorType trap (\p OBTrapInvolved) requires it.
void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc,
                                                   bool OBTrapInvolved) {
  if (!CGF.SanOpts.has(O: SanitizerKind::SO_ImplicitIntegerSignChange) &&
      !OBTrapInvolved)
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(K: SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // Does an SSCL have an entry for the DstType under its respective sanitizer
  // section? Don't check this if an __ob_trap type is involved as it has
  // priority to emit checks regardless of sanitizer case lists.
  if (!OBTrapInvolved) {
    if (DstSigned &&
        CGF.getContext().isTypeIgnoredBySanitizer(
            Mask: SanitizerKind::ImplicitSignedIntegerTruncation, Ty: DstType))
      return;
    if (!DstSigned &&
        CGF.getContext().isTypeIgnoredBySanitizer(
            Mask: SanitizerKind::ImplicitUnsignedIntegerTruncation, Ty: DstType))
      return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  auto CheckHandler = SanitizerHandler::ImplicitConversion;
  SanitizerDebugLocation SanScope(
      &CGF,
      {SanitizerKind::SO_ImplicitIntegerSignChange,
       SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
       SanitizerKind::SO_ImplicitSignedIntegerTruncation},
      CheckHandler);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
                    2>
      Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Args&: Check.second);

  if (CGF.SanOpts.has(K: SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Args&: Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  // Sign-change sanitizer disabled: we can only be here because of an
  // __ob_trap type, so fold the collected predicates together and trap.
  if (!CGF.SanOpts.has(O: SanitizerKind::SO_ImplicitIntegerSignChange)) {
    if (OBTrapInvolved) {
      llvm::Value *Combined = Check.second.first;
      for (const auto &C : Checks)
        Combined = Builder.CreateAnd(LHS: Combined, RHS: C.first);
      CGF.EmitTrapCheck(Checked: Combined, CheckHandlerID: CheckHandler);
    }
    return;
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(T: SrcType),
      CGF.EmitCheckTypeDescriptor(T: DstType),
      llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: CheckKind),
      llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: 0)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checked: Checks, Check: CheckHandler, StaticArgs, DynamicArgs: {Src, Dst});
}
1426
1427// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1428// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1429static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1430 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1431EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1432 QualType DstType, CGBuilderTy &Builder) {
1433 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1434 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1435
1436 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1437 if (!SrcSigned && !DstSigned)
1438 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1439 else
1440 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1441
1442 llvm::Value *Check = nullptr;
1443 // 1. Extend the truncated value back to the same width as the Src.
1444 Check = Builder.CreateIntCast(V: Dst, DestTy: Src->getType(), isSigned: DstSigned, Name: "bf.anyext");
1445 // 2. Equality-compare with the original source value
1446 Check = Builder.CreateICmpEQ(LHS: Check, RHS: Src, Name: "bf.truncheck");
1447 // If the comparison result is 'i1 false', then the truncation was lossy.
1448
1449 return std::make_pair(
1450 x&: Kind,
1451 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitBitfieldConversion));
1452}
1453
1454// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1455// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1456static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1457 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1458EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1459 QualType DstType, CGBuilderTy &Builder) {
1460 // 1. Was the old Value negative?
1461 llvm::Value *SrcIsNegative =
1462 EmitIsNegativeTestHelper(V: Src, VType: SrcType, Name: "bf.src", Builder);
1463 // 2. Is the new Value negative?
1464 llvm::Value *DstIsNegative =
1465 EmitIsNegativeTestHelper(V: Dst, VType: DstType, Name: "bf.dst", Builder);
1466 // 3. Now, was the 'negativity status' preserved during the conversion?
1467 // NOTE: conversion from negative to zero is considered to change the sign.
1468 // (We want to get 'false' when the conversion changed the sign)
1469 // So we should just equality-compare the negativity statuses.
1470 llvm::Value *Check = nullptr;
1471 Check =
1472 Builder.CreateICmpEQ(LHS: SrcIsNegative, RHS: DstIsNegative, Name: "bf.signchangecheck");
1473 // If the comparison result is 'false', then the conversion changed the sign.
1474 return std::make_pair(
1475 x: ScalarExprEmitter::ICCK_IntegerSignChange,
1476 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitBitfieldConversion));
1477}
1478
/// Emit a ubsan implicit-bitfield-conversion check for storing \p Src
/// (of type \p SrcType) into the bitfield described by \p Info, where
/// \p Dst / \p DstType is the value as read back from the bitfield.
/// Emits at most one check: a truncation check when the bitfield is
/// narrower than the source, otherwise a sign-change check when one
/// cannot be ruled out statically.
void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
                                                  Value *Dst, QualType DstType,
                                                  const CGBitFieldInfo &Info,
                                                  SourceLocation Loc) {

  if (!SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  // Boolean bitfields cannot meaningfully truncate or change sign.
  if (DstType->isBooleanType() || SrcType->isBooleanType())
    return;

  // This should be truncation of integral types.
  assert(isa<llvm::IntegerType>(Src->getType()) &&
         isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");

  // TODO: Calculate src width to avoid emitting code
  // for unecessary cases.
  unsigned SrcBits = ConvertType(T: SrcType)->getScalarSizeInBits();
  unsigned DstBits = Info.Size; // The bitfield's declared width in bits.

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  auto CheckHandler = SanitizerHandler::ImplicitConversion;
  SanitizerDebugLocation SanScope(
      this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
      Check;

  // Truncation
  bool EmitTruncation = DstBits < SrcBits;
  // If Dst is signed and Src unsigned, we want to be more specific
  // about the CheckKind we emit, in this case we want to emit
  // ICCK_SignedIntegerTruncationOrSignChange.
  bool EmitTruncationFromUnsignedToSigned =
      EmitTruncation && DstSigned && !SrcSigned;
  // Sign change
  bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
  bool BothUnsigned = !SrcSigned && !DstSigned;
  bool LargerSigned = (DstBits > SrcBits) && DstSigned;
  // We can avoid emitting sign change checks in some obvious cases
  // 1. If Src and Dst have the same signedness and size
  // 2. If both are unsigned sign check is unecessary!
  // 3. If Dst is signed and bigger than Src, either
  // sign-extension or zero-extension will make sure
  // the sign remains.
  bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;

  if (EmitTruncation)
    Check =
        EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  else if (EmitSignChange) {
    assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
           "either the widths should be different, or the signednesses.");
    Check =
        EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  } else
    // Neither check can fire; emit nothing.
    return;

  ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
  if (EmitTruncationFromUnsignedToSigned)
    CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;

  llvm::Constant *StaticArgs[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(T: SrcType),
      EmitCheckTypeDescriptor(T: DstType),
      llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: CheckKind),
      llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: Info.Size)};

  EmitCheck(Checked: Check.second, Check: CheckHandler, StaticArgs, DynamicArgs: {Src, Dst});
}
1558
/// Emit an element-wise arithmetic cast from \p SrcTy to \p DstTy.  Both are
/// either scalars or (for matrix conversions) IR vectors; the cast kind is
/// chosen from the *element* types.  Pointer and bool conversions are handled
/// by the caller.
Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  // Integer source: int->int or int->fp, picking signedness from the AST type.
  if (isa<llvm::IntegerType>(Val: SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(Val: DstElementTy))
      return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
    return Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
  }

  // Floating-point source, integer destination.
  if (isa<llvm::IntegerType>(Val: DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(Callee: CGF.CGM.getIntrinsic(IID, Tys: {DstTy, SrcTy}), Args: Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
    return Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
  }

  // There is no direct cast between the 16-bit FP formats (half/bfloat);
  // round-trip through float instead.
  if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
    Value *FloatVal = Builder.CreateFPExt(V: Src, DestTy: Builder.getFloatTy(), Name: "fpext");
    return Builder.CreateFPTrunc(V: FloatVal, DestTy: DstTy, Name: "fptrunc");
  }
  // Remaining cases are fp->fp.  This relies on the FP TypeID enumeration
  // being ordered by increasing width, so a smaller destination TypeID
  // implies a narrowing conversion.
  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
  return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
}
1621
/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up this
  // function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type is
      // an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Arg: Src, Name: "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType() ||
        DstType->isRealFloatingType())
      return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  // Remember the possibly-sugared types: overflow-behavior annotations are
  // only visible on the non-canonical type spellings (see below).
  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(T: SrcType);
  DstType = CGF.getContext().getCanonicalType(T: DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(T: DstType);

  // Determine whether an overflow behavior of 'trap' has been specified for
  // either the destination or the source types. If so, we can elide sanitizer
  // capability checks as this overflow behavior kind is also capable of
  // emitting traps without runtime sanitizer support.
  // Also skip instrumentation if either source or destination has 'wrap'
  // behavior - the user has explicitly indicated they accept wrapping
  // semantics. Use non-canonical types to preserve OBT annotations.
  const auto *DstOBT = NoncanonicalDstType->getAs<OverflowBehaviorType>();
  const auto *SrcOBT = NoncanonicalSrcType->getAs<OverflowBehaviorType>();
  bool OBTrapInvolved =
      (DstOBT && DstOBT->isTrapKind()) || (SrcOBT && SrcOBT->isTrapKind());
  bool OBWrapInvolved =
      (DstOBT && DstOBT->isWrapKind()) || (SrcOBT && SrcOBT->isWrapKind());

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Value *BitCast = Builder.CreateBitCast(V: Src, DestTy: CGF.CGM.HalfTy);
        return Builder.CreateFPExt(V: BitCast, DestTy: DstTy, Name: "conv");
      }
    } else {
      // Cast to other types through float, using either the intrinsic or FPExt,
      // depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).

      if (Src->getType() != CGF.CGM.HalfTy) {
        assert(CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics());
        Src = Builder.CreateBitCast(V: Src, DestTy: CGF.CGM.HalfTy);
      }

      // From here on, pretend the source was a float.
      Src = Builder.CreateFPExt(V: Src, DestTy: CGF.CGM.FloatTy, Name: "conv");
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    // Same IR type may still be a sign change at the AST level; instrument it
    // when requested (or when trap overflow behavior demands it).
    if (Opts.EmitImplicitIntegerSignChangeChecks ||
        (OBTrapInvolved && !OBWrapInvolved))
      EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Src,
                                 DstType: NoncanonicalDstType, Loc, OBTrapInvolved);

    return Src;
  }

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(Val: DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(Val: SrcTy))
      return Src;

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(V: IntResult, DestTy: DstTy, Name: "conv");
  }

  if (isa<llvm::PointerType>(Val: SrcTy)) {
    // Must be an ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(V: Src, DestTy: DstTy, Name: "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type is
    // the same as the vector's element type (sans qualifiers)
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElts: NumElements, V: Src, Name: "splat");
  }

  if (SrcType->isMatrixType() && DstType->isMatrixType())
    return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (isa<llvm::VectorType>(Val: SrcTy) || isa<llvm::VectorType>(Val: DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
    llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(V: Src, DestTy: DstTy, Name: "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
    llvm::Type *DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: false, Name: "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
  }

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  // ResTy remembers the true destination; DstTy may be redirected to float
  // below when the target lacks native half support.
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type. However, we consider the
  // range of representable values for all floating-point types to be
  // [-inf,+inf], so no overflow can ever happen when the destination type is a
  // floating-point type.
  if (CGF.SanOpts.has(K: SanitizerKind::FloatCastOverflow) &&
      OrigSrcType->isFloatingType())
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Handle the case where the half type is represented as an integer (as
      // opposed to operations on half, available with NativeHalfType).

      // If the half type is supported, just use an fptrunc.
      Value *Res = Builder.CreateFPTrunc(V: Src, DestTy: CGF.CGM.HalfTy, Name: "conv");
      if (DstTy == CGF.CGM.HalfTy)
        return Res;

      assert(DstTy->isIntegerTy(16) &&
             CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics() &&
             "Only half FP requires extra conversion");
      return Builder.CreateBitCast(V: Res, DestTy: DstTy);
    }

    DstTy = CGF.FloatTy;
  }

  Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (DstTy != ResTy) {
    // DstTy was redirected to float above; truncate the float result back to
    // half (and bitcast to i16 when half is only a storage format).
    Res = Builder.CreateFPTrunc(V: Res, DestTy: CGF.CGM.HalfTy, Name: "conv");

    if (ResTy != CGF.CGM.HalfTy) {
      assert(ResTy->isIntegerTy(16) &&
             CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics() &&
             "Only half FP requires extra conversion");
      Res = Builder.CreateBitCast(V: Res, DestTy: ResTy);
    }
  }

  // Implicit-conversion sanitizer instrumentation (truncation / sign change),
  // elided when wrap overflow behavior was requested or the pattern is
  // explicitly excluded.
  if ((Opts.EmitImplicitIntegerTruncationChecks || OBTrapInvolved) &&
      !OBWrapInvolved && !Opts.PatternExcluded)
    EmitIntegerTruncationCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
                               DstType: NoncanonicalDstType, Loc, OBTrapInvolved);

  if (Opts.EmitImplicitIntegerSignChangeChecks ||
      (OBTrapInvolved && !OBWrapInvolved))
    EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
                               DstType: NoncanonicalDstType, Loc, OBTrapInvolved);

  return Res;
}
1863
/// Convert \p Src between fixed-point, integer, and floating-point types.
/// At least one side is expected to be a fixed-point type; the actual bit
/// manipulation is delegated to llvm::FixedPointBuilder.
Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                   QualType DstTy,
                                                   SourceLocation Loc) {
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  llvm::Value *Result;
  if (SrcTy->isRealFloatingType())
    // Floating-point -> fixed-point.
    Result = FPBuilder.CreateFloatingToFixed(Src,
        DstSema: CGF.getContext().getFixedPointSemantics(Ty: DstTy));
  else if (DstTy->isRealFloatingType())
    // Fixed-point -> floating-point.
    Result = FPBuilder.CreateFixedToFloating(Src,
        SrcSema: CGF.getContext().getFixedPointSemantics(Ty: SrcTy),
        DstTy: ConvertType(T: DstTy));
  else {
    auto SrcFPSema = CGF.getContext().getFixedPointSemantics(Ty: SrcTy);
    auto DstFPSema = CGF.getContext().getFixedPointSemantics(Ty: DstTy);

    if (DstTy->isIntegerType())
      // Fixed-point -> integer.
      Result = FPBuilder.CreateFixedToInteger(Src, SrcSema: SrcFPSema,
                                              DstWidth: DstFPSema.getWidth(),
                                              DstIsSigned: DstFPSema.isSigned());
    else if (SrcTy->isIntegerType())
      // Integer -> fixed-point.
      Result = FPBuilder.CreateIntegerToFixed(Src, SrcIsSigned: SrcFPSema.isSigned(),
                                              DstSema: DstFPSema);
    else
      // Fixed-point -> fixed-point.
      Result = FPBuilder.CreateFixedToFixed(Src, SrcSema: SrcFPSema, DstSema: DstFPSema);
  }
  return Result;
}
1892
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
    CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
    SourceLocation Loc) {
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    // Complex != 0 -> (Real != 0) | (Imag != 0)
    // Each component is first converted to bool, then the results are OR'd.
    Src.first = EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
    Src.second = EmitScalarConversion(Src: Src.second, SrcType: SrcTy, DstType: DstTy, Loc);
    return Builder.CreateOr(LHS: Src.first, RHS: Src.second, Name: "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type.
  return EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
}
1915
1916Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1917 return CGF.EmitFromMemory(Value: CGF.CGM.EmitNullConstant(T: Ty), Ty);
1918}
1919
/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
    const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;
  // Static data (location, type descriptors) and the runtime operand values
  // passed to the UBSan handler; their layout must match the handler's ABI.
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;
  TrapReason TR;

  // Compound assignments (e.g. +=) are reported as their underlying
  // arithmetic opcode.
  BinaryOperatorKind Opcode = Info.Opcode;
  if (BinaryOperator::isCompoundAssignmentOp(Opc: Opcode))
    Opcode = BinaryOperator::getOpForCompoundAssignment(Opc: Opcode);

  StaticData.push_back(Elt: CGF.EmitCheckSourceLocation(Loc: Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    // Unary minus was lowered to "0 - RHS": report a negation overflow on
    // the RHS operand alone.
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: UO->getType()));
    DynamicData.push_back(Elt: Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opc: Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Val: Info.E);
      StaticData.push_back(
          Elt: CGF.EmitCheckTypeDescriptor(T: BO->getLHS()->getType()));
      StaticData.push_back(
          Elt: CGF.EmitCheckTypeDescriptor(T: BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      int ArithOverflowKind = 0;
      switch (Opcode) {
      case BO_Add: {
        Check = SanitizerHandler::AddOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Add;
        break;
      }
      case BO_Sub: {
        Check = SanitizerHandler::SubOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Sub;
        break;
      }
      case BO_Mul: {
        Check = SanitizerHandler::MulOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Mul;
        break;
      }
      default:
        llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
      if (CGF.CGM.getCodeGenOpts().SanitizeTrap.has(
              K: SanitizerKind::UnsignedIntegerOverflow) ||
          CGF.CGM.getCodeGenOpts().SanitizeTrap.has(
              K: SanitizerKind::SignedIntegerOverflow)) {
        // Only pay the cost for constructing the trap diagnostic if they are
        // going to be used.
        CGF.CGM.BuildTrapReason(DiagID: diag::trap_ubsan_arith_overflow, TR)
            << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
            << Info.E;
      }
    }
    // Binary forms pass both operands to the handler.
    DynamicData.push_back(Elt: Info.LHS);
    DynamicData.push_back(Elt: Info.RHS);
  }

  CGF.EmitCheck(Checked: Checks, Check, StaticArgs: StaticData, DynamicArgs: DynamicData, TR: &TR);
}
1996
1997//===----------------------------------------------------------------------===//
1998// Visitor Methods
1999//===----------------------------------------------------------------------===//
2000
2001Value *ScalarExprEmitter::VisitExpr(Expr *E) {
2002 CGF.ErrorUnsupported(S: E, Type: "scalar expression");
2003 if (E->getType()->isVoidType())
2004 return nullptr;
2005 return llvm::PoisonValue::get(T: CGF.ConvertType(T: E->getType()));
2006}
2007
2008Value *
2009ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
2010 ASTContext &Context = CGF.getContext();
2011 unsigned AddrSpace =
2012 Context.getTargetAddressSpace(AS: CGF.CGM.GetGlobalConstantAddressSpace());
2013 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
2014 Str: E->ComputeName(Context), Name: "__usn_str", AddressSpace: AddrSpace);
2015
2016 llvm::Type *ExprTy = ConvertType(T: E->getType());
2017 return Builder.CreatePointerBitCastOrAddrSpaceCast(V: GlobalConstStr, DestTy: ExprTy,
2018 Name: "usn_addr_cast");
2019}
2020
2021Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
2022 assert(E->getDataElementCount() == 1);
2023 auto It = E->begin();
2024 return Builder.getInt(AI: (*It)->getValue());
2025}
2026
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  // Vector Mask Case: two operands, where the second is a runtime mask
  // vector.  Since the mask is not a compile-time constant, this is lowered
  // to a per-element extract/insert loop instead of a shufflevector.
  if (E->getNumSubExprs() == 2) {
    Value *LHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
    Value *RHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
    Value *Mask;

    auto *LTy = cast<llvm::FixedVectorType>(Val: LHS->getType());
    unsigned LHSElts = LTy->getNumElements();

    Mask = RHS;

    auto *MTy = cast<llvm::FixedVectorType>(Val: Mask->getType());

    // Mask off the high bits of each shuffle index.
    // NOTE(review): indices are reduced to the next power of two of LHSElts,
    // not modulo LHSElts — presumably LHSElts is a power of two here; confirm.
    Value *MaskBits =
        llvm::ConstantInt::get(Ty: MTy, V: llvm::NextPowerOf2(A: LHSElts - 1) - 1);
    Mask = Builder.CreateAnd(LHS: Mask, RHS: MaskBits, Name: "mask");

    // newv = undef
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
    auto *RTy = llvm::FixedVectorType::get(ElementType: LTy->getElementType(),
                                           NumElts: MTy->getNumElements());
    Value* NewV = llvm::PoisonValue::get(T: RTy);
    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
      Value *IIndx = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: i);
      Value *Indx = Builder.CreateExtractElement(Vec: Mask, Idx: IIndx, Name: "shuf_idx");

      Value *VExt = Builder.CreateExtractElement(Vec: LHS, Idx: Indx, Name: "shuf_elt");
      NewV = Builder.CreateInsertElement(Vec: NewV, NewElt: VExt, Idx: IIndx, Name: "shuf_ins");
    }
    return NewV;
  }

  // Constant-mask case: the remaining sub-expressions are literal indices,
  // which map directly onto a shufflevector mask.
  Value* V1 = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
  Value* V2 = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));

  SmallVector<int, 32> Indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
    llvm::APSInt Idx = E->getShuffleMaskIdx(N: i - 2);
    // Check for -1 and output it as undef in the IR.
    if (Idx.isSigned() && Idx.isAllOnes())
      Indices.push_back(Elt: -1);
    else
      Indices.push_back(Elt: Idx.getZExtValue());
  }

  return Builder.CreateShuffleVector(V1, V2, Mask: Indices, Name: "shuffle");
}
2080
/// Lower __builtin_convertvector: an element-wise conversion between two
/// vector types.
Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
  QualType SrcType = E->getSrcExpr()->getType(),
           DstType = E->getType();

  Value *Src = CGF.EmitScalarExpr(E: E->getSrcExpr());

  SrcType = CGF.getContext().getCanonicalType(T: SrcType);
  DstType = CGF.getContext().getCanonicalType(T: DstType);
  if (SrcType == DstType) return Src;

  assert(SrcType->isVectorType() &&
         "ConvertVector source type must be a vector");
  assert(DstType->isVectorType() &&
         "ConvertVector destination type must be a vector");

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = ConvertType(T: DstType);

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
           DstEltType = DstType->castAs<VectorType>()->getElementType();

  assert(SrcTy->isVectorTy() &&
         "ConvertVector source IR type must be a vector");
  assert(DstTy->isVectorTy() &&
         "ConvertVector destination IR type must be a vector");

  llvm::Type *SrcEltTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType(),
             *DstEltTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();

  // Bool destination: compare each element against zero, like scalar tobool.
  if (DstEltType->isBooleanType()) {
    assert((SrcEltTy->isFloatingPointTy() ||
            isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");

    llvm::Value *Zero = llvm::Constant::getNullValue(Ty: SrcTy);
    if (SrcEltTy->isFloatingPointTy()) {
      // FP comparisons must honor the expression's FP environment.
      CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
      return Builder.CreateFCmpUNE(LHS: Src, RHS: Zero, Name: "tobool");
    } else {
      return Builder.CreateICmpNE(LHS: Src, RHS: Zero, Name: "tobool");
    }
  }

  // We have the arithmetic types: real int/float.
  Value *Res = nullptr;

  if (isa<llvm::IntegerType>(Val: SrcEltTy)) {
    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(Val: DstEltTy))
      Res = Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
    else {
      CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
      if (InputSigned)
        Res = Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
      else
        Res = Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
    }
  } else if (isa<llvm::IntegerType>(Val: DstEltTy)) {
    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
    CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
    if (DstEltType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
    else
      Res = Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
  } else {
    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
           "Unknown real conversion");
    CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
    // Relies on the FP TypeID ordering tracking width: a smaller destination
    // TypeID means a narrowing conversion.
    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
      Res = Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
    else
      Res = Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
  }

  return Res;
}
2160
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  // Try to fold the member access to a constant.  Even then, the base
  // expression must still be emitted for its side effects.
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(ME: E)) {
    CGF.EmitIgnoredExpr(E: E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  } else {
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, Ctx: CGF.getContext(), AllowSideEffects: Expr::SE_AllowSideEffects)) {
      llvm::APSInt Value = Result.Val.getInt();
      CGF.EmitIgnoredExpr(E: E->getBase());
      return Builder.getInt(AI: Value);
    }
  }

  llvm::Value *Result = EmitLoadOfLValue(E);

  // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
  // debug info for the pointer, even if there is no variable associated with
  // the pointer's expression.
  if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
    if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Val: Result)) {
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(Val: Load->getPointerOperand())) {
        if (llvm::Instruction *Pointer =
                dyn_cast<llvm::Instruction>(Val: GEP->getPointerOperand())) {
          QualType Ty = E->getBase()->getType();
          // For a dot access the tracked pointer points at the base object,
          // so describe it with a pointer-to-base type.
          if (!E->isArrow())
            Ty = CGF.getContext().getPointerType(T: Ty);
          CGF.getDebugInfo()->EmitPseudoVariable(Builder, Value: Pointer, Ty);
        }
      }
    }
  }
  return Result;
}
2195
Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue context's. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType() &&
      !E->getBase()->getType()->isSveVLSBuiltinType())
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E: E->getBase());
  Value *Idx = Visit(E: E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  // Optionally guard the element access with a -fsanitize=array-bounds check.
  if (CGF.SanOpts.has(K: SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(ArrayExpr: E, ArrayExprBase: E->getBase(), Index: Idx, IndexType: IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Vec: Base, Idx, Name: "vecext");
}
2218
/// Subscripting a matrix with a single (row) index yields one full row as a
/// vector of NumColumns elements.
Value *ScalarExprEmitter::VisitMatrixSingleSubscriptExpr(
    MatrixSingleSubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
  unsigned NumRows = MatrixTy->getNumRows();
  unsigned NumColumns = MatrixTy->getNumColumns();

  // Row index
  Value *RowIdx = CGF.EmitMatrixIndexExpr(E: E->getRowIdx());
  llvm::MatrixBuilder MB(Builder);

  // The row index must be in [0, NumRows)
  if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
    MB.CreateIndexAssumption(Idx: RowIdx, NumElements: NumRows);

  Value *FlatMatrix = Visit(E: E->getBase());
  llvm::Type *ElemTy = CGF.ConvertTypeForMem(T: MatrixTy->getElementType());
  auto *ResultTy = llvm::FixedVectorType::get(ElementType: ElemTy, NumElts: NumColumns);
  Value *RowVec = llvm::PoisonValue::get(T: ResultTy);

  // Gather the row element by element: compute the flattened element index of
  // (RowIdx, Col), extract it from the matrix value, and insert it into lane
  // Col of the result vector.
  for (unsigned Col = 0; Col != NumColumns; ++Col) {
    Value *ColVal = llvm::ConstantInt::get(Ty: RowIdx->getType(), V: Col);
    bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
                            LangOptions::MatrixMemoryLayout::MatrixRowMajor;
    Value *EltIdx = MB.CreateIndex(RowIdx, ColumnIdx: ColVal, NumRows, NumCols: NumColumns,
                                   IsMatrixRowMajor, Name: "matrix_row_idx");
    Value *Elt =
        Builder.CreateExtractElement(Vec: FlatMatrix, Idx: EltIdx, Name: "matrix_elem");
    Value *Lane = llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: Col);
    RowVec = Builder.CreateInsertElement(Vec: RowVec, NewElt: Elt, Idx: Lane, Name: "matrix_row_ins");
  }

  return CGF.EmitFromMemory(Value: RowVec, Ty: E->getType());
}
2254
/// Subscripting a matrix with (row, column) indices extracts a single element
/// from the flattened matrix value.
Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *RowIdx = CGF.EmitMatrixIndexExpr(E: E->getRowIdx());
  Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E: E->getColumnIdx());

  const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
  llvm::MatrixBuilder MB(Builder);

  // Compute the flattened element index from the 2-D indices, honoring the
  // configured matrix memory layout.
  Value *Idx;
  unsigned NumCols = MatrixTy->getNumColumns();
  unsigned NumRows = MatrixTy->getNumRows();
  bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
                          LangOptions::MatrixMemoryLayout::MatrixRowMajor;
  Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows, NumCols, IsMatrixRowMajor);

  // Let the optimizer know the computed index is in range.
  if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
    MB.CreateIndexAssumption(Idx, NumElements: MatrixTy->getNumElementsFlattened());

  Value *Matrix = Visit(E: E->getBase());

  // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
  return Builder.CreateExtractElement(Vec: Matrix, Idx, Name: "matrixext");
}
2281
2282static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2283 unsigned Off) {
2284 int MV = SVI->getMaskValue(Elt: Idx);
2285 if (MV == -1)
2286 return -1;
2287 return Off + MV;
2288}
2289
2290static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2291 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2292 "Index operand too large for shufflevector mask!");
2293 return C->getZExtValue();
2294}
2295
/// Emit an initializer list with scalar (usually vector) LLVM type. Vector
/// initializers are built by inserting/shuffling each sub-initializer into
/// place; swizzle sources (ExtVectorElementExpr) are folded into the shuffle
/// rather than emitted as extract+insert pairs.
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  unsigned NumInitElements = E->getNumInits();
  // The only ignorable init list is an empty one of void type.
  assert((Ignore == false ||
          (NumInitElements == 0 && E->getType()->isVoidType())) &&
         "init list ignored");

  // HLSL initialization lists in the AST are an expansion which can contain
  // side-effecting expressions wrapped in opaque value expressions. To properly
  // emit these we need to emit the opaque values before we emit the argument
  // expressions themselves. This is a little hacky, but it prevents us needing
  // to do a bigger AST-level change for a language feature that we need to
  // deprecate in the near future. See related HLSL language proposals in the
  // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
  // * 0005-strict-initializer-lists.md
  // * 0032-constructors.md
  if (CGF.getLangOpts().HLSL)
    CGF.CGM.getHLSLRuntime().emitInitListOpaqueValues(CGF, E);

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(S: E, Type: "GNU array range designator extension");

  llvm::VectorType *VType =
      dyn_cast<llvm::VectorType>(Val: ConvertType(T: E->getType()));

  // Non-vector destination: either value-initialize or emit the single
  // braced scalar.
  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(Ty: E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E: E->getInit(Init: 0));
  }

  // Scalable vectors only support value-initialization or copy from another
  // scalable vector of the same type; per-element construction is impossible
  // since the element count is not a compile-time constant.
  if (isa<llvm::ScalableVectorType>(Val: VType)) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the vector.
      return EmitNullValue(Ty: E->getType());
    }

    if (NumInitElements == 1) {
      Expr *InitVector = E->getInit(Init: 0);

      // Initialize from another scalable vector of the same type.
      if (InitVector->getType().getCanonicalType() ==
          E->getType().getCanonicalType())
        return Visit(E: InitVector);
    }

    llvm_unreachable("Unexpected initialization of a scalable vector!");
  }

  unsigned ResElts = cast<llvm::FixedVectorType>(Val: VType)->getNumElements();

  // For column-major matrix types, we insert elements directly at their
  // column-major positions rather than inserting sequentially and shuffling.
  const ConstantMatrixType *ColMajorMT = nullptr;
  if (const auto *MT = E->getType()->getAs<ConstantMatrixType>();
      MT && CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
                LangOptions::MatrixMemoryLayout::MatrixColMajor)
    ColMajorMT = MT;

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;
  bool VIsPoisonShuffle = false;
  llvm::Value *V = llvm::PoisonValue::get(T: VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(Init: i);
    Value *Init = Visit(E: IE);
    SmallVector<int, 16> Args;

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Val: Init->getType());

    // Handle scalar elements. If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(Val: IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Val: Init);

        if (cast<llvm::FixedVectorType>(Val: EI->getVectorOperandType())
                ->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(Val: EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into poison -> shuffle (src, poison)
            // shufflemask must use an i32
            Args.push_back(Elt: getAsInt32(C, I32Ty: CGF.Int32Ty));
            Args.resize(N: ResElts, NV: -1);

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsPoisonShuffle = true;
          } else if (VIsPoisonShuffle) {
            // insert into poison shuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(Val: V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(Elt: getMaskElt(SVI: SVV, Idx: j, Off: 0));
            Args.push_back(Elt: ResElts + C->getZExtValue());
            Args.resize(N: ResElts, NV: -1);

            LHS = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
            RHS = EI->getVectorOperand();
            VIsPoisonShuffle = false;
          }
          if (!Args.empty()) {
            V = Builder.CreateShuffleVector(V1: LHS, V2: RHS, Mask: Args);
            ++CurIdx;
            continue;
          }
        }
      }
      // Plain scalar element: insert at its (possibly column-major remapped)
      // position. Any poison-shuffle tracking is invalidated by the insert.
      unsigned InsertIdx =
          ColMajorMT
              ? ColMajorMT->mapRowMajorToColumnMajorFlattenedIndex(RowMajorIdx: CurIdx)
              : CurIdx;
      V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx: Builder.getInt32(C: InsertIdx),
                                      Name: "vecinit");
      VIsPoisonShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = cast<llvm::FixedVectorType>(Val: VVT)->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(Val: IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Val: Init);
      Value *SVOp = SVI->getOperand(i_nocapture: 0);
      auto *OpTy = cast<llvm::FixedVectorType>(Val: SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with poison, merge
          // this shuffle directly into it.
          if (VIsPoisonShuffle) {
            Args.push_back(Elt: getMaskElt(SVI: cast<llvm::ShuffleVectorInst>(Val: V), Idx: j, Off: 0));
          } else {
            Args.push_back(Elt: j);
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(Elt: getMaskElt(SVI, Idx: j, Off: Offset));
        Args.resize(N: ResElts, NV: -1);

        if (VIsPoisonShuffle)
          V = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(Elt: j);
      Args.resize(N: ResElts, NV: -1);
      Init = Builder.CreateShuffleVector(V: Init, Mask: Args, Name: "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(Elt: j);
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(Elt: j + Offset);
      Args.resize(N: ResElts, NV: -1);
    }

    // If V is poison, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(a&: V, b&: Init);
    V = Builder.CreateShuffleVector(V1: V, V2: Init, Mask: Args, Name: "vecinit");
    VIsPoisonShuffle = isa<llvm::PoisonValue>(Val: Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  // Emit remaining default initializers.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers (zero-fill any trailing elements
  // not covered by the init list).
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    unsigned InsertIdx =
        ColMajorMT ? ColMajorMT->mapRowMajorToColumnMajorFlattenedIndex(RowMajorIdx: CurIdx)
                   : CurIdx;
    Value *Idx = Builder.getInt32(C: InsertIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(Ty: EltTy);
    V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx, Name: "vecinit");
  }

  return V;
}
2497
2498static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D) {
2499 return !D->isWeak();
2500}
2501
2502static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2503 E = E->IgnoreParens();
2504
2505 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2506 if (UO->getOpcode() == UO_Deref)
2507 return CGF.isPointerKnownNonNull(E: UO->getSubExpr());
2508
2509 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E))
2510 return isDeclRefKnownNonNull(CGF, D: DRE->getDecl());
2511
2512 if (const auto *ME = dyn_cast<MemberExpr>(Val: E)) {
2513 if (isa<FieldDecl>(Val: ME->getMemberDecl()))
2514 return true;
2515 return isDeclRefKnownNonNull(CGF, D: ME->getMemberDecl());
2516 }
2517
2518 // Array subscripts? Anything else?
2519
2520 return false;
2521}
2522
2523bool CodeGenFunction::isPointerKnownNonNull(const Expr *E) {
2524 assert(E->getType()->isSignableType(getContext()));
2525
2526 E = E->IgnoreParens();
2527
2528 if (isa<CXXThisExpr>(Val: E))
2529 return true;
2530
2531 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2532 if (UO->getOpcode() == UO_AddrOf)
2533 return isLValueKnownNonNull(CGF&: *this, E: UO->getSubExpr());
2534
2535 if (const auto *CE = dyn_cast<CastExpr>(Val: E))
2536 if (CE->getCastKind() == CK_FunctionToPointerDecay ||
2537 CE->getCastKind() == CK_ArrayToPointerDecay)
2538 return isLValueKnownNonNull(CGF&: *this, E: CE->getSubExpr());
2539
2540 // Maybe honor __nonnull?
2541
2542 return false;
2543}
2544
2545bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
2546 const Expr *E = CE->getSubExpr();
2547
2548 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2549 return false;
2550
2551 if (isa<CXXThisExpr>(Val: E->IgnoreParens())) {
2552 // We always assume that 'this' is never null.
2553 return false;
2554 }
2555
2556 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2557 // And that glvalue casts are never null.
2558 if (ICE->isGLValue())
2559 return false;
2560 }
2561
2562 return true;
2563}
2564
// RHS is an aggregate type. Flatten the source lvalue into an ordered list of
// scalar lvalues, then rebuild the destination (vector, constant matrix, or
// builtin scalar) element by element, converting each scalar as needed.
static Value *EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue SrcVal,
                                      QualType DestTy, SourceLocation Loc) {
  SmallVector<LValue, 16> LoadList;
  CGF.FlattenAccessAndTypeLValue(LVal: SrcVal, AccessList&: LoadList);
  // Dest is either a vector, constant matrix, or a builtin
  // if its a vector create a temp alloca to store into and return that
  if (auto *VecTy = DestTy->getAs<VectorType>()) {
    assert(LoadList.size() >= VecTy->getNumElements() &&
           "Flattened type on RHS must have the same number or more elements "
           "than vector on LHS.");
    llvm::Value *V = CGF.Builder.CreateLoad(
        Addr: CGF.CreateIRTempWithoutCast(T: DestTy, Name: "flatcast.tmp"));
    // write to V.
    for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
      RValue RVal = CGF.EmitLoadOfLValue(V: LoadList[I], Loc);
      assert(RVal.isScalar() &&
             "All flattened source values should be scalars.");
      // Convert each flattened scalar to the vector's element type before
      // inserting it at position I.
      llvm::Value *Cast =
          CGF.EmitScalarConversion(Src: RVal.getScalarVal(), SrcTy: LoadList[I].getType(),
                                   DstTy: VecTy->getElementType(), Loc);
      V = CGF.Builder.CreateInsertElement(Vec: V, NewElt: Cast, Idx: I);
    }
    return V;
  }
  if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
    assert(LoadList.size() >= MatTy->getNumElementsFlattened() &&
           "Flattened type on RHS must have the same number or more elements "
           "than vector on LHS.");

    bool IsRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
                      LangOptions::MatrixMemoryLayout::MatrixRowMajor;

    llvm::Value *V = CGF.Builder.CreateLoad(
        Addr: CGF.CreateIRTempWithoutCast(T: DestTy, Name: "flatcast.tmp"));
    // V is an allocated temporary for constructing the matrix.
    for (unsigned Row = 0, RE = MatTy->getNumRows(); Row < RE; Row++) {
      for (unsigned Col = 0, CE = MatTy->getNumColumns(); Col < CE; Col++) {
        // When interpreted as a matrix, \p LoadList is *always* row-major order
        // regardless of the default matrix memory layout.
        unsigned LoadIdx = MatTy->getRowMajorFlattenedIndex(Row, Column: Col);
        RValue RVal = CGF.EmitLoadOfLValue(V: LoadList[LoadIdx], Loc);
        assert(RVal.isScalar() &&
               "All flattened source values should be scalars.");
        llvm::Value *Cast = CGF.EmitScalarConversion(
            Src: RVal.getScalarVal(), SrcTy: LoadList[LoadIdx].getType(),
            DstTy: MatTy->getElementType(), Loc);
        // The destination slot, by contrast, follows the default layout.
        unsigned MatrixIdx = MatTy->getFlattenedIndex(Row, Column: Col, IsRowMajor);
        V = CGF.Builder.CreateInsertElement(Vec: V, NewElt: Cast, Idx: MatrixIdx);
      }
    }
    return V;
  }
  // if its a builtin just do an extract element or load.
  assert(DestTy->isBuiltinType() &&
         "Destination type must be a vector, matrix, or builtin type.");
  RValue RVal = CGF.EmitLoadOfLValue(V: LoadList[0], Loc);
  assert(RVal.isScalar() && "All flattened source values should be scalars.");
  return CGF.EmitScalarConversion(Src: RVal.getScalarVal(), SrcTy: LoadList[0].getType(),
                                  DstTy: DestTy, Loc);
}
2626
2627// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2628// have to handle a more broad range of conversions than explicit casts, as they
2629// handle things like function to ptr-to-function decay etc.
2630Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2631 llvm::scope_exit RestoreCurCast(
2632 [this, Prev = CGF.CurCast] { CGF.CurCast = Prev; });
2633 CGF.CurCast = CE;
2634
2635 Expr *E = CE->getSubExpr();
2636 QualType DestTy = CE->getType();
2637 CastKind Kind = CE->getCastKind();
2638 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2639
2640 // These cases are generally not written to ignore the result of
2641 // evaluating their sub-expressions, so we clear this now.
2642 bool Ignored = TestAndClearIgnoreResultAssign();
2643
2644 // Since almost all cast kinds apply to scalars, this switch doesn't have
2645 // a default case, so the compiler will warn on a missing case. The cases
2646 // are in the same order as in the CastKind enum.
2647 switch (Kind) {
2648 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2649 case CK_BuiltinFnToFnPtr:
2650 llvm_unreachable("builtin functions are handled elsewhere");
2651
2652 case CK_LValueBitCast:
2653 case CK_ObjCObjectLValueCast: {
2654 Address Addr = EmitLValue(E).getAddress();
2655 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2656 LValue LV = CGF.MakeAddrLValue(Addr, T: DestTy);
2657 return EmitLoadOfLValue(LV, Loc: CE->getExprLoc());
2658 }
2659
2660 case CK_LValueToRValueBitCast: {
2661 LValue SourceLVal = CGF.EmitLValue(E);
2662 Address Addr =
2663 SourceLVal.getAddress().withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2664 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2665 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2666 return EmitLoadOfLValue(LV: DestLV, Loc: CE->getExprLoc());
2667 }
2668
2669 case CK_CPointerToObjCPointerCast:
2670 case CK_BlockPointerToObjCPointerCast:
2671 case CK_AnyPointerToBlockPointerCast:
2672 case CK_BitCast: {
2673 Value *Src = Visit(E);
2674 llvm::Type *SrcTy = Src->getType();
2675 llvm::Type *DstTy = ConvertType(T: DestTy);
2676
2677 // FIXME: this is a gross but seemingly necessary workaround for an issue
2678 // manifesting when a target uses a non-default AS for indirect sret args,
2679 // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
2680 // on the address of a local struct that gets returned by value yields an
2681 // invalid bitcast from the a pointer to the IndirectAS to a pointer to the
2682 // DefaultAS. We can only do this subversive thing because sret args are
2683 // manufactured and them residing in the IndirectAS is a target specific
2684 // detail, and doing an AS cast here still retains the semantics the user
2685 // expects. It is desirable to remove this iff a better solution is found.
2686 if (auto A = dyn_cast<llvm::Argument>(Val: Src); A && A->hasStructRetAttr())
2687 return CGF.performAddrSpaceCast(Src, DestTy: DstTy);
2688
2689 // FIXME: Similarly to the sret case above, we need to handle BitCasts that
2690 // involve implicit address space conversions. This arises when the source
2691 // language lacks explicit address spaces, but the target's data layout
2692 // assigns different address spaces (e.g., program address space for
2693 // function pointers). Since Sema operates on Clang types (which don't carry
2694 // this information) and selects CK_BitCast, we must detect the address
2695 // space mismatch here in CodeGen when lowering to LLVM types. The most
2696 // common case is casting function pointers (which get the program AS from
2697 // the data layout) to/from object pointers (which use the default AS).
2698 // Ideally, this would be resolved at a higher level, but that would require
2699 // exposing data layout details to Sema.
2700 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2701 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2702 return CGF.performAddrSpaceCast(Src, DestTy: DstTy);
2703 }
2704
2705 assert(
2706 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2707 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2708 "Address-space cast must be used to convert address spaces");
2709
2710 if (CGF.SanOpts.has(K: SanitizerKind::CFIUnrelatedCast)) {
2711 if (auto *PT = DestTy->getAs<PointerType>()) {
2712 CGF.EmitVTablePtrCheckForCast(
2713 T: PT->getPointeeType(),
2714 Derived: Address(Src,
2715 CGF.ConvertTypeForMem(
2716 T: E->getType()->castAs<PointerType>()->getPointeeType()),
2717 CGF.getPointerAlign()),
2718 /*MayBeNull=*/true, TCK: CodeGenFunction::CFITCK_UnrelatedCast,
2719 Loc: CE->getBeginLoc());
2720 }
2721 }
2722
2723 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2724 const QualType SrcType = E->getType();
2725
2726 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2727 // Casting to pointer that could carry dynamic information (provided by
2728 // invariant.group) requires launder.
2729 Src = Builder.CreateLaunderInvariantGroup(Ptr: Src);
2730 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2731 // Casting to pointer that does not carry dynamic information (provided
2732 // by invariant.group) requires stripping it. Note that we don't do it
2733 // if the source could not be dynamic type and destination could be
2734 // dynamic because dynamic information is already laundered. It is
2735 // because launder(strip(src)) == launder(src), so there is no need to
2736 // add extra strip before launder.
2737 Src = Builder.CreateStripInvariantGroup(Ptr: Src);
2738 }
2739 }
2740
2741 // Update heapallocsite metadata when there is an explicit pointer cast.
2742 if (auto *CI = dyn_cast<llvm::CallBase>(Val: Src)) {
2743 if (CI->getMetadata(Kind: "heapallocsite") && isa<ExplicitCastExpr>(Val: CE) &&
2744 !isa<CastExpr>(Val: E)) {
2745 QualType PointeeType = DestTy->getPointeeType();
2746 if (!PointeeType.isNull())
2747 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CallSite: CI, AllocatedTy: PointeeType,
2748 Loc: CE->getExprLoc());
2749 }
2750 }
2751
2752 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2753 // same element type, use the llvm.vector.insert intrinsic to perform the
2754 // bitcast.
2755 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy)) {
2756 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Val: DstTy)) {
2757 // If we are casting a fixed i8 vector to a scalable i1 predicate
2758 // vector, use a vector insert and bitcast the result.
2759 if (ScalableDstTy->getElementType()->isIntegerTy(Bitwidth: 1) &&
2760 FixedSrcTy->getElementType()->isIntegerTy(Bitwidth: 8)) {
2761 ScalableDstTy = llvm::ScalableVectorType::get(
2762 ElementType: FixedSrcTy->getElementType(),
2763 MinNumElts: llvm::divideCeil(
2764 Numerator: ScalableDstTy->getElementCount().getKnownMinValue(), Denominator: 8));
2765 }
2766 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2767 llvm::Value *PoisonVec = llvm::PoisonValue::get(T: ScalableDstTy);
2768 llvm::Value *Result = Builder.CreateInsertVector(
2769 DstType: ScalableDstTy, SrcVec: PoisonVec, SubVec: Src, Idx: uint64_t(0), Name: "cast.scalable");
2770 ScalableDstTy = cast<llvm::ScalableVectorType>(
2771 Val: llvm::VectorType::getWithSizeAndScalar(SizeTy: ScalableDstTy, EltTy: DstTy));
2772 if (Result->getType() != ScalableDstTy)
2773 Result = Builder.CreateBitCast(V: Result, DestTy: ScalableDstTy);
2774 if (Result->getType() != DstTy)
2775 Result = Builder.CreateExtractVector(DstType: DstTy, SrcVec: Result, Idx: uint64_t(0));
2776 return Result;
2777 }
2778 }
2779 }
2780
2781 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2782 // same element type, use the llvm.vector.extract intrinsic to perform the
2783 // bitcast.
2784 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(Val: SrcTy)) {
2785 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(Val: DstTy)) {
2786 // If we are casting a scalable i1 predicate vector to a fixed i8
2787 // vector, bitcast the source and use a vector extract.
2788 if (ScalableSrcTy->getElementType()->isIntegerTy(Bitwidth: 1) &&
2789 FixedDstTy->getElementType()->isIntegerTy(Bitwidth: 8)) {
2790 if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(RHS: 8)) {
2791 ScalableSrcTy = llvm::ScalableVectorType::get(
2792 ElementType: ScalableSrcTy->getElementType(),
2793 MinNumElts: llvm::alignTo<8>(
2794 Value: ScalableSrcTy->getElementCount().getKnownMinValue()));
2795 llvm::Value *ZeroVec = llvm::Constant::getNullValue(Ty: ScalableSrcTy);
2796 Src = Builder.CreateInsertVector(DstType: ScalableSrcTy, SrcVec: ZeroVec, SubVec: Src,
2797 Idx: uint64_t(0));
2798 }
2799
2800 ScalableSrcTy = llvm::ScalableVectorType::get(
2801 ElementType: FixedDstTy->getElementType(),
2802 MinNumElts: ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2803 Src = Builder.CreateBitCast(V: Src, DestTy: ScalableSrcTy);
2804 }
2805 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
2806 return Builder.CreateExtractVector(DstType: DstTy, SrcVec: Src, Idx: uint64_t(0),
2807 Name: "cast.fixed");
2808 }
2809 }
2810
2811 // Perform VLAT <-> VLST bitcast through memory.
2812 // TODO: since the llvm.vector.{insert,extract} intrinsics
2813 // require the element types of the vectors to be the same, we
2814 // need to keep this around for bitcasts between VLAT <-> VLST where
2815 // the element types of the vectors are not the same, until we figure
2816 // out a better way of doing these casts.
2817 if ((isa<llvm::FixedVectorType>(Val: SrcTy) &&
2818 isa<llvm::ScalableVectorType>(Val: DstTy)) ||
2819 (isa<llvm::ScalableVectorType>(Val: SrcTy) &&
2820 isa<llvm::FixedVectorType>(Val: DstTy))) {
2821 Address Addr = CGF.CreateDefaultAlignTempAlloca(Ty: SrcTy, Name: "saved-value");
2822 LValue LV = CGF.MakeAddrLValue(Addr, T: E->getType());
2823 CGF.EmitStoreOfScalar(value: Src, lvalue: LV);
2824 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2825 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2826 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2827 return EmitLoadOfLValue(LV: DestLV, Loc: CE->getExprLoc());
2828 }
2829
2830 llvm::Value *Result = Builder.CreateBitCast(V: Src, DestTy: DstTy);
2831 return CGF.authPointerToPointerCast(ResultPtr: Result, SourceType: E->getType(), DestType: DestTy);
2832 }
2833 case CK_AddressSpaceConversion: {
2834 Expr::EvalResult Result;
2835 if (E->EvaluateAsRValue(Result, Ctx: CGF.getContext()) &&
2836 Result.Val.isNullPointer()) {
2837 // If E has side effect, it is emitted even if its final result is a
2838 // null pointer. In that case, a DCE pass should be able to
2839 // eliminate the useless instructions emitted during translating E.
2840 if (Result.HasSideEffects)
2841 Visit(E);
2842 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(
2843 Val: ConvertType(T: DestTy)), QT: DestTy);
2844 }
2845 // Since target may map different address spaces in AST to the same address
2846 // space, an address space conversion may end up as a bitcast.
2847 return CGF.performAddrSpaceCast(Src: Visit(E), DestTy: ConvertType(T: DestTy));
2848 }
2849 case CK_AtomicToNonAtomic:
2850 case CK_NonAtomicToAtomic:
2851 case CK_UserDefinedConversion:
2852 return Visit(E);
2853
2854 case CK_NoOp: {
2855 return CE->changesVolatileQualification() ? EmitLoadOfLValue(E: CE) : Visit(E);
2856 }
2857
2858 case CK_BaseToDerived: {
2859 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2860 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2861
2862 Address Base = CGF.EmitPointerWithAlignment(Addr: E);
2863 Address Derived =
2864 CGF.GetAddressOfDerivedClass(Value: Base, Derived: DerivedClassDecl,
2865 PathBegin: CE->path_begin(), PathEnd: CE->path_end(),
2866 NullCheckValue: CGF.ShouldNullCheckClassCastValue(CE));
2867
2868 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2869 // performed and the object is not of the derived type.
2870 if (CGF.sanitizePerformTypeCheck())
2871 CGF.EmitTypeCheck(TCK: CodeGenFunction::TCK_DowncastPointer, Loc: CE->getExprLoc(),
2872 Addr: Derived, Type: DestTy->getPointeeType());
2873
2874 if (CGF.SanOpts.has(K: SanitizerKind::CFIDerivedCast))
2875 CGF.EmitVTablePtrCheckForCast(T: DestTy->getPointeeType(), Derived,
2876 /*MayBeNull=*/true,
2877 TCK: CodeGenFunction::CFITCK_DerivedCast,
2878 Loc: CE->getBeginLoc());
2879
2880 return CGF.getAsNaturalPointerTo(Addr: Derived, PointeeType: CE->getType()->getPointeeType());
2881 }
2882 case CK_UncheckedDerivedToBase:
2883 case CK_DerivedToBase: {
2884 // The EmitPointerWithAlignment path does this fine; just discard
2885 // the alignment.
2886 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitPointerWithAlignment(Addr: CE),
2887 PointeeType: CE->getType()->getPointeeType());
2888 }
2889
2890 case CK_Dynamic: {
2891 Address V = CGF.EmitPointerWithAlignment(Addr: E);
2892 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(Val: CE);
2893 return CGF.EmitDynamicCast(V, DCE);
2894 }
2895
2896 case CK_ArrayToPointerDecay:
2897 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitArrayToPointerDecay(Array: E),
2898 PointeeType: CE->getType()->getPointeeType());
2899 case CK_FunctionToPointerDecay:
2900 return EmitLValue(E).getPointer(CGF);
2901
2902 case CK_NullToPointer:
2903 if (MustVisitNullValue(E))
2904 CGF.EmitIgnoredExpr(E);
2905
2906 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(Val: ConvertType(T: DestTy)),
2907 QT: DestTy);
2908
2909 case CK_NullToMemberPointer: {
2910 if (MustVisitNullValue(E))
2911 CGF.EmitIgnoredExpr(E);
2912
2913 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2914 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2915 }
2916
2917 case CK_ReinterpretMemberPointer:
2918 case CK_BaseToDerivedMemberPointer:
2919 case CK_DerivedToBaseMemberPointer: {
2920 Value *Src = Visit(E);
2921
2922 // Note that the AST doesn't distinguish between checked and
2923 // unchecked member pointer conversions, so we always have to
2924 // implement checked conversions here. This is inefficient when
2925 // actual control flow may be required in order to perform the
2926 // check, which it is for data member pointers (but not member
2927 // function pointers on Itanium and ARM).
2928 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, E: CE, Src);
2929 }
2930
2931 case CK_ARCProduceObject:
2932 return CGF.EmitARCRetainScalarExpr(expr: E);
2933 case CK_ARCConsumeObject:
2934 return CGF.EmitObjCConsumeObject(T: E->getType(), Ptr: Visit(E));
2935 case CK_ARCReclaimReturnedObject:
2936 return CGF.EmitARCReclaimReturnedObject(e: E, /*allowUnsafe*/ allowUnsafeClaim: Ignored);
2937 case CK_ARCExtendBlockObject:
2938 return CGF.EmitARCExtendBlockObject(expr: E);
2939
2940 case CK_CopyAndAutoreleaseBlockObject:
2941 return CGF.EmitBlockCopyAndAutorelease(Block: Visit(E), Ty: E->getType());
2942
2943 case CK_FloatingRealToComplex:
2944 case CK_FloatingComplexCast:
2945 case CK_IntegralRealToComplex:
2946 case CK_IntegralComplexCast:
2947 case CK_IntegralComplexToFloatingComplex:
2948 case CK_FloatingComplexToIntegralComplex:
2949 case CK_ConstructorConversion:
2950 case CK_ToUnion:
2951 case CK_HLSLArrayRValue:
2952 llvm_unreachable("scalar cast to non-scalar value");
2953
2954 case CK_LValueToRValue:
2955 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2956 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2957 return Visit(E);
2958
2959 case CK_IntegralToPointer: {
2960 Value *Src = Visit(E);
2961
2962 // First, convert to the correct width so that we control the kind of
2963 // extension.
2964 auto DestLLVMTy = ConvertType(T: DestTy);
2965 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2966 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2967 llvm::Value* IntResult =
2968 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
2969
2970 auto *IntToPtr = Builder.CreateIntToPtr(V: IntResult, DestTy: DestLLVMTy);
2971
2972 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2973 // Going from integer to pointer that could be dynamic requires reloading
2974 // dynamic information from invariant.group.
2975 if (DestTy.mayBeDynamicClass())
2976 IntToPtr = Builder.CreateLaunderInvariantGroup(Ptr: IntToPtr);
2977 }
2978
2979 IntToPtr = CGF.authPointerToPointerCast(ResultPtr: IntToPtr, SourceType: E->getType(), DestType: DestTy);
2980 return IntToPtr;
2981 }
2982 case CK_PointerToIntegral: {
2983 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2984 auto *PtrExpr = Visit(E);
2985
2986 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2987 const QualType SrcType = E->getType();
2988
2989 // Casting to integer requires stripping dynamic information as it does
2990 // not carries it.
2991 if (SrcType.mayBeDynamicClass())
2992 PtrExpr = Builder.CreateStripInvariantGroup(Ptr: PtrExpr);
2993 }
2994
2995 PtrExpr = CGF.authPointerToPointerCast(ResultPtr: PtrExpr, SourceType: E->getType(), DestType: DestTy);
2996 return Builder.CreatePtrToInt(V: PtrExpr, DestTy: ConvertType(T: DestTy));
2997 }
2998 case CK_ToVoid: {
2999 CGF.EmitIgnoredExpr(E);
3000 return nullptr;
3001 }
3002 case CK_MatrixCast: {
3003 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
3004 Loc: CE->getExprLoc());
3005 }
3006 // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1
3007 // Casts were inserted in Sema to Cast the Src Expr to a Scalar and
3008 // To perform any necessary Scalar Cast, so this Cast can be handled
3009 // by the regular Vector Splat cast code.
3010 case CK_HLSLAggregateSplatCast:
3011 case CK_VectorSplat: {
3012 llvm::Type *DstTy = ConvertType(T: DestTy);
3013 Value *Elt = Visit(E);
3014 // Splat the element across to all elements
3015 llvm::ElementCount NumElements =
3016 cast<llvm::VectorType>(Val: DstTy)->getElementCount();
3017 return Builder.CreateVectorSplat(EC: NumElements, V: Elt, Name: "splat");
3018 }
3019
3020 case CK_FixedPointCast:
3021 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
3022 Loc: CE->getExprLoc());
3023
3024 case CK_FixedPointToBoolean:
3025 assert(E->getType()->isFixedPointType() &&
3026 "Expected src type to be fixed point type");
3027 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
3028 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
3029 Loc: CE->getExprLoc());
3030
3031 case CK_FixedPointToIntegral:
3032 assert(E->getType()->isFixedPointType() &&
3033 "Expected src type to be fixed point type");
3034 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
3035 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
3036 Loc: CE->getExprLoc());
3037
3038 case CK_IntegralToFixedPoint:
3039 assert(E->getType()->isIntegerType() &&
3040 "Expected src type to be an integer");
3041 assert(DestTy->isFixedPointType() &&
3042 "Expected dest type to be fixed point type");
3043 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
3044 Loc: CE->getExprLoc());
3045
3046 case CK_IntegralCast: {
3047 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
3048 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
3049 return Builder.CreateIntCast(V: Visit(E), DestTy: ConvertType(T: DestTy),
3050 isSigned: SrcElTy->isSignedIntegerOrEnumerationType(),
3051 Name: "conv");
3052 }
3053 ScalarConversionOpts Opts;
3054 if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
3055 if (!ICE->isPartOfExplicitCast())
3056 Opts = ScalarConversionOpts(CGF.SanOpts);
3057 }
3058 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
3059 Loc: CE->getExprLoc(), Opts);
3060 }
3061 case CK_IntegralToFloating: {
3062 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
3063 // TODO: Support constrained FP intrinsics.
3064 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
3065 if (SrcElTy->isSignedIntegerOrEnumerationType())
3066 return Builder.CreateSIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
3067 return Builder.CreateUIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
3068 }
3069 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
3070 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
3071 Loc: CE->getExprLoc());
3072 }
3073 case CK_FloatingToIntegral: {
3074 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
3075 // TODO: Support constrained FP intrinsics.
3076 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
3077 if (DstElTy->isSignedIntegerOrEnumerationType())
3078 return Builder.CreateFPToSI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
3079 return Builder.CreateFPToUI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
3080 }
3081 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
3082 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
3083 Loc: CE->getExprLoc());
3084 }
3085 case CK_FloatingCast: {
3086 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
3087 // TODO: Support constrained FP intrinsics.
3088 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
3089 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
3090 if (DstElTy->castAs<BuiltinType>()->getKind() <
3091 SrcElTy->castAs<BuiltinType>()->getKind())
3092 return Builder.CreateFPTrunc(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
3093 return Builder.CreateFPExt(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
3094 }
3095 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
3096 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
3097 Loc: CE->getExprLoc());
3098 }
3099 case CK_FixedPointToFloating:
3100 case CK_FloatingToFixedPoint: {
3101 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
3102 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
3103 Loc: CE->getExprLoc());
3104 }
3105 case CK_BooleanToSignedIntegral: {
3106 ScalarConversionOpts Opts;
3107 Opts.TreatBooleanAsSigned = true;
3108 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
3109 Loc: CE->getExprLoc(), Opts);
3110 }
3111 case CK_IntegralToBoolean:
3112 return EmitIntToBoolConversion(V: Visit(E));
3113 case CK_PointerToBoolean:
3114 return EmitPointerToBoolConversion(V: Visit(E), QT: E->getType());
3115 case CK_FloatingToBoolean: {
3116 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
3117 return EmitFloatToBoolConversion(V: Visit(E));
3118 }
3119 case CK_MemberPointerToBoolean: {
3120 llvm::Value *MemPtr = Visit(E);
3121 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
3122 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
3123 }
3124
3125 case CK_FloatingComplexToReal:
3126 case CK_IntegralComplexToReal:
3127 return CGF.EmitComplexExpr(E, IgnoreReal: false, IgnoreImag: true).first;
3128
3129 case CK_FloatingComplexToBoolean:
3130 case CK_IntegralComplexToBoolean: {
3131 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
3132
3133 // TODO: kill this function off, inline appropriate case here
3134 return EmitComplexToScalarConversion(Src: V, SrcTy: E->getType(), DstTy: DestTy,
3135 Loc: CE->getExprLoc());
3136 }
3137
3138 case CK_ZeroToOCLOpaqueType: {
3139 assert((DestTy->isEventT() || DestTy->isQueueT() ||
3140 DestTy->isOCLIntelSubgroupAVCType()) &&
3141 "CK_ZeroToOCLEvent cast on non-event type");
3142 return llvm::Constant::getNullValue(Ty: ConvertType(T: DestTy));
3143 }
3144
3145 case CK_IntToOCLSampler:
3146 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
3147
3148 case CK_HLSLVectorTruncation: {
3149 assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
3150 "Destination type must be a vector or builtin type.");
3151 Value *Vec = Visit(E);
3152 if (auto *VecTy = DestTy->getAs<VectorType>()) {
3153 SmallVector<int> Mask;
3154 unsigned NumElts = VecTy->getNumElements();
3155 for (unsigned I = 0; I != NumElts; ++I)
3156 Mask.push_back(Elt: I);
3157
3158 return Builder.CreateShuffleVector(V: Vec, Mask, Name: "trunc");
3159 }
3160 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGF.SizeTy);
3161 return Builder.CreateExtractElement(Vec, Idx: Zero, Name: "cast.vtrunc");
3162 }
3163 case CK_HLSLMatrixTruncation: {
3164 assert((DestTy->isMatrixType() || DestTy->isBuiltinType()) &&
3165 "Destination type must be a matrix or builtin type.");
3166 Value *Mat = Visit(E);
3167 if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
3168 SmallVector<int> Mask(MatTy->getNumElementsFlattened());
3169 unsigned NumCols = MatTy->getNumColumns();
3170 unsigned NumRows = MatTy->getNumRows();
3171 auto *SrcMatTy = E->getType()->getAs<ConstantMatrixType>();
3172 assert(SrcMatTy && "Source type must be a matrix type.");
3173 assert(NumRows <= SrcMatTy->getNumRows());
3174 assert(NumCols <= SrcMatTy->getNumColumns());
3175 bool IsRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
3176 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
3177 for (unsigned R = 0; R < NumRows; R++)
3178 for (unsigned C = 0; C < NumCols; C++)
3179 Mask[MatTy->getFlattenedIndex(Row: R, Column: C, IsRowMajor)] =
3180 SrcMatTy->getFlattenedIndex(Row: R, Column: C, IsRowMajor);
3181
3182 return Builder.CreateShuffleVector(V: Mat, Mask, Name: "trunc");
3183 }
3184 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGF.SizeTy);
3185 return Builder.CreateExtractElement(Vec: Mat, Idx: Zero, Name: "cast.mtrunc");
3186 }
3187 case CK_HLSLElementwiseCast: {
3188 RValue RV = CGF.EmitAnyExpr(E);
3189 SourceLocation Loc = CE->getExprLoc();
3190
3191 Address SrcAddr = Address::invalid();
3192
3193 if (RV.isAggregate()) {
3194 SrcAddr = RV.getAggregateAddress();
3195 } else {
3196 SrcAddr = CGF.CreateMemTemp(T: E->getType(), Name: "hlsl.ewcast.src");
3197 LValue TmpLV = CGF.MakeAddrLValue(Addr: SrcAddr, T: E->getType());
3198 CGF.EmitStoreThroughLValue(Src: RV, Dst: TmpLV);
3199 }
3200
3201 LValue SrcVal = CGF.MakeAddrLValue(Addr: SrcAddr, T: E->getType());
3202 return EmitHLSLElementwiseCast(CGF, SrcVal, DestTy, Loc);
3203 }
3204
3205 } // end of switch
3206
3207 llvm_unreachable("unknown scalar cast");
3208}
3209
3210Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
3211 CodeGenFunction::StmtExprEvaluation eval(CGF);
3212 Address RetAlloca = CGF.EmitCompoundStmt(S: *E->getSubStmt(),
3213 GetLast: !E->getType()->isVoidType());
3214 if (!RetAlloca.isValid())
3215 return nullptr;
3216 return CGF.EmitLoadOfScalar(lvalue: CGF.MakeAddrLValue(Addr: RetAlloca, T: E->getType()),
3217 Loc: E->getExprLoc());
3218}
3219
3220Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
3221 CodeGenFunction::RunCleanupsScope Scope(CGF);
3222 Value *V = Visit(E: E->getSubExpr());
3223 // Defend against dominance problems caused by jumps out of expression
3224 // evaluation through the shared cleanup block.
3225 Scope.ForceCleanup(ValuesToReload: {&V});
3226 return V;
3227}
3228
3229//===----------------------------------------------------------------------===//
3230// Unary Operators
3231//===----------------------------------------------------------------------===//
3232
3233static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
3234 llvm::Value *InVal, bool IsInc,
3235 FPOptions FPFeatures) {
3236 BinOpInfo BinOp;
3237 BinOp.LHS = InVal;
3238 BinOp.RHS = llvm::ConstantInt::get(Ty: InVal->getType(), V: 1, IsSigned: false);
3239 BinOp.Ty = E->getType();
3240 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
3241 BinOp.FPFeatures = FPFeatures;
3242 BinOp.E = E;
3243 return BinOp;
3244}
3245
/// Emit ++/-- on an integer value, selecting between a plain wrapping add,
/// an nsw add, or a full overflow-checked binop depending on the overflow
/// behavior in effect for the operand's type and the active sanitizers.
llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  // Treat positive amount as unsigned to support inc of i1 (needed for
  // unsigned _BitInt(1)).
  llvm::Value *Amount =
      llvm::ConstantInt::get(Ty: InVal->getType(), V: IsInc ? 1 : -1, IsSigned: !IsInc);
  StringRef Name = IsInc ? "inc" : "dec";
  QualType Ty = E->getType();
  const bool isSigned = Ty->isSignedIntegerOrEnumerationType();
  // Whether the integer-overflow sanitizer matching this signedness is on.
  const bool hasSan =
      isSigned ? CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)
               : CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow);

  // NOTE: the cases below deliberately fall through — each stricter mode
  // first tries to prove a cheaper emission is sufficient.
  switch (getOverflowBehaviorConsideringType(CGF, Ty)) {
  case LangOptions::OB_Wrap:
    // Wrapping semantics: a plain two's-complement add is always correct.
    return Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
  case LangOptions::OB_SignedAndDefined:
    // Overflow is defined; only instrument if a sanitizer asked for it.
    if (!hasSan)
      return Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
    [[fallthrough]];
  case LangOptions::OB_Unset:
    // Expressions that provably cannot overflow need no special handling.
    if (!E->canOverflow())
      return Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
    if (!hasSan)
      // No sanitizer: tag signed arithmetic nsw so the optimizer may assume
      // the (undefined) overflow does not happen.
      return isSigned ? Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name)
                      : Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
    [[fallthrough]];
  case LangOptions::OB_Trap:
    if (!Ty->getAs<OverflowBehaviorType>() && !E->canOverflow())
      return Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
    // Route through the overflow-checked binop path unless the check can be
    // statically elided.
    BinOpInfo Info = createBinOpInfoFromIncDec(
        E, InVal, IsInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
    if (CanElideOverflowCheck(Ctx&: CGF.getContext(), Op: Info))
      return isSigned ? Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name)
                      : Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
    return EmitOverflowCheckedBinOp(Ops: Info);
  }
  llvm_unreachable("Unknown OverflowBehaviorKind");
}
3285
namespace {
/// Handles check and update for lastprivate conditional variables.
///
/// RAII helper: on destruction — i.e. after the increment/decrement has been
/// fully emitted — it asks the OpenMP runtime to check whether the modified
/// sub-expression is a lastprivate-conditional variable and, if so, to emit
/// the corresponding tracking update.
class OMPLastprivateConditionalUpdateRAII {
private:
  CodeGenFunction &CGF;
  const UnaryOperator *E;

public:
  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
                                      const UnaryOperator *E)
      : CGF(CGF), E(E) {}
  ~OMPLastprivateConditionalUpdateRAII() {
    // Only relevant when compiling with OpenMP enabled.
    if (CGF.getLangOpts().OpenMP)
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
          CGF, LHS: E->getSubExpr());
  }
};
} // namespace
3304
/// Emit a scalar pre/post increment or decrement (++ / --).
///
/// \param E     the unary operator being emitted.
/// \param LV    lvalue of the operand being modified.
/// \param isInc true for ++, false for --.
/// \param isPre true for the prefix form (result is the updated value),
///              false for postfix (result is the value read before update).
///
/// Handles, in order: _Atomic operands (atomicrmw fast paths and a generic
/// load/op/cmpxchg loop), booleans, integers (with sanitizer-driven
/// promotion), pointers (including VLA and function pointers), vectors,
/// floating point, fixed point, and Objective-C object pointers.
llvm::Value *
ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                           bool isInc, bool isPre) {
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
  QualType type = E->getSubExpr()->getType();
  llvm::PHINode *atomicPHI = nullptr; // non-null while in the cmpxchg loop
  llvm::Value *value;                 // value being updated
  llvm::Value *input;                 // value originally loaded (postfix result)
  llvm::Value *Previous = nullptr;    // pre-demotion value, for bitfield checks
  QualType SrcType = E->getType();

  int amount = (isInc ? 1 : -1);
  bool isSubtraction = !isInc;

  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
    type = atomicTy->getValueType();
    if (isInc && type->isBooleanType()) {
      llvm::Value *True = CGF.EmitToMemory(Value: Builder.getTrue(), Ty: type);
      if (isPre) {
        Builder.CreateStore(Val: True, Addr: LV.getAddress(), IsVolatile: LV.isVolatileQualified())
            ->setAtomic(Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
        return Builder.getTrue();
      }
      // For atomic bool increment, we just store true and return it for
      // preincrement, do an atomic swap with true for postincrement
      return Builder.CreateAtomicRMW(
          Op: llvm::AtomicRMWInst::Xchg, Addr: LV.getAddress(), Val: True,
          Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
    }
    // Special case for atomic increment / decrement on integers, emit
    // atomicrmw instructions. We skip this if we want to be doing overflow
    // checking, and fall into the slow path with the atomic cmpxchg loop.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
        llvm::AtomicRMWInst::Sub;
      llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
        llvm::Instruction::Sub;
      llvm::Value *amt = CGF.EmitToMemory(
          Value: llvm::ConstantInt::get(Ty: ConvertType(T: type), V: 1, IsSigned: true), Ty: type);
      llvm::Value *old =
          Builder.CreateAtomicRMW(Op: aop, Addr: LV.getAddress(), Val: amt,
                                  Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
      // atomicrmw returns the old value; recompute the new one for prefix.
      return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
    }
    // Special case for atomic increment/decrement on floats.
    // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
    if (type->isFloatingType()) {
      llvm::Type *Ty = ConvertType(T: type);
      if (llvm::has_single_bit(Value: Ty->getScalarSizeInBits())) {
        llvm::AtomicRMWInst::BinOp aop =
            isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
        llvm::Instruction::BinaryOps op =
            isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
        llvm::Value *amt = llvm::ConstantFP::get(Ty, V: 1.0);
        llvm::AtomicRMWInst *old =
            CGF.emitAtomicRMWInst(Op: aop, Addr: LV.getAddress(), Val: amt,
                                  Order: llvm::AtomicOrdering::SequentiallyConsistent);

        return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
      }
    }
    value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
    input = value;
    // For every other atomic operation, we need to emit a load-op-cmpxchg loop
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
    value = CGF.EmitToMemory(Value: value, Ty: type);
    Builder.CreateBr(Dest: opBB);
    Builder.SetInsertPoint(opBB);
    // The PHI merges the initial load with each failed cmpxchg's observed
    // value; the loop back-edge is wired up after the operation is emitted.
    atomicPHI = Builder.CreatePHI(Ty: value->getType(), NumReservedValues: 2);
    atomicPHI->addIncoming(V: value, BB: startBB);
    value = atomicPHI;
  } else {
    value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
    input = value;
  }

  // Special case of integer increment that we have to check first: bool++.
  // Due to promotion rules, we get:
  //   bool++ -> bool = bool + 1
  //          -> bool = (int)bool + 1
  //          -> bool = ((int)bool + 1 != 0)
  // An interesting aspect of this is that increment is always true.
  // Decrement does not have this property.
  if (isInc && type->isBooleanType()) {
    value = Builder.getTrue();

  // Most common case by far: integer increment.
  } else if (type->isIntegerType()) {
    QualType promotedType;
    bool canPerformLossyDemotionCheck = false;

    if (CGF.getContext().isPromotableIntegerType(T: type)) {
      promotedType = CGF.getContext().getPromotedIntegerType(PromotableType: type);
      assert(promotedType != type && "Shouldn't promote to the same type.");
      canPerformLossyDemotionCheck = true;
      canPerformLossyDemotionCheck &=
          CGF.getContext().getCanonicalType(T: type) !=
          CGF.getContext().getCanonicalType(T: promotedType);
      canPerformLossyDemotionCheck &=
          PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
              SrcType: type, DstType: promotedType);
      assert((!canPerformLossyDemotionCheck ||
              type->isSignedIntegerOrEnumerationType() ||
              promotedType->isSignedIntegerOrEnumerationType() ||
              ConvertType(type)->getScalarSizeInBits() ==
                  ConvertType(promotedType)->getScalarSizeInBits()) &&
             "The following check expects that if we do promotion to different "
             "underlying canonical type, at least one of the types (either "
             "base or promoted) will be signed, or the bitwidths will match.");
    }
    if (CGF.SanOpts.hasOneOf(
            K: SanitizerKind::ImplicitIntegerArithmeticValueChange |
            SanitizerKind::ImplicitBitfieldConversion) &&
        canPerformLossyDemotionCheck) {
      // While `x += 1` (for `x` with width less than int) is modeled as
      // promotion+arithmetics+demotion, and we can catch lossy demotion with
      // ease; inc/dec with width less than int can't overflow because of
      // promotion rules, so we omit promotion+demotion, which means that we can
      // not catch lossy "demotion". Because we still want to catch these cases
      // when the sanitizer is enabled, we perform the promotion, then perform
      // the increment/decrement in the wider type, and finally
      // perform the demotion. This will catch lossy demotions.

      // We have a special case for bitfields defined using all the bits of the
      // type. In this case we need to do the same trick as for the integer
      // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.

      value = EmitScalarConversion(Src: value, SrcType: type, DstType: promotedType, Loc: E->getExprLoc());
      Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
      value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
      // Do pass non-default ScalarConversionOpts so that sanitizer check is
      // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
      // checks will take care of the conversion.
      ScalarConversionOpts Opts;
      if (!LV.isBitField())
        Opts = ScalarConversionOpts(CGF.SanOpts);
      else if (CGF.SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion)) {
        Previous = value;
        SrcType = promotedType;
      }

      Opts.PatternExcluded = CGF.getContext().isUnaryOverflowPatternExcluded(UO: E);
      value = EmitScalarConversion(Src: value, SrcType: promotedType, DstType: type, Loc: E->getExprLoc(),
                                   Opts);

      // Note that signed integer inc/dec with width less than int can't
      // overflow because of promotion rules; we're just eliding a few steps
      // here.
    } else if (type->isSignedIntegerOrEnumerationType() ||
               type->isUnsignedIntegerType()) {
      value = EmitIncDecConsiderOverflowBehavior(E, InVal: value, IsInc: isInc);
    } else {
      // Treat positive amount as unsigned to support inc of i1 (needed for
      // unsigned _BitInt(1)).
      llvm::Value *amt =
          llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: !isInc);
      value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
    }

  // Next most common: pointer increment.
  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
    QualType type = ptr->getPointeeType();

    // VLA types don't have constant size.
    if (const VariableArrayType *vla
          = CGF.getContext().getAsVariableArrayType(T: type)) {
      llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
      if (!isInc) numElts = Builder.CreateNSWNeg(V: numElts, Name: "vla.negsize");
      llvm::Type *elemTy = CGF.ConvertTypeForMem(T: vla->getElementType());
      if (CGF.getLangOpts().PointerOverflowDefined)
        value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: numElts, Name: "vla.inc");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            ElemTy: elemTy, Ptr: value, IdxList: numElts, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
            Loc: E->getExprLoc(), Name: "vla.inc");

    // Arithmetic on function pointers (!) is just +-1.
    } else if (type->isFunctionType()) {
      llvm::Value *amt = Builder.getInt32(C: amount);

      if (CGF.getLangOpts().PointerOverflowDefined)
        value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: amt, Name: "incdec.funcptr");
      else
        value =
            CGF.EmitCheckedInBoundsGEP(ElemTy: CGF.Int8Ty, Ptr: value, IdxList: amt,
                                       /*SignedIndices=*/false, IsSubtraction: isSubtraction,
                                       Loc: E->getExprLoc(), Name: "incdec.funcptr");

    // For everything else, we can just do a simple increment.
    } else {
      llvm::Value *amt = Builder.getInt32(C: amount);
      llvm::Type *elemTy = CGF.ConvertTypeForMem(T: type);
      if (CGF.getLangOpts().PointerOverflowDefined)
        value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: amt, Name: "incdec.ptr");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            ElemTy: elemTy, Ptr: value, IdxList: amt, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
            Loc: E->getExprLoc(), Name: "incdec.ptr");
    }

  // Vector increment/decrement.
  } else if (type->isVectorType()) {
    if (type->hasIntegerRepresentation()) {
      llvm::Value *amt = llvm::ConstantInt::getSigned(Ty: value->getType(), V: amount);

      value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
    } else {
      value = Builder.CreateFAdd(
                  L: value,
                  R: llvm::ConstantFP::get(Ty: value->getType(), V: amount),
                  Name: isInc ? "inc" : "dec");
    }

  // Floating point.
  } else if (type->isRealFloatingType()) {
    // Add the inc/dec to the real part.
    llvm::Value *amt;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      // Another special case: half FP increment should be done via float. If
      // the input isn't already half, it may be i16.
      Value *bitcast = Builder.CreateBitCast(V: input, DestTy: CGF.CGM.HalfTy);
      value = Builder.CreateFPExt(V: bitcast, DestTy: CGF.CGM.FloatTy, Name: "incdec.conv");
    }

    if (value->getType()->isFloatTy())
      amt = llvm::ConstantFP::get(Context&: VMContext,
                                  V: llvm::APFloat(static_cast<float>(amount)));
    else if (value->getType()->isDoubleTy())
      amt = llvm::ConstantFP::get(Context&: VMContext,
                                  V: llvm::APFloat(static_cast<double>(amount)));
    else {
      // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
      // Convert from float.
      llvm::APFloat F(static_cast<float>(amount));
      bool ignored;
      const llvm::fltSemantics *FS;
      // Don't use getFloatTypeSemantics because Half isn't
      // necessarily represented using the "half" LLVM type.
      if (value->getType()->isFP128Ty())
        FS = &CGF.getTarget().getFloat128Format();
      else if (value->getType()->isHalfTy())
        FS = &CGF.getTarget().getHalfFormat();
      else if (value->getType()->isBFloatTy())
        FS = &CGF.getTarget().getBFloat16Format();
      else if (value->getType()->isPPC_FP128Ty())
        FS = &CGF.getTarget().getIbm128Format();
      else
        FS = &CGF.getTarget().getLongDoubleFormat();
      F.convert(ToSemantics: *FS, RM: llvm::APFloat::rmTowardZero, losesInfo: &ignored);
      amt = llvm::ConstantFP::get(Context&: VMContext, V: F);
    }
    value = Builder.CreateFAdd(L: value, R: amt, Name: isInc ? "inc" : "dec");

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      // Truncate back to half and restore the original in-memory
      // representation (which may be i16).
      value = Builder.CreateFPTrunc(V: value, DestTy: CGF.CGM.HalfTy, Name: "incdec.conv");
      value = Builder.CreateBitCast(V: value, DestTy: input->getType());
    }

  // Fixed-point types.
  } else if (type->isFixedPointType()) {
    // Fixed-point types are tricky. In some cases, it isn't possible to
    // represent a 1 or a -1 in the type at all. Piggyback off of
    // EmitFixedPointBinOp to avoid having to reimplement saturation.
    BinOpInfo Info;
    Info.E = E;
    Info.Ty = E->getType();
    Info.Opcode = isInc ? BO_Add : BO_Sub;
    Info.LHS = value;
    Info.RHS = llvm::ConstantInt::get(Ty: value->getType(), V: 1, IsSigned: false);
    // If the type is signed, it's better to represent this as +(-1) or -(-1),
    // since -1 is guaranteed to be representable.
    if (type->isSignedFixedPointType()) {
      Info.Opcode = isInc ? BO_Sub : BO_Add;
      Info.RHS = Builder.CreateNeg(V: Info.RHS);
    }
    // Now, convert from our invented integer literal to the type of the unary
    // op. This will upscale and saturate if necessary. This value can become
    // undef in some cases.
    llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
    auto DstSema = CGF.getContext().getFixedPointSemantics(Ty: Info.Ty);
    Info.RHS = FPBuilder.CreateIntegerToFixed(Src: Info.RHS, SrcIsSigned: true, DstSema);
    value = EmitFixedPointBinOp(Ops: Info);

  // Objective-C pointer types.
  } else {
    const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();

    CharUnits size = CGF.getContext().getTypeSizeInChars(T: OPT->getObjectType());
    if (!isInc) size = -size;
    llvm::Value *sizeValue =
      llvm::ConstantInt::getSigned(Ty: CGF.SizeTy, V: size.getQuantity());

    if (CGF.getLangOpts().PointerOverflowDefined)
      value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, Name: "incdec.objptr");
    else
      value = CGF.EmitCheckedInBoundsGEP(
          ElemTy: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
          Loc: E->getExprLoc(), Name: "incdec.objptr");
    value = Builder.CreateBitCast(V: value, DestTy: input->getType());
  }

  if (atomicPHI) {
    // Close the cmpxchg loop: try to publish the updated value; on failure,
    // loop back with the freshly observed value feeding the PHI.
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        Obj: LV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: value), Loc: E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: type);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(V: old, BB: curBlock);
    Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return isPre ? value : input;
  }

  // Store the updated result through the lvalue.
  if (LV.isBitField()) {
    Value *Src = Previous ? Previous : value;
    CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: value), Dst: LV, Result: &value);
    CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: value, DstType: E->getType(),
                                    Info: LV.getBitFieldInfo(), Loc: E->getExprLoc());
  } else
    CGF.EmitStoreThroughLValue(Src: RValue::get(V: value), Dst: LV);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? value : input;
}
3640
3641
3642Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3643 QualType PromotionType) {
3644 QualType promotionTy = PromotionType.isNull()
3645 ? getPromotionType(Ty: E->getSubExpr()->getType())
3646 : PromotionType;
3647 Value *result = VisitPlus(E, PromotionType: promotionTy);
3648 if (result && !promotionTy.isNull())
3649 result = EmitUnPromotedValue(result, ExprType: E->getType());
3650 return result;
3651}
3652
3653Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3654 QualType PromotionType) {
3655 // This differs from gcc, though, most likely due to a bug in gcc.
3656 TestAndClearIgnoreResultAssign();
3657 if (!PromotionType.isNull())
3658 return CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3659 return Visit(E: E->getSubExpr());
3660}
3661
3662Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3663 QualType PromotionType) {
3664 QualType promotionTy = PromotionType.isNull()
3665 ? getPromotionType(Ty: E->getSubExpr()->getType())
3666 : PromotionType;
3667 Value *result = VisitMinus(E, PromotionType: promotionTy);
3668 if (result && !promotionTy.isNull())
3669 result = EmitUnPromotedValue(result, ExprType: E->getType());
3670 return result;
3671}
3672
3673Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3674 QualType PromotionType) {
3675 TestAndClearIgnoreResultAssign();
3676 Value *Op;
3677 if (!PromotionType.isNull())
3678 Op = CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3679 else
3680 Op = Visit(E: E->getSubExpr());
3681
3682 // Generate a unary FNeg for FP ops.
3683 if (Op->getType()->isFPOrFPVectorTy())
3684 return Builder.CreateFNeg(V: Op, Name: "fneg");
3685
3686 // Emit unary minus with EmitSub so we handle overflow cases etc.
3687 BinOpInfo BinOp;
3688 BinOp.RHS = Op;
3689 BinOp.LHS = llvm::Constant::getNullValue(Ty: BinOp.RHS->getType());
3690 BinOp.Ty = E->getType();
3691 BinOp.Opcode = BO_Sub;
3692 BinOp.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3693 BinOp.E = E;
3694 return EmitSub(Ops: BinOp);
3695}
3696
3697Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3698 TestAndClearIgnoreResultAssign();
3699 Value *Op = Visit(E: E->getSubExpr());
3700 return Builder.CreateNot(V: Op, Name: "not");
3701}
3702
3703Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3704 // Perform vector logical not on comparison with zero vector.
3705 if (E->getType()->isVectorType() &&
3706 E->getType()->castAs<VectorType>()->getVectorKind() ==
3707 VectorKind::Generic) {
3708 Value *Oper = Visit(E: E->getSubExpr());
3709 Value *Zero = llvm::Constant::getNullValue(Ty: Oper->getType());
3710 Value *Result;
3711 if (Oper->getType()->isFPOrFPVectorTy()) {
3712 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3713 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
3714 Result = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_OEQ, LHS: Oper, RHS: Zero, Name: "cmp");
3715 } else
3716 Result = Builder.CreateICmp(P: llvm::CmpInst::ICMP_EQ, LHS: Oper, RHS: Zero, Name: "cmp");
3717 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
3718 }
3719
3720 // Compare operand to zero.
3721 Value *BoolVal = CGF.EvaluateExprAsBool(E: E->getSubExpr());
3722
3723 // Invert value.
3724 // TODO: Could dynamically modify easy computations here. For example, if
3725 // the operand is an icmp ne, turn into icmp eq.
3726 BoolVal = Builder.CreateNot(V: BoolVal, Name: "lnot");
3727
3728 // ZExt result to the expr type.
3729 return Builder.CreateZExt(V: BoolVal, DestTy: ConvertType(T: E->getType()), Name: "lnot.ext");
3730}
3731
/// Emit the value of a __builtin_offsetof expression.
///
/// Fast path: fold the whole expression to an integer constant. Otherwise,
/// walk the offsetof component list (array subscripts, member fields,
/// non-virtual base classes) accumulating a byte offset; array indices may
/// be runtime values, which is why the result is a general llvm::Value.
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult EVResult;
  if (E->EvaluateAsInt(Result&: EVResult, Ctx: CGF.getContext())) {
    llvm::APSInt Value = EVResult.Val.getInt();
    return Builder.getInt(AI: Value);
  }

  // Loop over the components of the offsetof to compute the value.
  unsigned n = E->getNumComponents();
  llvm::Type* ResultType = ConvertType(T: E->getType());
  llvm::Value* Result = llvm::Constant::getNullValue(Ty: ResultType);
  // CurrentType tracks the type we are offsetting into as the component
  // chain is traversed.
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfNode ON = E->getComponent(Idx: i);
    llvm::Value *Offset = nullptr;
    switch (ON.getKind()) {
    case OffsetOfNode::Array: {
      // Compute the index
      Expr *IdxExpr = E->getIndexExpr(Idx: ON.getArrayExprIndex());
      llvm::Value* Idx = CGF.EmitScalarExpr(E: IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
      Idx = Builder.CreateIntCast(V: Idx, DestTy: ResultType, isSigned: IdxSigned, Name: "conv");

      // Save the element type
      CurrentType =
          CGF.getContext().getAsArrayType(T: CurrentType)->getElementType();

      // Compute the element size
      llvm::Value* ElemSize = llvm::ConstantInt::get(Ty: ResultType,
          V: CGF.getContext().getTypeSizeInChars(T: CurrentType).getQuantity());

      // Multiply out to compute the result
      Offset = Builder.CreateMul(LHS: Idx, RHS: ElemSize);
      break;
    }

    case OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      auto *RD = CurrentType->castAsRecordDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);

      // Get the index of the field in its parent.
      unsigned FieldIndex = MemberDecl->getFieldIndex();

      // Compute the offset to the field.
      // getFieldOffset() is in bits; divide by the char width for bytes.
      int64_t OffsetInt =
          RL.getFieldOffset(FieldNo: FieldIndex) / CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

    case OffsetOfNode::Identifier:
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfNode::Base: {
      // A virtual base has no fixed compile-time offset; report it as
      // unsupported rather than emitting wrong code.
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(S: E, Type: "virtual base in offsetof");
        continue;
      }

      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
          D: CurrentType->castAsCanonical<RecordType>()->getDecl());

      // Save the element type.
      CurrentType = ON.getBase()->getType();

      // Compute the offset to the base.
      auto *BaseRD = CurrentType->castAsCXXRecordDecl();
      CharUnits OffsetInt = RL.getBaseClassOffset(Base: BaseRD);
      Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt.getQuantity());
      break;
    }
    }
    Result = Builder.CreateAdd(LHS: Result, RHS: Offset);
  }
  return Result;
}
3813
/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
/// argument of the sizeof expression as an integer.
///
/// sizeof/__datasizeof/_Countof on a variable-length array produce a runtime
/// value; every other case folds to a constant via the expression evaluator.
Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
    const UnaryExprOrTypeTraitExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (auto Kind = E->getKind();
      Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
    if (const VariableArrayType *VAT =
            CGF.getContext().getAsVariableArrayType(T: TypeToSize)) {
      // For _Countof, we only want to evaluate if the extent is actually
      // variable as opposed to a multi-dimensional array whose extent is
      // constant but whose element type is variable.
      bool EvaluateExtent = true;
      if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
        EvaluateExtent =
            !VAT->getSizeExpr()->isIntegerConstantExpr(Ctx: CGF.getContext());
      }
      if (EvaluateExtent) {
        if (E->isArgumentType()) {
          // sizeof(type) - make sure to emit the VLA size.
          CGF.EmitVariablyModifiedType(Ty: TypeToSize);
        } else {
          // C99 6.5.3.4p2: If the argument is an expression of type
          // VLA, it is evaluated.
          CGF.EmitIgnoredExpr(E: E->getArgumentExpr());
        }

        // For _Countof, we just want to return the size of a single dimension.
        if (Kind == UETT_CountOf)
          return CGF.getVLAElements1D(vla: VAT).NumElts;

        // For sizeof and __datasizeof, we need to scale the number of elements
        // by the size of the array element type.
        auto VlaSize = CGF.getVLASize(vla: VAT);

        // Scale the number of non-VLA elements by the non-VLA element size.
        CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: VlaSize.Type);
        if (!eltSize.isOne())
          return CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize),
                                          RHS: VlaSize.NumElts);
        return VlaSize.NumElts;
      }
    }
  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
    // __builtin_omp_required_simd_align: default SIMD alignment of the
    // pointee type, converted from bits to chars.
    auto Alignment =
        CGF.getContext()
            .toCharUnitsFromBits(BitSize: CGF.getContext().getOpenMPDefaultSimdAlign(
                T: E->getTypeOfArgument()->getPointeeType()))
            .getQuantity();
    return llvm::ConstantInt::get(Ty: CGF.SizeTy, V: Alignment);
  } else if (E->getKind() == UETT_VectorElements) {
    // __builtin_vectorelements: element count of the converted vector type;
    // CreateElementCount also handles scalable element counts.
    auto *VecTy = cast<llvm::VectorType>(Val: ConvertType(T: E->getTypeOfArgument()));
    return Builder.CreateElementCount(Ty: CGF.SizeTy, EC: VecTy->getElementCount());
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  return Builder.getInt(AI: E->EvaluateKnownConstInt(Ctx: CGF.getContext()));
}
3874
3875Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3876 QualType PromotionType) {
3877 QualType promotionTy = PromotionType.isNull()
3878 ? getPromotionType(Ty: E->getSubExpr()->getType())
3879 : PromotionType;
3880 Value *result = VisitReal(E, PromotionType: promotionTy);
3881 if (result && !promotionTy.isNull())
3882 result = EmitUnPromotedValue(result, ExprType: E->getType());
3883 return result;
3884}
3885
/// Emit __real, projecting the real component of a complex operand (or
/// passing a scalar operand through). When \p PromotionType is non-null the
/// value is produced in that promoted type; the caller un-promotes it.
Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue()) {
      if (!PromotionType.isNull()) {
        CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
            E: Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
        // Promotion of a complex element happens in the matching complex
        // type; wrap a scalar promotion type accordingly.
        PromotionType = PromotionType->isAnyComplexType()
                            ? PromotionType
                            : CGF.getContext().getComplexType(T: PromotionType);
        return result.first ? CGF.EmitPromotedValue(result, PromotionType).first
                            : result.first;
      }

      return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
          .getScalarVal();
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(E: Op, IgnoreReal: false, IgnoreImag: true).first;
  }

  // Scalar operand: __real is the value itself (promoted if requested).
  if (!PromotionType.isNull())
    return CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
  return Visit(E: Op);
}
3915
3916Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3917 QualType PromotionType) {
3918 QualType promotionTy = PromotionType.isNull()
3919 ? getPromotionType(Ty: E->getSubExpr()->getType())
3920 : PromotionType;
3921 Value *result = VisitImag(E, PromotionType: promotionTy);
3922 if (result && !promotionTy.isNull())
3923 result = EmitUnPromotedValue(result, ExprType: E->getType());
3924 return result;
3925}
3926
/// Emit __imag, projecting the imaginary component of a complex operand.
/// For a scalar operand the result is zero, but the operand is still emitted
/// for its side effects. When \p PromotionType is non-null the value is
/// produced in that promoted type; the caller un-promotes it.
Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue()) {
      if (!PromotionType.isNull()) {
        CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
            E: Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
        // Promotion of a complex element happens in the matching complex
        // type; wrap a scalar promotion type accordingly.
        PromotionType = PromotionType->isAnyComplexType()
                            ? PromotionType
                            : CGF.getContext().getComplexType(T: PromotionType);
        return result.second
                   ? CGF.EmitPromotedValue(result, PromotionType).second
                   : result.second;
      }

      return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
          .getScalarVal();
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(E: Op, IgnoreReal: true, IgnoreImag: false).second;
  }

  // __imag on a scalar returns zero. Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(E: Op);
  else if (!PromotionType.isNull())
    CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
  else
    CGF.EmitScalarExpr(E: Op, IgnoreResultAssign: true);
  // The zero must be of the (possibly promoted) result type.
  if (!PromotionType.isNull())
    return llvm::Constant::getNullValue(Ty: ConvertType(T: PromotionType));
  return llvm::Constant::getNullValue(Ty: ConvertType(T: E->getType()));
}
3965
3966//===----------------------------------------------------------------------===//
3967// Binary Operators
3968//===----------------------------------------------------------------------===//
3969
3970Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3971 QualType PromotionType) {
3972 return CGF.Builder.CreateFPExt(V: result, DestTy: ConvertType(T: PromotionType), Name: "ext");
3973}
3974
3975Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3976 QualType ExprType) {
3977 return CGF.Builder.CreateFPTrunc(V: result, DestTy: ConvertType(T: ExprType), Name: "unpromotion");
3978}
3979
/// Emit \p E computed in \p PromotionType. Binary +,-,*,/ and unary
/// __imag/__real/-/+ are emitted directly in the promoted type; any other
/// expression is emitted normally and then promoted (or un-promoted when the
/// promotion type is null) to match.
Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
  E = E->IgnoreParens();
  if (auto BO = dyn_cast<BinaryOperator>(Val: E)) {
    switch (BO->getOpcode()) {
// Dispatch the supported arithmetic opcodes to their Emit* helpers, with
// both operands evaluated in the promoted type.
#define HANDLE_BINOP(OP) \
  case BO_##OP: \
    return Emit##OP(EmitBinOps(BO, PromotionType));
      HANDLE_BINOP(Add)
      HANDLE_BINOP(Sub)
      HANDLE_BINOP(Mul)
      HANDLE_BINOP(Div)
#undef HANDLE_BINOP
    default:
      break;
    }
  } else if (auto UO = dyn_cast<UnaryOperator>(Val: E)) {
    switch (UO->getOpcode()) {
    case UO_Imag:
      return VisitImag(E: UO, PromotionType);
    case UO_Real:
      return VisitReal(E: UO, PromotionType);
    case UO_Minus:
      return VisitMinus(E: UO, PromotionType);
    case UO_Plus:
      return VisitPlus(E: UO, PromotionType);
    default:
      break;
    }
  }
  // Fallback: emit normally, then convert to/from the promoted type.
  auto result = Visit(E: const_cast<Expr *>(E));
  if (result) {
    if (!PromotionType.isNull())
      return EmitPromotedValue(result, PromotionType);
    else
      return EmitUnPromotedValue(result, ExprType: E->getType());
  }
  return result;
}
4018
4019BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
4020 QualType PromotionType) {
4021 TestAndClearIgnoreResultAssign();
4022 BinOpInfo Result;
4023 Result.LHS = CGF.EmitPromotedScalarExpr(E: E->getLHS(), PromotionType);
4024 Result.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType);
4025 if (!PromotionType.isNull())
4026 Result.Ty = PromotionType;
4027 else
4028 Result.Ty = E->getType();
4029 Result.Opcode = E->getOpcode();
4030 Result.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
4031 Result.E = E;
4032 return Result;
4033}
4034
/// Emit a compound assignment (+=, -=, ...) and return the LHS lvalue.
///
/// The computed (and converted-back) value is returned through \p Result so
/// EmitCompoundAssign can use it as the expression's value. Atomic LHS types
/// are lowered either to a single atomicrmw (when the opcode maps onto one
/// and no overflow trapping/sanitizing is required) or to a cmpxchg loop.
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
    const CompoundAssignOperator *E,
    Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
    Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  // Computations done in a complex type take a separate path entirely.
  if (E->getComputationResultType()->isAnyComplexType())
    return CGF.EmitScalarCompoundAssignWithComplex(E, Result);

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.

  // Determine the (possibly promoted) types for the computation result and
  // for each operand; a null promotion type means "no promotion".
  QualType PromotionTypeCR;
  PromotionTypeCR = getPromotionType(Ty: E->getComputationResultType());
  if (PromotionTypeCR.isNull())
    PromotionTypeCR = E->getComputationResultType();
  QualType PromotionTypeLHS = getPromotionType(Ty: E->getComputationLHSType());
  QualType PromotionTypeRHS = getPromotionType(Ty: E->getRHS()->getType());
  if (!PromotionTypeRHS.isNull())
    OpInfo.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType: PromotionTypeRHS);
  else
    OpInfo.RHS = Visit(E: E->getRHS());
  OpInfo.Ty = PromotionTypeCR;
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    // Try the single-instruction atomicrmw lowering: only for non-bool
    // integers, and not when overflow sanitizing/trapping would need to
    // observe the arithmetic.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
      // We don't have atomicrmw operands for *, %, /, <<, >>
      case BO_MulAssign: case BO_DivAssign:
      case BO_RemAssign:
      case BO_ShlAssign:
      case BO_ShrAssign:
        break;
      case BO_AddAssign:
        AtomicOp = llvm::AtomicRMWInst::Add;
        Op = llvm::Instruction::Add;
        break;
      case BO_SubAssign:
        AtomicOp = llvm::AtomicRMWInst::Sub;
        Op = llvm::Instruction::Sub;
        break;
      case BO_AndAssign:
        AtomicOp = llvm::AtomicRMWInst::And;
        Op = llvm::Instruction::And;
        break;
      case BO_XorAssign:
        AtomicOp = llvm::AtomicRMWInst::Xor;
        Op = llvm::Instruction::Xor;
        break;
      case BO_OrAssign:
        AtomicOp = llvm::AtomicRMWInst::Or;
        Op = llvm::Instruction::Or;
        break;
      default:
        llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        llvm::Value *Amt = CGF.EmitToMemory(
            Value: EmitScalarConversion(Src: OpInfo.RHS, SrcType: E->getRHS()->getType(), DstType: LHSTy,
                                  Loc: E->getExprLoc()),
            Ty: LHSTy);

        llvm::AtomicRMWInst *OldVal =
            CGF.emitAtomicRMWInst(Op: AtomicOp, Addr: LHSLV.getAddress(), Val: Amt);

        // Since operation is atomic, the result type is guaranteed to be the
        // same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Opc: Op, LHS: OldVal, RHS: Amt);
        return LHSLV;
      }
    }
    // Fall back to a compare-exchange loop: load, compute below, then try
    // to publish the result and loop on contention (closed further down).
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LV: LHSLV, Loc: E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(Value: OpInfo.LHS, Ty: type);
    Builder.CreateBr(Dest: opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(Ty: OpInfo.LHS->getType(), NumReservedValues: 2);
    atomicPHI->addIncoming(V: OpInfo.LHS, BB: startBB);
    OpInfo.LHS = atomicPHI;
  }
  else
    OpInfo.LHS = EmitLoadOfLValue(LV: LHSLV, Loc: E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  SourceLocation Loc = E->getExprLoc();
  // Convert the loaded LHS into the type the computation is performed in.
  if (!PromotionTypeLHS.isNull())
    OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy, DstType: PromotionTypeLHS,
                                      Loc: E->getExprLoc());
  else
    OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy,
                                      DstType: E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  // If LHSLV is a bitfield, use default ScalarConversionOpts
  // to avoid emit any implicit integer checks.
  Value *Previous = nullptr;
  if (LHSLV.isBitField()) {
    // Keep the pre-conversion value so the bitfield conversion check below
    // can compare against what was actually computed.
    Previous = Result;
    Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc);
  } else if (const auto *atomicTy = LHSTy->getAs<AtomicType>()) {
    Result =
        EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: atomicTy->getValueType(),
                             Loc, Opts: ScalarConversionOpts(CGF.SanOpts));
  } else {
    Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc,
                                  Opts: ScalarConversionOpts(CGF.SanOpts));
  }

  if (atomicPHI) {
    // Close the cmpxchg loop opened above: attempt the store; on failure the
    // freshly observed value feeds back into the PHI and we recompute.
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        Obj: LHSLV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: Result), Loc: E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(V: old, BB: curBlock);
    Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField()) {
    Value *Src = Previous ? Previous : Result;
    QualType SrcType = E->getRHS()->getType();
    QualType DstType = E->getLHS()->getType();
    CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: Result), Dst: LHSLV, Result: &Result);
    CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: Result, DstType,
                                    Info: LHSLV.getBitFieldInfo(), Loc: E->getExprLoc());
  } else
    CGF.EmitStoreThroughLValue(Src: RValue::get(V: Result), Dst: LHSLV);

  if (CGF.getLangOpts().OpenMP)
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
                                                                  LHS: E->getLHS());
  return LHSLV;
}
4196
4197Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
4198 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
4199 bool Ignore = TestAndClearIgnoreResultAssign();
4200 Value *RHS = nullptr;
4201 LValue LHS = EmitCompoundAssignLValue(E, Func, Result&: RHS);
4202
4203 // If the result is clearly ignored, return now.
4204 if (Ignore)
4205 return nullptr;
4206
4207 // The result of an assignment in C is the assigned r-value.
4208 if (!CGF.getLangOpts().CPlusPlus)
4209 return RHS;
4210
4211 // If the lvalue is non-volatile, return the computed value of the assignment.
4212 if (!LHS.isVolatileQualified())
4213 return RHS;
4214
4215 // Otherwise, reload the value.
4216 return EmitLoadOfLValue(LV: LHS, Loc: E->getExprLoc());
4217}
4218
/// Emit UBSan checks for an integer division or remainder: divide-by-zero
/// and, for signed operands, the INT_MIN / -1 overflow case. \p Zero is a
/// zero constant of the operand type. Note that \p isDiv is not consulted
/// here; div and rem share the same checks.
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
  SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
      Checks;

  if (CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero)) {
    Checks.push_back(Elt: std::make_pair(x: Builder.CreateICmpNE(LHS: Ops.RHS, RHS: Zero),
                                    y: SanitizerKind::SO_IntegerDivideByZero));
  }

  const auto *BO = cast<BinaryOperator>(Val: Ops.E);
  if (CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow) &&
      Ops.Ty->hasSignedIntegerRepresentation() &&
      !IsWidenedIntegerOp(Ctx: CGF.getContext(), E: BO->getLHS()) &&
      Ops.mayHaveIntegerOverflow() && !Ops.Ty.isWrapType() &&
      !CGF.getContext().isTypeIgnoredBySanitizer(
          Mask: SanitizerKind::SignedIntegerOverflow, Ty: Ops.Ty)) {
    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Val: Zero->getType());

    // Signed division only overflows for INT_MIN / -1; the check passes
    // unless LHS == INT_MIN and RHS == -1 hold simultaneously.
    llvm::Value *IntMin =
        Builder.getInt(AI: llvm::APInt::getSignedMinValue(numBits: Ty->getBitWidth()));
    llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);

    llvm::Value *LHSCmp = Builder.CreateICmpNE(LHS: Ops.LHS, RHS: IntMin);
    llvm::Value *RHSCmp = Builder.CreateICmpNE(LHS: Ops.RHS, RHS: NegOne);
    llvm::Value *NotOverflow = Builder.CreateOr(LHS: LHSCmp, RHS: RHSCmp, Name: "or");
    Checks.push_back(
        Elt: std::make_pair(x&: NotOverflow, y: SanitizerKind::SO_SignedIntegerOverflow));
  }

  if (Checks.size() > 0)
    EmitBinOpCheck(Checks, Info: Ops);
}
4252
/// Emit a division. After any sanitizer checks, dispatches on the operand
/// type: matrix-by-scalar division, floating-point fdiv (tagged with the
/// requested FP accuracy), fixed-point division, or integer udiv/sdiv.
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  // Scope the sanitizer debug location to just the checks.
  {
    SanitizerDebugLocation SanScope(&CGF,
                                    {SanitizerKind::SO_IntegerDivideByZero,
                                     SanitizerKind::SO_SignedIntegerOverflow,
                                     SanitizerKind::SO_FloatDivideByZero},
                                    SanitizerHandler::DivremOverflow);
    if ((CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero) ||
         CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) &&
        Ops.Ty->isIntegerType() &&
        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
      llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, isDiv: true);
    } else if (CGF.SanOpts.has(K: SanitizerKind::FloatDivideByZero) &&
               Ops.Ty->isRealFloatingType() &&
               Ops.mayHaveFloatDivisionByZero()) {
      llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
      llvm::Value *NonZero = Builder.CreateFCmpUNE(LHS: Ops.RHS, RHS: Zero);
      EmitBinOpCheck(
          Checks: std::make_pair(x&: NonZero, y: SanitizerKind::SO_FloatDivideByZero), Info: Ops);
    }
  }

  if (Ops.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    // We need to check the types of the operands of the operator to get the
    // correct matrix dimensions.
    auto *BO = cast<BinaryOperator>(Val: Ops.E);
    (void)BO;
    assert(
        isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
        "first operand must be a matrix");
    assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
           "second operand must be an arithmetic type");
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    return MB.CreateScalarDiv(LHS: Ops.LHS, RHS: Ops.RHS,
                              IsUnsigned: Ops.Ty->hasUnsignedIntegerRepresentation());
  }

  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
    llvm::Value *Val;
    // Apply the expression's FP environment, then attach the division
    // accuracy metadata (e.g. for -fmath-errno / OpenCL precision modes).
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    Val = Builder.CreateFDiv(L: Ops.LHS, R: Ops.RHS, Name: "div");
    CGF.SetDivFPAccuracy(Val);
    return Val;
  }
  else if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
  else
    return Builder.CreateSDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
}
4306
/// Emit a remainder operation: urem/srem for integers, frem for HLSL
/// floating-point types, with UBSan div-by-zero/overflow checks first.
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if ((CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero) ||
       CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) &&
      Ops.Ty->isIntegerType() &&
      (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
    SanitizerDebugLocation SanScope(&CGF,
                                    {SanitizerKind::SO_IntegerDivideByZero,
                                     SanitizerKind::SO_SignedIntegerOverflow},
                                    SanitizerHandler::DivremOverflow);
    llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
    EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, isDiv: false);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateURem(LHS: Ops.LHS, RHS: Ops.RHS, Name: "rem");

  // HLSL permits % on floating-point operands, lowered as frem.
  if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
    return Builder.CreateFRem(L: Ops.LHS, R: Ops.RHS, Name: "rem");

  return Builder.CreateSRem(LHS: Ops.LHS, RHS: Ops.RHS, Name: "rem");
}
4329
/// Emit +, -, or * with overflow detection via the llvm.*.with.overflow
/// intrinsics. On overflow: with no -ftrapv handler configured, either emit
/// a sanitizer check or a trap; with a handler, branch to a call of the
/// user-supplied runtime function and merge its result back in with a PHI.
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  // OpID encodes the operation and signedness for the runtime handler:
  // (op-kind << 1) | is-signed, where op-kind is 1=add, 2=sub, 3=mul.
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  SanitizerDebugLocation SanScope(&CGF,
                                  {SanitizerKind::SO_SignedIntegerOverflow,
                                   SanitizerKind::SO_UnsignedIntegerOverflow},
                                  OverflowKind);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(T: Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, Tys: opTy);

  // The intrinsic returns {result, did-overflow}.
  Value *resultAndOverflow = Builder.CreateCall(Callee: intrinsic, Args: {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(Agg: resultAndOverflow, Idxs: 0);
  Value *overflow = Builder.CreateExtractValue(Agg: resultAndOverflow, Idxs: 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
      &CGF.getLangOpts().OverflowHandler;
  if (handlerName->empty()) {
    // If no -ftrapv handler has been specified, try to use sanitizer runtimes
    // if available otherwise just emit a trap. It is possible for unsigned
    // arithmetic to result in a trap due to the OverflowBehaviorType attribute
    // which describes overflow behavior on a per-type basis.
    if (isSigned) {
      if (CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) {
        llvm::Value *NotOf = Builder.CreateNot(V: overflow);
        EmitBinOpCheck(
            Checks: std::make_pair(x&: NotOf, y: SanitizerKind::SO_SignedIntegerOverflow),
            Info: Ops);
      } else
        CGF.EmitTrapCheck(Checked: Builder.CreateNot(V: overflow), CheckHandlerID: OverflowKind);
      return result;
    }
    if (CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) {
      llvm::Value *NotOf = Builder.CreateNot(V: overflow);
      EmitBinOpCheck(
          Checks: std::make_pair(x&: NotOf, y: SanitizerKind::SO_UnsignedIntegerOverflow),
          Info: Ops);
    } else
      CGF.EmitTrapCheck(Checked: Builder.CreateNot(V: overflow), CheckHandlerID: OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock(name: "nooverflow", parent: CGF.CurFn, before: initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock(name: "overflow", parent: CGF.CurFn);

  Builder.CreateCondBr(Cond: overflow, True: overflowBB, False: continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(Result: CGF.Int64Ty, Params: argTypes, isVarArg: true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(Ty: handlerTy, Name: *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(V: Ops.LHS, DestTy: CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(V: Ops.RHS, DestTy: CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(C: OpID),
    Builder.getInt8(C: cast<llvm::IntegerType>(Val: opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
      CGF.EmitNounwindRuntimeCall(callee: handler, args: handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(V: handlerResult, DestTy: opTy);
  Builder.CreateBr(Dest: continueBB);

  // Merge the normal result and the handler's result.
  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(Ty: opTy, NumReservedValues: 2);
  phi->addIncoming(V: result, BB: initialBB);
  phi->addIncoming(V: handlerResult, BB: overflowBB);

  return phi;
}
4452
4453/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
4454/// information.
4455/// This function is used for BO_AddAssign/BO_SubAssign.
4456static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
4457 bool isSubtraction) {
4458 // Must have binary (not unary) expr here. Unary pointer
4459 // increment/decrement doesn't use this path.
4460 const BinaryOperator *expr = cast<BinaryOperator>(Val: op.E);
4461
4462 Value *pointer = op.LHS;
4463 Expr *pointerOperand = expr->getLHS();
4464 Value *index = op.RHS;
4465 Expr *indexOperand = expr->getRHS();
4466
4467 // In a subtraction, the LHS is always the pointer.
4468 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
4469 std::swap(a&: pointer, b&: index);
4470 std::swap(a&: pointerOperand, b&: indexOperand);
4471 }
4472
4473 return CGF.EmitPointerArithmetic(BO: expr, pointerOperand, pointer, indexOperand,
4474 index, isSubtraction);
4475}
4476
4477/// Emit pointer + index arithmetic.
4478llvm::Value *CodeGenFunction::EmitPointerArithmetic(
4479 const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
4480 Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
4481 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
4482
4483 unsigned width = cast<llvm::IntegerType>(Val: index->getType())->getBitWidth();
4484 auto &DL = CGM.getDataLayout();
4485 auto *PtrTy = cast<llvm::PointerType>(Val: pointer->getType());
4486
4487 // Some versions of glibc and gcc use idioms (particularly in their malloc
4488 // routines) that add a pointer-sized integer (known to be a pointer value)
4489 // to a null pointer in order to cast the value back to an integer or as
4490 // part of a pointer alignment algorithm. This is undefined behavior, but
4491 // we'd like to be able to compile programs that use it.
4492 //
4493 // Normally, we'd generate a GEP with a null-pointer base here in response
4494 // to that code, but it's also UB to dereference a pointer created that
4495 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
4496 // generate a direct cast of the integer value to a pointer.
4497 //
4498 // The idiom (p = nullptr + N) is not met if any of the following are true:
4499 //
4500 // The operation is subtraction.
4501 // The index is not pointer-sized.
4502 // The pointer type is not byte-sized.
4503 //
4504 // Note that we do not suppress the pointer overflow check in this case.
4505 if (BinaryOperator::isNullPointerArithmeticExtension(
4506 Ctx&: getContext(), Opc: BO->getOpcode(), LHS: pointerOperand, RHS: indexOperand)) {
4507 llvm::Value *Ptr = Builder.CreateIntToPtr(V: index, DestTy: pointer->getType());
4508 if (getLangOpts().PointerOverflowDefined ||
4509 !SanOpts.has(K: SanitizerKind::PointerOverflow) ||
4510 NullPointerIsDefined(F: Builder.GetInsertBlock()->getParent(),
4511 AS: PtrTy->getPointerAddressSpace()))
4512 return Ptr;
4513 // The inbounds GEP of null is valid iff the index is zero.
4514 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
4515 auto CheckHandler = SanitizerHandler::PointerOverflow;
4516 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4517 llvm::Value *IsZeroIndex = Builder.CreateIsNull(Arg: index);
4518 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc: BO->getExprLoc())};
4519 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
4520 llvm::Value *IntPtr = llvm::Constant::getNullValue(Ty: IntPtrTy);
4521 llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(V: index, DestTy: IntPtrTy);
4522 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4523 EmitCheck(Checked: {{IsZeroIndex, CheckOrdinal}}, Check: CheckHandler, StaticArgs,
4524 DynamicArgs);
4525 return Ptr;
4526 }
4527
4528 if (width != DL.getIndexTypeSizeInBits(Ty: PtrTy)) {
4529 // Zero-extend or sign-extend the pointer value according to
4530 // whether the index is signed or not.
4531 index = Builder.CreateIntCast(V: index, DestTy: DL.getIndexType(PtrTy), isSigned,
4532 Name: "idx.ext");
4533 }
4534
4535 // If this is subtraction, negate the index.
4536 if (isSubtraction)
4537 index = Builder.CreateNeg(V: index, Name: "idx.neg");
4538
4539 if (SanOpts.has(K: SanitizerKind::ArrayBounds))
4540 EmitBoundsCheck(ArrayExpr: BO, ArrayExprBase: pointerOperand, Index: index, IndexType: indexOperand->getType(),
4541 /*Accessed*/ false);
4542
4543 const PointerType *pointerType =
4544 pointerOperand->getType()->getAs<PointerType>();
4545 if (!pointerType) {
4546 QualType objectType = pointerOperand->getType()
4547 ->castAs<ObjCObjectPointerType>()
4548 ->getPointeeType();
4549 llvm::Value *objectSize =
4550 CGM.getSize(numChars: getContext().getTypeSizeInChars(T: objectType));
4551
4552 index = Builder.CreateMul(LHS: index, RHS: objectSize);
4553
4554 llvm::Value *result = Builder.CreateGEP(Ty: Int8Ty, Ptr: pointer, IdxList: index, Name: "add.ptr");
4555 return Builder.CreateBitCast(V: result, DestTy: pointer->getType());
4556 }
4557
4558 QualType elementType = pointerType->getPointeeType();
4559 if (const VariableArrayType *vla =
4560 getContext().getAsVariableArrayType(T: elementType)) {
4561 // The element count here is the total number of non-VLA elements.
4562 llvm::Value *numElements = getVLASize(vla).NumElts;
4563
4564 // Effectively, the multiply by the VLA size is part of the GEP.
4565 // GEP indexes are signed, and scaling an index isn't permitted to
4566 // signed-overflow, so we use the same semantics for our explicit
4567 // multiply. We suppress this if overflow is not undefined behavior.
4568 llvm::Type *elemTy = ConvertTypeForMem(T: vla->getElementType());
4569 if (getLangOpts().PointerOverflowDefined) {
4570 index = Builder.CreateMul(LHS: index, RHS: numElements, Name: "vla.index");
4571 pointer = Builder.CreateGEP(Ty: elemTy, Ptr: pointer, IdxList: index, Name: "add.ptr");
4572 } else {
4573 index = Builder.CreateNSWMul(LHS: index, RHS: numElements, Name: "vla.index");
4574 pointer =
4575 EmitCheckedInBoundsGEP(ElemTy: elemTy, Ptr: pointer, IdxList: index, SignedIndices: isSigned,
4576 IsSubtraction: isSubtraction, Loc: BO->getExprLoc(), Name: "add.ptr");
4577 }
4578 return pointer;
4579 }
4580
4581 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
4582 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
4583 // future proof.
4584 llvm::Type *elemTy;
4585 if (elementType->isVoidType() || elementType->isFunctionType())
4586 elemTy = Int8Ty;
4587 else
4588 elemTy = ConvertTypeForMem(T: elementType);
4589
4590 if (getLangOpts().PointerOverflowDefined)
4591 return Builder.CreateGEP(Ty: elemTy, Ptr: pointer, IdxList: index, Name: "add.ptr");
4592
4593 return EmitCheckedInBoundsGEP(ElemTy: elemTy, Ptr: pointer, IdxList: index, SignedIndices: isSigned, IsSubtraction: isSubtraction,
4594 Loc: BO->getExprLoc(), Name: "add.ptr");
4595}
4596
// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
// Addend. Use negMul and negAdd to negate the first operand of the Mul or
// the add operand respectively. This allows fmuladd to represent a*b-c, or
// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
// efficient operations.
//
// Precondition (guaranteed by tryEmitFMulAdd): MulOp has no remaining users,
// so it is safe to erase after its operands are folded into the fmuladd.
static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
                           bool negMul, bool negAdd) {
  Value *MulOp0 = MulOp->getOperand(i: 0);
  Value *MulOp1 = MulOp->getOperand(i: 1);
  // Apply the requested negations before forming the fused call.
  if (negMul)
    MulOp0 = Builder.CreateFNeg(V: MulOp0, Name: "neg");
  if (negAdd)
    Addend = Builder.CreateFNeg(V: Addend, Name: "neg");

  Value *FMulAdd = nullptr;
  if (Builder.getIsFPConstrained()) {
    assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
           "Only constrained operation should be created when Builder is in FP "
           "constrained mode");
    // In strict-FP mode the fused call must itself be a constrained intrinsic
    // so rounding-mode and exception-behavior metadata are preserved.
    FMulAdd = Builder.CreateConstrainedFPCall(
        Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::experimental_constrained_fmuladd,
                              Tys: Addend->getType()),
        Args: {MulOp0, MulOp1, Addend});
  } else {
    FMulAdd = Builder.CreateCall(
        Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::fmuladd, Tys: Addend->getType()),
        Args: {MulOp0, MulOp1, Addend});
  }
  // The multiply is now folded into the fmuladd; remove the dead instruction.
  MulOp->eraseFromParent();

  return FMulAdd;
}
4630
// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
//
// Returns the fused value, or nullptr if no fusion opportunity was found.
// When fusion happens, the consumed fmul (and any fneg looked through) are
// erased from the IR as a side effect.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
      LHS = LHSUnOp->getOperand(i_nocapture: 0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(i_nocapture: 0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      // (Erasing the fneg first makes the fmul's use count drop to zero,
      // satisfying buildFMulAdd's precondition.)
      if (NegLHS)
        cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
      return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      // For a*b on the RHS of a subtraction, -(a*b) folds into the mul
      // operand negation, hence the isSub ^ NegRHS below.
      if (NegRHS)
        cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
      return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
    }
  }

  // Strict-FP mode: the multiply is a constrained intrinsic call rather than
  // a plain fmul instruction, so repeat the same matching on CallBase.
  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(Val: LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
      return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(Val: RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
      return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
    }
  }

  return nullptr;
}
4717
// Emit the scalar '+' (and '+=') operator. Pointer + integer in either order
// is delegated to emitPointerArithmetic. Integer adds pick a wrapping, nsw,
// or runtime-checked lowering based on the overflow-behavior mode and the
// active integer-overflow sanitizers. FP adds may fuse into llvm.fmuladd
// under -ffp-contract.
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, isSubtraction: CodeGenFunction::NotSubtraction);

  if (op.Ty->isSignedIntegerOrEnumerationType() ||
      op.Ty->isUnsignedIntegerType()) {
    const bool isSigned = op.Ty->isSignedIntegerOrEnumerationType();
    const bool hasSan =
        isSigned ? CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)
                 : CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow);
    // The fallthroughs deliberately funnel sanitized configurations into the
    // runtime-checked path at the bottom.
    switch (getOverflowBehaviorConsideringType(CGF, Ty: op.Ty)) {
    case LangOptions::OB_Wrap:
      // Wrapping semantics requested: plain two's-complement add, no poison.
      return Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
    case LangOptions::OB_SignedAndDefined:
      // Signed overflow is defined (-fwrapv-like), but a sanitizer may still
      // want to observe it.
      if (!hasSan)
        return Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
      [[fallthrough]];
    case LangOptions::OB_Unset:
      // Default C/C++ semantics: signed overflow is UB, so emit nsw.
      if (!hasSan)
        return isSigned ? Builder.CreateNSWAdd(LHS: op.LHS, RHS: op.RHS, Name: "add")
                        : Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
      [[fallthrough]];
    case LangOptions::OB_Trap:
      // Elide the runtime check when the operands provably cannot overflow.
      if (CanElideOverflowCheck(Ctx&: CGF.getContext(), Op: op))
        return isSigned ? Builder.CreateNSWAdd(LHS: op.LHS, RHS: op.RHS, Name: "add")
                        : Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
      return EmitOverflowCheckedBinOp(Ops: op);
    }
  }

  // For vector and matrix adds, try to fold into a fmuladd.
  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(LHS: op.LHS, RHS: op.RHS);
  }

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return Builder.CreateFAdd(L: op.LHS, R: op.RHS, Name: "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(Ops: op);

  return Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
}
4773
/// Emit a binary (or compound-assignment) operation where at least one
/// operand has a fixed-point type, using llvm::FixedPointBuilder.
///
/// The resulting value must be calculated with exact precision, so the operands
/// may not be the same type. Each operand is handled in its own fixed-point
/// semantics and the result is converted to the result type's semantics at
/// the end (comparisons return i1 directly and skip that conversion).
Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
  using llvm::APSInt;
  using llvm::ConstantInt;

  // This is either a binary operation where at least one of the operands is
  // a fixed-point type, or a unary operation where the operand is a fixed-point
  // type. The result type of a binary operation is determined by
  // Sema::handleFixedPointConversions().
  QualType ResultTy = op.Ty;
  QualType LHSTy, RHSTy;
  if (const auto *BinOp = dyn_cast<BinaryOperator>(Val: op.E)) {
    RHSTy = BinOp->getRHS()->getType();
    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(Val: BinOp)) {
      // For compound assignment, the effective type of the LHS at this point
      // is the computation LHS type, not the actual LHS type, and the final
      // result type is not the type of the expression but rather the
      // computation result type.
      LHSTy = CAO->getComputationLHSType();
      ResultTy = CAO->getComputationResultType();
    } else
      LHSTy = BinOp->getLHS()->getType();
  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(Val: op.E)) {
    // Unary ops (e.g. ++/--) operate on a single fixed-point operand; treat
    // it as both sides of the computation.
    LHSTy = UnOp->getSubExpr()->getType();
    RHSTy = UnOp->getSubExpr()->getType();
  }
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Fixed-point semantics describe width, scale, signedness and saturation.
  auto LHSFixedSema = Ctx.getFixedPointSemantics(Ty: LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(Ty: RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(Ty: ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(Other: RHSFixedSema);

  // Perform the actual operation.
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    // Shift amount is a plain integer; only the LHS carries fixed-point
    // semantics.
    Result = FPBuilder.CreateShl(LHS, LHSSema: LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSSema: LHSFixedSema, RHS);
    break;
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types are
    // zero'd out. They could be overwritten through non-saturating operations
    // that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  // Shifts compute in the LHS's semantics; everything else in the common
  // semantics of both operands.
  bool IsShift = BinaryOperator::isShiftOp(Opc: op.Opcode) ||
                 BinaryOperator::isShiftAssignOp(Opc: op.Opcode);
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Src: Result, SrcSema: IsShift ? LHSFixedSema
                                                           : CommonFixedSema,
                                      DstSema: ResultFixedSema);
}
4879
// Emit the scalar '-' (and '-=') operator. Handles, in order: non-pointer
// subtraction (integer with overflow-behavior/sanitizer lowering, fmuladd
// fusion, matrix, FP, fixed-point), pointer - integer (pointer arithmetic),
// and pointer - pointer (difference in elements via exact sdiv).
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType() ||
        op.Ty->isUnsignedIntegerType()) {
      const bool isSigned = op.Ty->isSignedIntegerOrEnumerationType();
      const bool hasSan =
          isSigned ? CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)
                   : CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow);
      // Same fallthrough funnel as EmitAdd: sanitized configurations end up
      // in the runtime-checked path.
      switch (getOverflowBehaviorConsideringType(CGF, Ty: op.Ty)) {
      case LangOptions::OB_Wrap:
        return Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
      case LangOptions::OB_SignedAndDefined:
        if (!hasSan)
          return Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
        [[fallthrough]];
      case LangOptions::OB_Unset:
        if (!hasSan)
          return isSigned ? Builder.CreateNSWSub(LHS: op.LHS, RHS: op.RHS, Name: "sub")
                          : Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
        [[fallthrough]];
      case LangOptions::OB_Trap:
        // Elide the runtime check when overflow is provably impossible.
        if (CanElideOverflowCheck(Ctx&: CGF.getContext(), Op: op))
          return isSigned ? Builder.CreateNSWSub(LHS: op.LHS, RHS: op.RHS, Name: "sub")
                          : Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
        return EmitOverflowCheckedBinOp(Ops: op);
      }
    }

    // For vector and matrix subs, try to fold into a fmuladd.
    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, isSub: true))
        return FMulAdd;
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return MB.CreateSub(LHS: op.LHS, RHS: op.RHS);
    }

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return Builder.CreateFSub(L: op.LHS, R: op.RHS, Name: "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, isSubtraction: CodeGenFunction::IsSubtraction);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(V: op.LHS, DestTy: CGF.PtrDiffTy, Name: "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(V: op.RHS, DestTy: CGF.PtrDiffTy, Name: "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, Name: "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(Val: op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(T: elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize), RHS: divisor);

    // For everything else, we can just compute it, safe in the
    // assumption that Sema won't let anything through that we can't
    // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(T: elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(numChars: elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(LHS: diffInChars, RHS: divisor, Name: "sub.ptr.div");
}
4990
// Return, as a constant of RHS's type, the largest valid shift amount for a
// value of LHS's (element) type: width(LHS) - 1, clamped so it still fits in
// RHS's type. Used to build shift-exponent sanitizer comparisons and
// OpenCL/HLSL shift masks.
Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
                                                bool RHSIsSigned) {
  llvm::IntegerType *Ty;
  // For vector shifts, the relevant width is that of the element type.
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(Val: LHS->getType()))
    Ty = cast<llvm::IntegerType>(Val: VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(Val: LHS->getType());
  // For a given type of LHS the maximum shift amount is width(LHS)-1, however
  // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
  // this in ConstantInt::get, this results in the value getting truncated.
  // Constrain the return value to be max(RHS) in this case.
  llvm::Type *RHSTy = RHS->getType();
  llvm::APInt RHSMax =
      RHSIsSigned ? llvm::APInt::getSignedMaxValue(numBits: RHSTy->getScalarSizeInBits())
                  : llvm::APInt::getMaxValue(numBits: RHSTy->getScalarSizeInBits());
  if (RHSMax.ult(RHS: Ty->getBitWidth()))
    return llvm::ConstantInt::get(Ty: RHSTy, V: RHSMax);
  return llvm::ConstantInt::get(Ty: RHSTy, V: Ty->getBitWidth() - 1);
}
5010
// Reduce the shift amount RHS modulo the bit width of LHS's (element) type,
// implementing the OpenCL/HLSL rule that shift counts are taken "% word
// size". Uses a cheap AND mask when the width is a power of two, otherwise a
// urem.
Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
                                              const Twine &Name) {
  llvm::IntegerType *Ty;
  // For vector shifts, constrain against the element width.
  if (auto *VT = dyn_cast<llvm::VectorType>(Val: LHS->getType()))
    Ty = cast<llvm::IntegerType>(Val: VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(Val: LHS->getType());

  if (llvm::isPowerOf2_64(Value: Ty->getBitWidth()))
    return Builder.CreateAnd(LHS: RHS, RHS: GetMaximumShiftAmount(LHS, RHS, RHSIsSigned: false), Name);

  return Builder.CreateURem(
      LHS: RHS, RHS: llvm::ConstantInt::get(Ty: RHS->getType(), V: Ty->getBitWidth()), Name);
}
5025
// Emit the '<<' (and '<<=') operator. Promotes the RHS to the LHS width,
// applies the OpenCL/HLSL shift-count masking, and optionally emits UBSan
// checks for out-of-range exponents and for shifting non-zero bits off the
// top of the base.
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(op: Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(V: RHS, DestTy: Ops.LHS->getType(), isSigned: false, Name: "sh_prom");

  // Signed-base checking only applies where overshift of a signed base is
  // actually UB: not under -fwrapv and not in C++20 (which defines it).
  bool SanitizeSignedBase = CGF.SanOpts.has(K: SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().isSignedOverflowDefined() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(K: SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(K: SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(LHS: Ops.LHS, RHS, Name: "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Val: Ops.LHS->getType())) {
    SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
    if (SanitizeSignedBase)
      Ordinals.push_back(Elt: SanitizerKind::SO_ShiftBase);
    if (SanitizeUnsignedBase)
      Ordinals.push_back(Elt: SanitizerKind::SO_UnsignedShiftBase);
    if (SanitizeExponent)
      Ordinals.push_back(Elt: SanitizerKind::SO_ShiftExponent);

    SanitizerDebugLocation SanScope(&CGF, Ordinals,
                                    SanitizerHandler::ShiftOutOfBounds);
    SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *WidthMinusOne =
        GetMaximumShiftAmount(LHS: Ops.LHS, RHS: Ops.RHS, RHSIsSigned);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(LHS: Ops.RHS, RHS: WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          Elt: std::make_pair(x&: ValidExponent, y: SanitizerKind::SO_ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      // This builds a small diamond: the base check runs only on the
      // valid-exponent path, and a PHI in the continuation block supplies
      // 'true' (vacuously valid) on the other path.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock(name: "cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock(name: "check");
      Builder.CreateCondBr(Cond: ValidExponent, True: CheckShiftBase, False: Cont);
      // If the RHS was promoted, recompute the width bound in the promoted
      // type so the subtraction below is well-typed.
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetMaximumShiftAmount(LHS: Ops.LHS, RHS, RHSIsSigned);
      CGF.EmitBlock(BB: CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          LHS: Ops.LHS, RHS: Builder.CreateSub(LHS: PromotedWidthMinusOne, RHS, Name: "shl.zeros",
                                          /*NUW*/ HasNUW: true, /*NSW*/ HasNSW: true),
          Name: "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(Ty: BitsShiftedOff->getType(), V: 1);
        BitsShiftedOff = Builder.CreateLShr(LHS: BitsShiftedOff, RHS: One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(Ty: BitsShiftedOff->getType(), V: 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(LHS: BitsShiftedOff, RHS: Zero);
      CGF.EmitBlock(BB: Cont);
      llvm::PHINode *BaseCheck = Builder.CreatePHI(Ty: ValidBase->getType(), NumReservedValues: 2);
      BaseCheck->addIncoming(V: Builder.getTrue(), BB: Orig);
      BaseCheck->addIncoming(V: ValidBase, BB: CheckShiftBase);
      Checks.push_back(Elt: std::make_pair(
          x&: BaseCheck, y: SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
                                          : SanitizerKind::SO_UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Info: Ops);
  }

  return Builder.CreateShl(LHS: Ops.LHS, RHS, Name: "shl");
}
5114
// Emit the '>>' (and '>>=') operator. Promotes the RHS to the LHS width,
// applies the OpenCL/HLSL shift-count masking or the UBSan exponent check,
// then emits a logical shift for unsigned results and an arithmetic shift
// for signed results.
Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(op: Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(V: RHS, DestTy: Ops.LHS->getType(), isSigned: false, Name: "sh_prom");

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(LHS: Ops.LHS, RHS, Name: "shr.mask");
  else if (CGF.SanOpts.has(K: SanitizerKind::ShiftExponent) &&
           isa<llvm::IntegerType>(Val: Ops.LHS->getType())) {
    // Unlike EmitShl, only the exponent needs checking: right shifts cannot
    // lose bits off the top of the base.
    SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
                                    SanitizerHandler::ShiftOutOfBounds);
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *Valid = Builder.CreateICmpULE(
        LHS: Ops.RHS, RHS: GetMaximumShiftAmount(LHS: Ops.LHS, RHS: Ops.RHS, RHSIsSigned));
    EmitBinOpCheck(Checks: std::make_pair(x&: Valid, y: SanitizerKind::SO_ShiftExponent), Info: Ops);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(LHS: Ops.LHS, RHS, Name: "shr");
  return Builder.CreateAShr(LHS: Ops.LHS, RHS, Name: "shr");
}
5143
5144enum IntrinsicType { VCMPEQ, VCMPGT };
5145// return corresponding comparison intrinsic for given vector type
5146static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
5147 BuiltinType::Kind ElemKind) {
5148 switch (ElemKind) {
5149 default: llvm_unreachable("unexpected element type");
5150 case BuiltinType::Char_U:
5151 case BuiltinType::UChar:
5152 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5153 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
5154 case BuiltinType::Char_S:
5155 case BuiltinType::SChar:
5156 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5157 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
5158 case BuiltinType::UShort:
5159 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5160 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
5161 case BuiltinType::Short:
5162 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5163 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
5164 case BuiltinType::UInt:
5165 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5166 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
5167 case BuiltinType::Int:
5168 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5169 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
5170 case BuiltinType::ULong:
5171 case BuiltinType::ULongLong:
5172 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5173 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
5174 case BuiltinType::Long:
5175 case BuiltinType::LongLong:
5176 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5177 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
5178 case BuiltinType::Float:
5179 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
5180 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
5181 case BuiltinType::Double:
5182 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
5183 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
5184 case BuiltinType::UInt128:
5185 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5186 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
5187 case BuiltinType::Int128:
5188 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5189 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
5190 }
5191}
5192
5193Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
5194 llvm::CmpInst::Predicate UICmpOpc,
5195 llvm::CmpInst::Predicate SICmpOpc,
5196 llvm::CmpInst::Predicate FCmpOpc,
5197 bool IsSignaling) {
5198 TestAndClearIgnoreResultAssign();
5199 Value *Result;
5200 QualType LHSTy = E->getLHS()->getType();
5201 QualType RHSTy = E->getRHS()->getType();
5202 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
5203 assert(E->getOpcode() == BO_EQ ||
5204 E->getOpcode() == BO_NE);
5205 Value *LHS = CGF.EmitScalarExpr(E: E->getLHS());
5206 Value *RHS = CGF.EmitScalarExpr(E: E->getRHS());
5207 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
5208 CGF, L: LHS, R: RHS, MPT, Inequality: E->getOpcode() == BO_NE);
5209 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
5210 BinOpInfo BOInfo = EmitBinOps(E);
5211 Value *LHS = BOInfo.LHS;
5212 Value *RHS = BOInfo.RHS;
5213
5214 // If AltiVec, the comparison results in a numeric type, so we use
5215 // intrinsics comparing vectors and giving 0 or 1 as a result
5216 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
5217 // constants for mapping CR6 register bits to predicate result
5218 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
5219
5220 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
5221
5222 // in several cases vector arguments order will be reversed
5223 Value *FirstVecArg = LHS,
5224 *SecondVecArg = RHS;
5225
5226 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
5227 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
5228
5229 switch(E->getOpcode()) {
5230 default: llvm_unreachable("is not a comparison operation");
5231 case BO_EQ:
5232 CR6 = CR6_LT;
5233 ID = GetIntrinsic(IT: VCMPEQ, ElemKind: ElementKind);
5234 break;
5235 case BO_NE:
5236 CR6 = CR6_EQ;
5237 ID = GetIntrinsic(IT: VCMPEQ, ElemKind: ElementKind);
5238 break;
5239 case BO_LT:
5240 CR6 = CR6_LT;
5241 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
5242 std::swap(a&: FirstVecArg, b&: SecondVecArg);
5243 break;
5244 case BO_GT:
5245 CR6 = CR6_LT;
5246 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
5247 break;
5248 case BO_LE:
5249 if (ElementKind == BuiltinType::Float) {
5250 CR6 = CR6_LT;
5251 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
5252 std::swap(a&: FirstVecArg, b&: SecondVecArg);
5253 }
5254 else {
5255 CR6 = CR6_EQ;
5256 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
5257 }
5258 break;
5259 case BO_GE:
5260 if (ElementKind == BuiltinType::Float) {
5261 CR6 = CR6_LT;
5262 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
5263 }
5264 else {
5265 CR6 = CR6_EQ;
5266 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
5267 std::swap(a&: FirstVecArg, b&: SecondVecArg);
5268 }
5269 break;
5270 }
5271
5272 Value *CR6Param = Builder.getInt32(C: CR6);
5273 llvm::Function *F = CGF.CGM.getIntrinsic(IID: ID);
5274 Result = Builder.CreateCall(Callee: F, Args: {CR6Param, FirstVecArg, SecondVecArg});
5275
5276 // The result type of intrinsic may not be same as E->getType().
5277 // If E->getType() is not BoolTy, EmitScalarConversion will do the
5278 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
5279 // do nothing, if ResultTy is not i1 at the same time, it will cause
5280 // crash later.
5281 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Val: Result->getType());
5282 if (ResultTy->getBitWidth() > 1 &&
5283 E->getType() == CGF.getContext().BoolTy)
5284 Result = Builder.CreateTrunc(V: Result, DestTy: Builder.getInt1Ty());
5285 return EmitScalarConversion(Src: Result, SrcType: CGF.getContext().BoolTy, DstType: E->getType(),
5286 Loc: E->getExprLoc());
5287 }
5288
5289 if (BOInfo.isFixedPointOp()) {
5290 Result = EmitFixedPointBinOp(op: BOInfo);
5291 } else if (LHS->getType()->isFPOrFPVectorTy()) {
5292 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
5293 if (!IsSignaling)
5294 Result = Builder.CreateFCmp(P: FCmpOpc, LHS, RHS, Name: "cmp");
5295 else
5296 Result = Builder.CreateFCmpS(P: FCmpOpc, LHS, RHS, Name: "cmp");
5297 } else if (LHSTy->hasSignedIntegerRepresentation()) {
5298 Result = Builder.CreateICmp(P: SICmpOpc, LHS, RHS, Name: "cmp");
5299 } else {
5300 // Unsigned integers and pointers.
5301
5302 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
5303 !isa<llvm::ConstantPointerNull>(Val: LHS) &&
5304 !isa<llvm::ConstantPointerNull>(Val: RHS)) {
5305
5306 // Dynamic information is required to be stripped for comparisons,
5307 // because it could leak the dynamic information. Based on comparisons
5308 // of pointers to dynamic objects, the optimizer can replace one pointer
5309 // with another, which might be incorrect in presence of invariant
5310 // groups. Comparison with null is safe because null does not carry any
5311 // dynamic information.
5312 if (LHSTy.mayBeDynamicClass())
5313 LHS = Builder.CreateStripInvariantGroup(Ptr: LHS);
5314 if (RHSTy.mayBeDynamicClass())
5315 RHS = Builder.CreateStripInvariantGroup(Ptr: RHS);
5316 }
5317
5318 Result = Builder.CreateICmp(P: UICmpOpc, LHS, RHS, Name: "cmp");
5319 }
5320
5321 // If this is a vector comparison, sign extend the result to the appropriate
5322 // vector integer type and return it (don't convert to bool).
5323 if (LHSTy->isVectorType())
5324 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
5325
5326 } else {
5327 // Complex Comparison: can only be an equality comparison.
5328 CodeGenFunction::ComplexPairTy LHS, RHS;
5329 QualType CETy;
5330 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
5331 LHS = CGF.EmitComplexExpr(E: E->getLHS());
5332 CETy = CTy->getElementType();
5333 } else {
5334 LHS.first = Visit(E: E->getLHS());
5335 LHS.second = llvm::Constant::getNullValue(Ty: LHS.first->getType());
5336 CETy = LHSTy;
5337 }
5338 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
5339 RHS = CGF.EmitComplexExpr(E: E->getRHS());
5340 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
5341 CTy->getElementType()) &&
5342 "The element types must always match.");
5343 (void)CTy;
5344 } else {
5345 RHS.first = Visit(E: E->getRHS());
5346 RHS.second = llvm::Constant::getNullValue(Ty: RHS.first->getType());
5347 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
5348 "The element types must always match.");
5349 }
5350
5351 Value *ResultR, *ResultI;
5352 if (CETy->isRealFloatingType()) {
5353 // As complex comparisons can only be equality comparisons, they
5354 // are never signaling comparisons.
5355 ResultR = Builder.CreateFCmp(P: FCmpOpc, LHS: LHS.first, RHS: RHS.first, Name: "cmp.r");
5356 ResultI = Builder.CreateFCmp(P: FCmpOpc, LHS: LHS.second, RHS: RHS.second, Name: "cmp.i");
5357 } else {
5358 // Complex comparisons can only be equality comparisons. As such, signed
5359 // and unsigned opcodes are the same.
5360 ResultR = Builder.CreateICmp(P: UICmpOpc, LHS: LHS.first, RHS: RHS.first, Name: "cmp.r");
5361 ResultI = Builder.CreateICmp(P: UICmpOpc, LHS: LHS.second, RHS: RHS.second, Name: "cmp.i");
5362 }
5363
5364 if (E->getOpcode() == BO_EQ) {
5365 Result = Builder.CreateAnd(LHS: ResultR, RHS: ResultI, Name: "and.ri");
5366 } else {
5367 assert(E->getOpcode() == BO_NE &&
5368 "Complex comparison other than == or != ?");
5369 Result = Builder.CreateOr(LHS: ResultR, RHS: ResultI, Name: "or.ri");
5370 }
5371 }
5372
5373 return EmitScalarConversion(Src: Result, SrcType: CGF.getContext().BoolTy, DstType: E->getType(),
5374 Loc: E->getExprLoc());
5375}
5376
5377llvm::Value *CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(
5378 const BinaryOperator *E, Value **Previous, QualType *SrcType) {
5379 // In case we have the integer or bitfield sanitizer checks enabled
5380 // we want to get the expression before scalar conversion.
5381 if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: E->getRHS())) {
5382 CastKind Kind = ICE->getCastKind();
5383 if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
5384 *SrcType = ICE->getSubExpr()->getType();
5385 *Previous = EmitScalarExpr(E: ICE->getSubExpr());
5386 // Pass default ScalarConversionOpts to avoid emitting
5387 // integer sanitizer checks as E refers to bitfield.
5388 return EmitScalarConversion(Src: *Previous, SrcTy: *SrcType, DstTy: ICE->getType(),
5389 Loc: ICE->getExprLoc());
5390 }
5391 }
5392 return EmitScalarExpr(E: E->getRHS());
5393}
5394
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // Emit a scalar assignment. Handles pointer-auth-qualified destinations,
  // ARC ownership qualifiers, bitfield stores, and the C vs. C++ rules for
  // what value the assignment expression itself yields.
  //
  // Group all instructions emitted for this assignment into one debug-info
  // atom so debuggers treat it as a single source-level step.
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  // Remember whether the caller discards the assignment's value; if so we
  // can return nullptr instead of materializing a result.
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  // Pointer-authentication-qualified destination: sign the RHS for storage,
  // store it, and (if the result is used) strip the storage signature again.
  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
    LValue LV = CGF.EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
    // Drop the ptrauth qualifier so the store below is a plain store of the
    // already-signed value.
    LV.getQuals().removePointerAuth();
    llvm::Value *RV =
        CGF.EmitPointerAuthQualify(Qualifier: PtrAuth, PointerExpr: E->getRHS(), StorageAddress: LV.getAddress());
    CGF.EmitNullabilityCheck(LHS: LV, RHS: RV, Loc: E->getExprLoc());
    CGF.EmitStoreThroughLValue(Src: RValue::get(V: RV), Dst: LV);

    if (Ignore)
      return nullptr;
    // The stored value carries the storage signature; unqualify it to get
    // the value of the assignment expression.
    RV = CGF.EmitPointerAuthUnqualify(Qualifier: PtrAuth, Pointer: RV, PointerType: LV.getType(),
                                      StorageAddress: LV.getAddress(), /*nonnull*/ IsKnownNonNull: false);
    return RV;
  }

  // Dispatch on the ARC ownership qualifier of the destination type; each
  // qualified case delegates to the matching ARC store helper.
  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreStrong(e: E, ignored: Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreAutoreleasing(e: E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreUnsafeUnretained(e: E, ignored: Ignore);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E: E->getRHS());
    LHS = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(addr: LHS.getAddress(), value: RHS, ignored: Ignore);
    break;

  case Qualifiers::OCL_None:
    // No ARC qualifier: the common case.
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, Previous: &Previous, SrcType: &SrcType);
    else
      RHS = Visit(E: E->getRHS());

    LHS = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      // EmitStoreThroughBitfieldLValue updates RHS to the post-store
      // (truncated) value, which is the value of the expression.
      CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: RHS), Dst: LHS, Result: &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: RHS, DstType,
                                      Info: LHS.getBitFieldInfo(), Loc: E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, Loc: E->getExprLoc());
      CGF.EmitStoreThroughLValue(Src: RValue::get(V: RHS), Dst: LHS);
    }
  }
  // OpenMP: Handle lastprivate(condition:) in scalar assignment
  if (CGF.getLangOpts().OpenMP) {
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
                                                                  LHS: E->getLHS());
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value (C++: the result is the lvalue, so a
  // volatile destination must be re-read).
  return EmitLoadOfLValue(LV: LHS, Loc: E->getExprLoc());
}
5489
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Emit a logical AND. For vectors this is an element-wise compare/and; for
  // scalars it is short-circuiting control flow merged with a PHI, with
  // profile/coverage counters and MCDC bookkeeping woven in.
  //
  // NOTE(review): hasSkipCounter presumably reports whether a coverage "skip"
  // counter was allocated for the statement — confirm against its definition.
  auto HasLHSSkip = CGF.hasSkipCounter(S: E);
  auto HasRHSSkip = CGF.hasSkipCounter(S: E->getRHS());

  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(S: E);

    // No short-circuiting for vectors: both sides are always evaluated.
    Value *LHS = Visit(E: E->getLHS());
    Value *RHS = Visit(E: E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(Ty: LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS, RHS: Zero, Name: "cmp");
      RHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS: RHS, RHS: Zero, Name: "cmp");
    } else {
      LHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS, RHS: Zero, Name: "cmp");
      RHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS: RHS, RHS: Zero, Name: "cmp");
    }
    // Sign-extend the i1 result to the full vector element width.
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(V: And, DestTy: ConvertType(T: E->getType()), Name: "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(T: E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(Cond: E->getLHS(), Result&: LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(ExecSkip: CGF.UseExecPath, S: E, /*UseBoth=*/true);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.isMCDCDecisionExpr(E))
        CGF.maybeResetMCDCCondBitmap(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "land.end");
        llvm::BasicBlock *RHSSkip =
            (HasRHSSkip ? CGF.createBasicBlock(name: "land.rhsskip") : FBlock);
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "land.rhscnt");
        Builder.CreateCondBr(Cond: RHSCond, True: RHSBlockCnt, False: RHSSkip);
        CGF.EmitBlock(BB: RHSBlockCnt);
        CGF.incrementProfileCounter(ExecSkip: CGF.UseExecPath, S: E->getRHS());
        CGF.EmitBranch(Block: FBlock);
        if (HasRHSSkip) {
          CGF.EmitBlock(BB: RHSSkip);
          CGF.incrementProfileCounter(ExecSkip: CGF.UseSkipPath, S: E->getRHS());
        }
        CGF.EmitBlock(BB: FBlock);
      } else
        CGF.markStmtMaybeUsed(S: E->getRHS());

      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.isMCDCDecisionExpr(E))
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(V: RHSCond, DestTy: ResTy, Name: "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(S: E->getRHS())) {
      CGF.markStmtAsUsed(Skipped: false, S: E);
      if (HasLHSSkip)
        CGF.incrementProfileCounter(ExecSkip: CGF.UseSkipPath, S: E);

      CGF.markStmtMaybeUsed(S: E->getRHS());

      return llvm::Constant::getNullValue(Ty: ResTy);
    }
  }

  // General case: emit real short-circuit control flow.
  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.isMCDCDecisionExpr(E))
    CGF.maybeResetMCDCCondBitmap(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "land.rhs");

  // When a skip counter exists for the LHS, route the false edge through a
  // dedicated block so the counter can be bumped before reaching ContBlock.
  llvm::BasicBlock *LHSFalseBlock =
      (HasLHSSkip ? CGF.createBasicBlock(name: "land.lhsskip") : ContBlock);

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(Cond: E->getLHS(), TrueBlock: RHSBlock, FalseBlock: LHSFalseBlock,
                           TrueCount: CGF.getProfileCount(S: E->getRHS()));

  if (HasLHSSkip) {
    CGF.EmitBlock(BB: LHSFalseBlock);
    CGF.incrementProfileCounter(ExecSkip: CGF.UseSkipPath, S: E);
    CGF.EmitBranch(Block: ContBlock);
  }

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(Ty: llvm::Type::getInt1Ty(C&: VMContext), NumReservedValues: 2,
                                            NameStr: "", InsertBefore: ContBlock);
  for (llvm::pred_iterator PI = pred_begin(BB: ContBlock), PE = pred_end(BB: ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(V: llvm::ConstantInt::getFalse(Context&: VMContext), BB: *PI);

  // The RHS is conditionally evaluated; bracket it so cleanups are handled.
  eval.begin(CGF);
  CGF.EmitBlock(BB: RHSBlock);
  CGF.incrementProfileCounter(ExecSkip: CGF.UseExecPath, S: E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  llvm::BasicBlock *ContIncoming = RHSBlock;
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "land.rhscnt");
    llvm::BasicBlock *RHSBlockSkip =
        (HasRHSSkip ? CGF.createBasicBlock(name: "land.rhsskip") : ContBlock);
    Builder.CreateCondBr(Cond: RHSCond, True: RHSBlockCnt, False: RHSBlockSkip);
    CGF.EmitBlock(BB: RHSBlockCnt);
    CGF.incrementProfileCounter(ExecSkip: CGF.UseExecPath, S: E->getRHS());
    CGF.EmitBranch(Block: ContBlock);
    PN->addIncoming(V: RHSCond, BB: RHSBlockCnt);
    if (HasRHSSkip) {
      CGF.EmitBlock(BB: RHSBlockSkip);
      CGF.incrementProfileCounter(ExecSkip: CGF.UseSkipPath, S: E->getRHS());
      CGF.EmitBranch(Block: ContBlock);
      // The PHI's RHS edge now comes from the skip block instead.
      ContIncoming = RHSBlockSkip;
    }
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(BB: ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(V: RHSCond, BB: ContIncoming);

  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.isMCDCDecisionExpr(E))
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // Artificial location to preserve the scope information
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(V: PN, DestTy: ResTy, Name: "land.ext");
}
5658
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Emit a logical OR. Mirrors VisitBinLAnd with the branch polarity and the
  // constant-folded fast paths inverted (1 || RHS elides, 0 || X emits X).
  //
  // NOTE(review): hasSkipCounter presumably reports whether a coverage "skip"
  // counter was allocated for the statement — confirm against its definition.
  auto HasLHSSkip = CGF.hasSkipCounter(S: E);
  auto HasRHSSkip = CGF.hasSkipCounter(S: E->getRHS());

  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(S: E);

    // No short-circuiting for vectors: both sides are always evaluated.
    Value *LHS = Visit(E: E->getLHS());
    Value *RHS = Visit(E: E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(Ty: LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS, RHS: Zero, Name: "cmp");
      RHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS: RHS, RHS: Zero, Name: "cmp");
    } else {
      LHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS, RHS: Zero, Name: "cmp");
      RHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS: RHS, RHS: Zero, Name: "cmp");
    }
    // Sign-extend the i1 result to the full vector element width.
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(V: Or, DestTy: ConvertType(T: E->getType()), Name: "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(T: E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(Cond: E->getLHS(), Result&: LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(ExecSkip: CGF.UseExecPath, S: E, /*UseBoth=*/true);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.isMCDCDecisionExpr(E))
        CGF.maybeResetMCDCCondBitmap(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "lor.end");
        llvm::BasicBlock *RHSSkip =
            (HasRHSSkip ? CGF.createBasicBlock(name: "lor.rhsskip") : FBlock);
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
        // Note the inverted branch relative to the && case: the counter
        // block is taken when the RHS is *false*.
        Builder.CreateCondBr(Cond: RHSCond, True: RHSSkip, False: RHSBlockCnt);
        CGF.EmitBlock(BB: RHSBlockCnt);
        CGF.incrementProfileCounter(ExecSkip: CGF.UseExecPath, S: E->getRHS());
        CGF.EmitBranch(Block: FBlock);
        if (HasRHSSkip) {
          CGF.EmitBlock(BB: RHSSkip);
          CGF.incrementProfileCounter(ExecSkip: CGF.UseSkipPath, S: E->getRHS());
        }
        CGF.EmitBlock(BB: FBlock);
      } else
        CGF.markStmtMaybeUsed(S: E->getRHS());

      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.isMCDCDecisionExpr(E))
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(V: RHSCond, DestTy: ResTy, Name: "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(S: E->getRHS())) {
      CGF.markStmtAsUsed(Skipped: false, S: E);
      if (HasLHSSkip)
        CGF.incrementProfileCounter(ExecSkip: CGF.UseSkipPath, S: E);

      CGF.markStmtMaybeUsed(S: E->getRHS());

      return llvm::ConstantInt::get(Ty: ResTy, V: 1);
    }
  }

  // General case: emit real short-circuit control flow.
  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.isMCDCDecisionExpr(E))
    CGF.maybeResetMCDCCondBitmap(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "lor.rhs");
  // When a skip counter exists for the LHS, route the true edge through a
  // dedicated block so the counter can be bumped before reaching ContBlock.
  llvm::BasicBlock *LHSTrueBlock =
      (HasLHSSkip ? CGF.createBasicBlock(name: "lor.lhsskip") : ContBlock);

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(Cond: E->getLHS(), TrueBlock: LHSTrueBlock, FalseBlock: RHSBlock,
                           TrueCount: CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(S: E->getRHS()));

  if (HasLHSSkip) {
    CGF.EmitBlock(BB: LHSTrueBlock);
    CGF.incrementProfileCounter(ExecSkip: CGF.UseSkipPath, S: E);
    CGF.EmitBranch(Block: ContBlock);
  }

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(Ty: llvm::Type::getInt1Ty(C&: VMContext), NumReservedValues: 2,
                                            NameStr: "", InsertBefore: ContBlock);
  for (llvm::pred_iterator PI = pred_begin(BB: ContBlock), PE = pred_end(BB: ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(V: llvm::ConstantInt::getTrue(Context&: VMContext), BB: *PI);

  // The RHS is conditionally evaluated; bracket it so cleanups are handled.
  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(BB: RHSBlock);
  CGF.incrementProfileCounter(ExecSkip: CGF.UseExecPath, S: E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  llvm::BasicBlock *ContIncoming = RHSBlock;
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
    llvm::BasicBlock *RHSTrueBlock =
        (HasRHSSkip ? CGF.createBasicBlock(name: "lor.rhsskip") : ContBlock);
    Builder.CreateCondBr(Cond: RHSCond, True: RHSTrueBlock, False: RHSBlockCnt);
    CGF.EmitBlock(BB: RHSBlockCnt);
    CGF.incrementProfileCounter(ExecSkip: CGF.UseExecPath, S: E->getRHS());
    CGF.EmitBranch(Block: ContBlock);
    PN->addIncoming(V: RHSCond, BB: RHSBlockCnt);
    if (HasRHSSkip) {
      CGF.EmitBlock(BB: RHSTrueBlock);
      CGF.incrementProfileCounter(ExecSkip: CGF.UseSkipPath, S: E->getRHS());
      CGF.EmitBranch(Block: ContBlock);
      // The PHI's RHS edge now comes from the skip block instead.
      ContIncoming = RHSTrueBlock;
    }
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(BB: ContBlock);
  PN->addIncoming(V: RHSCond, BB: ContIncoming);

  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.isMCDCDecisionExpr(E))
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(V: PN, DestTy: ResTy, Name: "lor.ext");
}
5820
5821Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5822 CGF.EmitIgnoredExpr(E: E->getLHS());
5823 CGF.EnsureInsertPoint();
5824 return Visit(E: E->getRHS());
5825}
5826
5827//===----------------------------------------------------------------------===//
5828// Other Operators
5829//===----------------------------------------------------------------------===//
5830
5831/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
5832/// expression is cheap enough and side-effect-free enough to evaluate
5833/// unconditionally instead of conditionally. This is used to convert control
5834/// flow into selects in some cases.
5835static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
5836 CodeGenFunction &CGF) {
5837 // Anything that is an integer or floating point constant is fine.
5838 return E->IgnoreParens()->isEvaluatable(Ctx: CGF.getContext());
5839
5840 // Even non-volatile automatic variables can't be evaluated unconditionally.
5841 // Referencing a thread_local may cause non-trivial initialization work to
5842 // occur. If we're inside a lambda and one of the variables is from the scope
5843 // outside the lambda, that function may have returned already. Reading its
5844 // locals is a bad idea. Also, these reads may introduce races there didn't
5845 // exist in the source-level program.
5846}
5847
5848
Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  // Emit a (possibly GNU ?:-style) conditional operator. Tries, in order:
  // constant-folding the condition away, vector/SVE selects, a flat select
  // for cheap operands, and finally real branch-and-PHI control flow.
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(Cond: condExpr, Result&: CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(a&: live, b&: dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(S: dead)) {
      CGF.incrementProfileCounter(ExecSkip: CondExprBool ? CGF.UseExecPath
                                                        : CGF.UseSkipPath,
                                  S: E, /*UseBoth=*/true);
      Value *Result = Visit(E: live);
      CGF.markStmtMaybeUsed(S: dead);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(T: CGF.ConvertType(T: E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
                                   condExpr->getType()->isExtVectorType())) {
    CGF.incrementProfileCounter(S: E);

    // Both arms are evaluated unconditionally for the vector select.
    llvm::Value *CondV = CGF.EmitScalarExpr(E: condExpr);
    llvm::Value *LHS = Visit(E: lhsExpr);
    llvm::Value *RHS = Visit(E: rhsExpr);

    llvm::Type *condType = ConvertType(T: condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(Val: condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    // OpenCL selects on the MSB of each condition element: build an
    // all-ones/all-zeros mask from the sign bit, then blend the two arms
    // with and/or since there is no direct masked-select here.
    llvm::Value *zeroVec = llvm::Constant::getNullValue(Ty: vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(LHS: CondV, RHS: zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        V: TestMSB, DestTy: llvm::FixedVectorType::get(ElementType: elemType, NumElts: numElem), Name: "sext");
    llvm::Value *tmp2 = Builder.CreateNot(V: tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(Val: RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(V: RHS, DestTy: tmp2->getType());
      LHSTmp = Builder.CreateBitCast(V: LHS, DestTy: tmp->getType());
      wasCast = true;
    }

    // result = (RHS & ~mask) | (LHS & mask).
    llvm::Value *tmp3 = Builder.CreateAnd(LHS: RHSTmp, RHS: tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHS: LHSTmp, RHS: tmp);
    llvm::Value *tmp5 = Builder.CreateOr(LHS: tmp3, RHS: tmp4, Name: "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(V: tmp5, DestTy: RHS->getType());

    return tmp5;
  }

  // Non-OpenCL vector (or SVE vector-length-scaled) condition: lower to an
  // IR select; both arms are evaluated unconditionally.
  if (condExpr->getType()->isVectorType() ||
      condExpr->getType()->isSveVLSBuiltinType()) {
    CGF.incrementProfileCounter(S: E);

    llvm::Value *CondV = CGF.EmitScalarExpr(E: condExpr);
    llvm::Value *LHS = Visit(E: lhsExpr);
    llvm::Value *RHS = Visit(E: rhsExpr);

    llvm::Type *CondType = ConvertType(T: condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(Val: CondType);

    // An i1 condition vector can feed the select directly.
    if (VecTy->getElementType()->isIntegerTy(Bitwidth: 1))
      return Builder.CreateSelect(C: CondV, True: LHS, False: RHS, Name: "vector_select");

    // OpenCL uses the MSB of the mask vector.
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(Ty: VecTy);
    if (condExpr->getType()->isExtVectorType())
      CondV = Builder.CreateICmpSLT(LHS: CondV, RHS: ZeroVec, Name: "vector_cond");
    else
      CondV = Builder.CreateICmpNE(LHS: CondV, RHS: ZeroVec, Name: "vector_cond");
    return Builder.CreateSelect(C: CondV, True: LHS, False: RHS, Name: "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (!llvm::EnableSingleByteCoverage &&
      isCheapEnoughToEvaluateUnconditionally(E: lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(E: rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(E: condExpr);
    // Count the true arm by stepping the counter with the condition (0/1).
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(V: CondV, DestTy: CGF.Int64Ty);

    CGF.incrementProfileCounter(S: E, StepV);

    llvm::Value *LHS = Visit(E: lhsExpr);
    llvm::Value *RHS = Visit(E: rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(C: CondV, True: LHS, False: RHS, Name: "cond");
  }

  // General case: branch to separate arm blocks and merge with a PHI.
  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (auto E = CGF.stripCond(C: condExpr); CGF.isMCDCDecisionExpr(E))
    CGF.maybeResetMCDCCondBitmap(E);

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock(name: "cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(Cond: condExpr, TrueBlock: LHSBlock, FalseBlock: RHSBlock,
                           TrueCount: CGF.getProfileCount(S: lhsExpr));

  CGF.EmitBlock(BB: LHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (auto E = CGF.stripCond(C: condExpr); CGF.isMCDCDecisionExpr(E))
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  CGF.incrementProfileCounter(ExecSkip: CGF.UseExecPath, S: E);
  eval.begin(CGF);
  Value *LHS = Visit(E: lhsExpr);
  eval.end(CGF);

  // Reacquire the block, since visiting the arm may have inserted subblocks.
  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(Dest: ContBlock);

  CGF.EmitBlock(BB: RHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (auto E = CGF.stripCond(C: condExpr); CGF.isMCDCDecisionExpr(E))
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  CGF.incrementProfileCounter(ExecSkip: CGF.UseSkipPath, S: E);
  eval.begin(CGF);
  Value *RHS = Visit(E: rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(BB: ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(Ty: LHS->getType(), NumReservedValues: 2, Name: "cond");
  PN->addIncoming(V: LHS, BB: LHSBlock);
  PN->addIncoming(V: RHS, BB: RHSBlock);

  return PN;
}
6028
6029Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
6030 return Visit(E: E->getChosenSubExpr());
6031}
6032
6033Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
6034 Address ArgValue = Address::invalid();
6035 RValue ArgPtr = CGF.EmitVAArg(VE, VAListAddr&: ArgValue);
6036
6037 return ArgPtr.getScalarVal();
6038}
6039
6040Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
6041 return CGF.EmitBlockLiteral(block);
6042}
6043
6044// Convert a vec3 to vec4, or vice versa.
6045static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
6046 Value *Src, unsigned NumElementsDst) {
6047 static constexpr int Mask[] = {0, 1, 2, -1};
6048 return Builder.CreateShuffleVector(V: Src, Mask: llvm::ArrayRef(Mask, NumElementsDst));
6049}
6050
6051// Create cast instructions for converting LLVM value \p Src to LLVM type \p
6052// DstTy. \p Src has the same size as \p DstTy. Both are single value types
6053// but could be scalar or vectors of different lengths, and either can be
6054// pointer.
6055// There are 4 cases:
6056// 1. non-pointer -> non-pointer : needs 1 bitcast
6057// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
6058// 3. pointer -> non-pointer
6059// a) pointer -> intptr_t : needs 1 ptrtoint
6060// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
6061// 4. non-pointer -> pointer
6062// a) intptr_t -> pointer : needs 1 inttoptr
6063// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
6064// Note: for cases 3b and 4b two casts are required since LLVM casts do not
6065// allow casting directly between pointer types and non-integer non-pointer
6066// types.
6067static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
6068 const llvm::DataLayout &DL,
6069 Value *Src, llvm::Type *DstTy,
6070 StringRef Name = "") {
6071 auto SrcTy = Src->getType();
6072
6073 // Case 1.
6074 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
6075 return Builder.CreateBitCast(V: Src, DestTy: DstTy, Name);
6076
6077 // Case 2.
6078 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
6079 return Builder.CreatePointerBitCastOrAddrSpaceCast(V: Src, DestTy: DstTy, Name);
6080
6081 // Case 3.
6082 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
6083 // Case 3b.
6084 if (!DstTy->isIntegerTy())
6085 Src = Builder.CreatePtrToInt(V: Src, DestTy: DL.getIntPtrType(SrcTy));
6086 // Cases 3a and 3b.
6087 return Builder.CreateBitOrPointerCast(V: Src, DestTy: DstTy, Name);
6088 }
6089
6090 // Case 4b.
6091 if (!SrcTy->isIntegerTy())
6092 Src = Builder.CreateBitCast(V: Src, DestTy: DL.getIntPtrType(DstTy));
6093 // Cases 4a and 4b.
6094 return Builder.CreateIntToPtr(V: Src, DestTy: DstTy, Name);
6095}
6096
6097Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
6098 Value *Src = CGF.EmitScalarExpr(E: E->getSrcExpr());
6099 llvm::Type *DstTy = ConvertType(T: E->getType());
6100
6101 llvm::Type *SrcTy = Src->getType();
6102 unsigned NumElementsSrc =
6103 isa<llvm::VectorType>(Val: SrcTy)
6104 ? cast<llvm::FixedVectorType>(Val: SrcTy)->getNumElements()
6105 : 0;
6106 unsigned NumElementsDst =
6107 isa<llvm::VectorType>(Val: DstTy)
6108 ? cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements()
6109 : 0;
6110
6111 // Use bit vector expansion for ext_vector_type boolean vectors.
6112 if (E->getType()->isExtVectorBoolType())
6113 return CGF.emitBoolVecConversion(SrcVec: Src, NumElementsDst, Name: "astype");
6114
6115 // Going from vec3 to non-vec3 is a special case and requires a shuffle
6116 // vector to get a vec4, then a bitcast if the target type is different.
6117 if (NumElementsSrc == 3 && NumElementsDst != 3) {
6118 Src = ConvertVec3AndVec4(Builder, CGF, Src, NumElementsDst: 4);
6119 Src = createCastsForTypeOfSameSize(Builder, DL: CGF.CGM.getDataLayout(), Src,
6120 DstTy);
6121
6122 Src->setName("astype");
6123 return Src;
6124 }
6125
6126 // Going from non-vec3 to vec3 is a special case and requires a bitcast
6127 // to vec4 if the original type is not vec4, then a shuffle vector to
6128 // get a vec3.
6129 if (NumElementsSrc != 3 && NumElementsDst == 3) {
6130 auto *Vec4Ty = llvm::FixedVectorType::get(
6131 ElementType: cast<llvm::VectorType>(Val: DstTy)->getElementType(), NumElts: 4);
6132 Src = createCastsForTypeOfSameSize(Builder, DL: CGF.CGM.getDataLayout(), Src,
6133 DstTy: Vec4Ty);
6134
6135 Src = ConvertVec3AndVec4(Builder, CGF, Src, NumElementsDst: 3);
6136 Src->setName("astype");
6137 return Src;
6138 }
6139
6140 return createCastsForTypeOfSameSize(Builder, DL: CGF.CGM.getDataLayout(),
6141 Src, DstTy, Name: "astype");
6142}
6143
6144Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
6145 return CGF.EmitAtomicExpr(E).getScalarVal();
6146}
6147
6148//===----------------------------------------------------------------------===//
6149// Entry Point into this File
6150//===----------------------------------------------------------------------===//
6151
6152/// Emit the computation of the specified expression of scalar type, ignoring
6153/// the result.
6154Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
6155 assert(E && hasScalarEvaluationKind(E->getType()) &&
6156 "Invalid scalar expression to emit");
6157
6158 return ScalarExprEmitter(*this, IgnoreResultAssign)
6159 .Visit(E: const_cast<Expr *>(E));
6160}
6161
6162/// Emit a conversion from the specified type to the specified destination type,
6163/// both of which are LLVM scalar types.
6164Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
6165 QualType DstTy,
6166 SourceLocation Loc) {
6167 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
6168 "Invalid scalar expression to emit");
6169 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcType: SrcTy, DstType: DstTy, Loc);
6170}
6171
6172/// Emit a conversion from the specified complex type to the specified
6173/// destination type, where the destination type is an LLVM scalar type.
6174Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
6175 QualType SrcTy,
6176 QualType DstTy,
6177 SourceLocation Loc) {
6178 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
6179 "Invalid complex -> scalar conversion");
6180 return ScalarExprEmitter(*this)
6181 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
6182}
6183
6184
6185Value *
6186CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
6187 QualType PromotionType) {
6188 if (!PromotionType.isNull())
6189 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
6190 else
6191 return ScalarExprEmitter(*this).Visit(E: const_cast<Expr *>(E));
6192}
6193
6194
6195llvm::Value *CodeGenFunction::
6196EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
6197 bool isInc, bool isPre) {
6198 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
6199}
6200
6201LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
6202 // object->isa or (*object).isa
6203 // Generate code as for: *(Class*)object
6204
6205 Expr *BaseExpr = E->getBase();
6206 Address Addr = Address::invalid();
6207 if (BaseExpr->isPRValue()) {
6208 llvm::Type *BaseTy =
6209 ConvertTypeForMem(T: BaseExpr->getType()->getPointeeType());
6210 Addr = Address(EmitScalarExpr(E: BaseExpr), BaseTy, getPointerAlign());
6211 } else {
6212 Addr = EmitLValue(E: BaseExpr).getAddress();
6213 }
6214
6215 // Cast the address to Class*.
6216 Addr = Addr.withElementType(ElemTy: ConvertType(T: E->getType()));
6217 return MakeAddrLValue(Addr, T: E->getType());
6218}
6219
6220
/// Emit the lvalue of a compound assignment ("a *= b" and friends) by
/// dispatching to ScalarExprEmitter::EmitCompoundAssignLValue with the
/// pointer-to-member that emits the matching binary operation.
LValue CodeGenFunction::EmitCompoundAssignmentLValue(
    const CompoundAssignOperator *E) {
  ApplyAtomGroup Grp(getDebugInfo());
  ScalarExprEmitter Scalar(*this);
  // Out-parameter receiving the scalar result of the operation; this caller
  // only needs the lvalue, so the value is discarded.
  Value *Result = nullptr;
  switch (E->getOpcode()) {
// Map each BO_<Op>Assign opcode onto ScalarExprEmitter::Emit<Op>.
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  // All remaining opcodes are non-compound and can never reach this function.
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}
6271
/// The decomposed byte offset of a GEP, as produced by EmitGEPOffsetInBytes.
/// Both members are llvm::Values, so they may be constants or values computed
/// at runtime.
struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};
6278
/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns offset in bytes and a boolean flag whether an overflow happened
/// during evaluation.
///
/// \param BasePtr the GEP's base pointer operand.
/// \param GEPVal  either an llvm::Constant (the GEP was folded) or an
///                inbounds llvm::GEPOperator based on \p BasePtr.
/// \returns the signed byte offset (intptr_t-sized) plus an i1 that is true
///          if any intermediate signed add/mul overflowed.
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(Val: GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(V: BasePtr, DestTy: DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(V: GEPVal, DestTy: DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(LHS: GEPVal_int, RHS: BasePtr_int);
    // Constant folding cannot have introduced an overflow.
    return {.TotalOffset: TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(Val: GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(Ty: IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(IID: llvm::Intrinsic::sadd_with_overflow, Tys: IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(IID: llvm::Intrinsic::smul_with_overflow, Tys: IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  /// Constant operands are folded directly (recording any overflow via
  /// mayHaveIntegerOverflow); otherwise a checked-arithmetic intrinsic is
  /// emitted and its overflow bit is OR-ed into OffsetOverflows.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(Val: LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(Val: RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHS: LHSCI, RHS: RHSCI, Opcode,
                                                  /*Signed=*/true, Result&: N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(Context&: VMContext, V: N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        Callee: (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, Args: {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        LHS: Builder.CreateExtractValue(Agg: ResultAndOverflow, Idxs: 1), RHS: OffsetOverflows);
    return Builder.CreateExtractValue(Agg: ResultAndOverflow, Idxs: 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Val: Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          Ty: IntPtrTy, V: DL.getStructLayout(Ty: STy)->getElementOffset(Idx: FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize =
          llvm::ConstantInt::get(Ty: IntPtrTy, V: GTI.getSequentialElementStride(DL));
      auto *IndexS = Builder.CreateIntCast(V: Index, DestTy: IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {.TotalOffset: TotalOffset, .OffsetOverflows: OffsetOverflows};
}
6377
/// Emit an inbounds GEP over \p Ptr and, when -fsanitize=pointer-overflow is
/// enabled, instrument it with a null-plus-offset check and/or an
/// address-wraparound check.
///
/// \param ElemTy        element type the indices in \p IdxList apply to.
/// \param SignedIndices true if any index may be negative.
/// \param IsSubtraction true if this GEP implements a pointer subtraction.
/// \param Loc           source location reported to the sanitizer runtime.
/// \returns the (possibly constant-folded) GEP value.
Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();

  // With purely unsigned indices and no subtraction, the GEP can additionally
  // carry the nuw flag.
  llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
  if (!SignedIndices && !IsSubtraction)
    NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

  Value *GEPVal = Builder.CreateGEP(Ty: ElemTy, Ptr, IdxList, Name, NW: NWFlags);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(K: SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      F: Builder.GetInsertBlock()->getParent(), AS: PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded,
  // and only in the default address space
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(Val: GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
  auto CheckHandler = SanitizerHandler::PointerOverflow;
  SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(BasePtr: Ptr, GEPVal, VMContext&: getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(Ty: IntPtrTy);

  // Common case: if the total offset is zero, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(V: Ptr, DestTy: IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(LHS: IntPtr, RHS: EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
                    2>
      Checks;

  if (PerformNullCheck) {
    // If the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have non-zero base pointer, we can not get null pointer
    // as a result, so the offset can not be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Arg: Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(Arg: ComputedGEP);
    auto *Valid = Builder.CreateICmpEQ(LHS: BaseIsNotNullptr, RHS: ResultIsNotNullptr);
    Checks.emplace_back(Args&: Valid, Args&: CheckOrdinal);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    // pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(V: EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If offset was positive, then the computed pointer can not be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If offset was negative, then the computed pointer can not be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(LHS: ComputedGEP, RHS: IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(LHS: EvaluatedGEP.TotalOffset, RHS: Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(LHS: ComputedGEP, RHS: IntPtr);
      ValidGEP =
          Builder.CreateSelect(C: PosOrZeroOffset, True: PosOrZeroValid, False: NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer can not be [unsigned] less than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(LHS: ComputedGEP, RHS: IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer can not be [unsigned] greater than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(LHS: ComputedGEP, RHS: IntPtr);
    }
    ValidGEP = Builder.CreateAnd(LHS: ValidGEP, RHS: NoOffsetOverflow);
    Checks.emplace_back(Args&: ValidGEP, Args&: CheckOrdinal);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checked: Checks, Check: CheckHandler, StaticArgs, DynamicArgs);

  return GEPVal;
}
6495
6496Address CodeGenFunction::EmitCheckedInBoundsGEP(
6497 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6498 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6499 const Twine &Name) {
6500 if (!SanOpts.has(K: SanitizerKind::PointerOverflow)) {
6501 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6502 if (!SignedIndices && !IsSubtraction)
6503 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6504
6505 return Builder.CreateGEP(Addr, IdxList, ElementType: elementType, Align, Name, NW: NWFlags);
6506 }
6507
6508 return RawAddress(
6509 EmitCheckedInBoundsGEP(ElemTy: Addr.getElementType(), Ptr: Addr.emitRawPointer(CGF&: *this),
6510 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6511 elementType, Align);
6512}
6513