//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
#include <cstdarg>
#include <optional>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
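    // Signed division overflows only for INT_MIN / -1 (and the corresponding
    // INT_MIN % -1), which sdiv_ov detects. Unsigned division and remainder
    // can never overflow; division by zero is checked separately, so report
    // "no overflow" here for those cases.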
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E; // Entire expr, used when reporting unsupported operations;
                 // may not be a BinaryOperator.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases,
  /// this operation did not follow usual arithmetic conversion and both
  /// operands might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }

  /// Check if the RHS has a signed integer representation.
  bool rhsHasSignedIntegerRepresentation() const {
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType RHSType = BinOp->getRHS()->getType();
      return RHSType->hasSignedIntegerRepresentation();
    }
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
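/// For example, in 'c + c' with 'unsigned char c', each operand is implicitly
/// promoted to 'int'; for either promoted operand this returns
/// 'unsigned char'.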
static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                       const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return std::nullopt;

  QualType BaseTy = Base->getType();
  if (!Ctx.isPromotableIntegerType(BaseTy) ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return std::nullopt;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  if (Op.Ty->isSignedIntegerType() &&
      Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
                                   Op.Ty)) {
    return true;
  }

  if (Op.Ty->isUnsignedIntegerType() &&
      Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
                                   Op.Ty)) {
    return true;
  }

  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);

  if (UO && UO->getOpcode() == UO_Minus &&
      Ctx.getLangOpts().isOverflowPatternExcluded(
          LangOptions::OverflowPatternExclusionKind::NegUnsignedConst) &&
      UO->isIntegerConstantExpr(Ctx))
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (UO)
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  if (BO->hasExcludedOverflowPattern())
    return true;

  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either
  // one of the unpromoted types is less than half the size of the promoted
  // type.
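  // For example, with 32-bit 'int': for 'unsigned char' operands 2*8 < 32, so
  // the product always fits in the promoted type and the check is elided; for
  // 'unsigned short' operands 2*16 == 32, the product can exceed INT_MAX, and
  // the check is kept.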
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(
      ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with scalar type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(
        cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.EmitLoadOfScalar(
            Address(Result, CGF.convertTypeForLoadStore(E->getType()),
                    CGF.getContext().getTypeAlignInChars(E->getType())),
            /*Volatile*/ false, E->getType(), E->getExprLoc());
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
    llvm_unreachable("Codegen for this isn't defined/implemented");
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot(const UnaryOperator *E);
  Value *VisitUnaryLNot(const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    if (E->isStoredAsBoolean())
      return llvm::ConstantInt::get(ConvertType(E->getType()),
                                    E->getBoolValue());
    assert(E->getAPValue().isInt() && "APValue type not supported");
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getAPValue().getInt());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
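      // Note the deliberate fallthroughs: with -fwrapv we emit a plain 'mul',
      // under the default UB rules a 'mul nsw', but once the signed-overflow
      // sanitizer (or -ftrapv) is in play we emit an overflow-checked multiply
      // unless the check is provably elidable.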
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS,
                                       LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,
                                                  bool isDiv);
  // Common helper for getting how wide the LHS of a shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shift constraints in OpenCL: mask the shift amount when the bit
  // width is a power of two, and use URem when it is not.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(
      const CompoundAssignOperator *E,
      Value *(ScalarExprEmitter::*F)(const BinOpInfo &), Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

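  /// Compute the type in which arithmetic should be performed when excess
  /// precision is in effect (e.g. '_Float16' evaluated in 'float'); returns a
  /// null QualType when no promotion is required.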
  QualType getPromotionType(QualType Ty) {
    const auto &Ctx = CGF.getContext();
    if (auto *CT = Ty->getAs<ComplexType>()) {
      QualType ElementType = CT->getElementType();
      if (ElementType.UseExcessPrecision(Ctx))
        return Ctx.getComplexType(Ctx.FloatTy);
    }

    if (Ty.UseExcessPrecision(Ctx)) {
      if (auto *VT = Ty->getAs<VectorType>()) {
        unsigned NumElements = VT->getNumElements();
        return Ctx.getVectorType(Ctx.FloatTy, NumElements,
                                 VT->getVectorKind());
      }
      return Ctx.FloatTy;
    }

    return QualType();
  }

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP) \
  Value *VisitBin##OP(const BinaryOperator *E) { \
    QualType promotionTy = getPromotionType(E->getType()); \
    auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
    if (result && !promotionTy.isNull()) \
      result = EmitUnPromotedValue(result, E->getType()); \
    return result; \
  } \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
    ApplyAtomGroup Grp(CGF.getDebugInfo()); \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
  }
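  // Each expansion defines a pair of visitors; e.g. HANDLEBINOP(Mul) produces
  // VisitBinMul and VisitBinMulAssign.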
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
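  // The relational comparisons use ordered (O*) floating-point predicates and
  // are signaling; EQ/NE use OEQ/UNE (so comparing with NaN yields false for
  // == and true for !=) and are quiet.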
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign(const BinaryOperator *E);

  Value *VisitBinLAnd(const BinaryOperator *E);
  Value *VisitBinLOr(const BinaryOperator *E);
  Value *VisitBinComma(const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
  auto CheckHandler = SanitizerHandler::FloatCastOverflow;
  SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
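  // For example, for IEEE 'float' -> 'i32' this yields MaxSrc == 2^31 exactly
  // and MinSrc == the next float below -2^31; no float lies strictly between
  // MinSrc and -2^31, so the open-interval compare below accepts exactly the
  // values whose truncation is representable.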
1041
1042 // If we're converting from __half, convert the range to float to match
1043 // the type of src.
1044 if (OrigSrcType->isHalfType()) {
1045 const llvm::fltSemantics &Sema =
1046 CGF.getContext().getFloatTypeSemantics(T: SrcType);
1047 bool IsInexact;
1048 MinSrc.convert(ToSemantics: Sema, RM: APFloat::rmTowardZero, losesInfo: &IsInexact);
1049 MaxSrc.convert(ToSemantics: Sema, RM: APFloat::rmTowardZero, losesInfo: &IsInexact);
1050 }
1051
1052 llvm::Value *GE =
1053 Builder.CreateFCmpOGT(LHS: Src, RHS: llvm::ConstantFP::get(Context&: VMContext, V: MinSrc));
1054 llvm::Value *LE =
1055 Builder.CreateFCmpOLT(LHS: Src, RHS: llvm::ConstantFP::get(Context&: VMContext, V: MaxSrc));
1056 Check = Builder.CreateAnd(LHS: GE, RHS: LE);
1057
1058 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1059 CGF.EmitCheckTypeDescriptor(T: OrigSrcType),
1060 CGF.EmitCheckTypeDescriptor(T: DstType)};
1061 CGF.EmitCheck(Checked: std::make_pair(x&: Check, y&: CheckOrdinal), Check: CheckHandler, StaticArgs,
1062 DynamicArgs: OrigSrc);
1063}
1064
1065// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1066// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1067static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1068 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1069EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1070 QualType DstType, CGBuilderTy &Builder) {
1071 llvm::Type *SrcTy = Src->getType();
1072 llvm::Type *DstTy = Dst->getType();
1073 (void)DstTy; // Only used in assert()
1074
1075 // This should be truncation of integral types.
1076 assert(Src != Dst);
1077 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1078 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1079 "non-integer llvm type");
1080
1081 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1082 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1083
1084 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1085 // Else, it is a signed truncation.
1086 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1087 SanitizerKind::SanitizerOrdinal Ordinal;
1088 if (!SrcSigned && !DstSigned) {
1089 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1090 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1091 } else {
1092 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1093 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1094 }
1095
1096 llvm::Value *Check = nullptr;
1097 // 1. Extend the truncated value back to the same width as the Src.
1098 Check = Builder.CreateIntCast(V: Dst, DestTy: SrcTy, isSigned: DstSigned, Name: "anyext");
1099 // 2. Equality-compare with the original source value
1100 Check = Builder.CreateICmpEQ(LHS: Check, RHS: Src, Name: "truncheck");
1101 // If the comparison result is 'i1 false', then the truncation was lossy.
1102 return std::make_pair(x&: Kind, y: std::make_pair(x&: Check, y&: Ordinal));
1103}
1104
1105static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
1106 QualType SrcType, QualType DstType) {
1107 return SrcType->isIntegerType() && DstType->isIntegerType();
1108}
1109
1110void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1111 Value *Dst, QualType DstType,
1112 SourceLocation Loc) {
1113 if (!CGF.SanOpts.hasOneOf(K: SanitizerKind::ImplicitIntegerTruncation))
1114 return;
1115
1116 // We only care about int->int conversions here.
1117 // We ignore conversions to/from pointer and/or bool.
1118 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1119 DstType))
1120 return;
1121
1122 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1123 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1124 // This must be truncation. Else we do not care.
1125 if (SrcBits <= DstBits)
1126 return;
1127
1128 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1129
1130 // If the integer sign change sanitizer is enabled,
1131 // and we are truncating from larger unsigned type to smaller signed type,
1132 // let that next sanitizer deal with it.
1133 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1134 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1135 if (CGF.SanOpts.has(K: SanitizerKind::ImplicitIntegerSignChange) &&
1136 (!SrcSigned && DstSigned))
1137 return;
1138
1139 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1140 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1141 Check;
1142
1143 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1144 {
1145 // We don't know the check kind until we call
1146 // EmitIntegerTruncationCheckHelper, but we want to annotate
1147 // EmitIntegerTruncationCheckHelper's instructions too.
1148 SanitizerDebugLocation SanScope(
1149 &CGF,
1150 {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1151 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1152 CheckHandler);
1153 Check =
1154 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1155 // If the comparison result is 'i1 false', then the truncation was lossy.
1156 }
1157
1158 // Do we care about this type of truncation?
1159 if (!CGF.SanOpts.has(O: Check.second.second))
1160 return;
1161
1162 SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);
1163
1164 // Does some SSCL ignore this type?
1165 if (CGF.getContext().isTypeIgnoredBySanitizer(
1166 Mask: SanitizerMask::bitPosToMask(Pos: Check.second.second), Ty: DstType))
1167 return;
1168
1169 llvm::Constant *StaticArgs[] = {
1170 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(T: SrcType),
1171 CGF.EmitCheckTypeDescriptor(T: DstType),
1172 llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: Check.first),
1173 llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: 0)};
1174
1175 CGF.EmitCheck(Checked: Check.second, Check: CheckHandler, StaticArgs, DynamicArgs: {Src, Dst});
1176}
1177
1178static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1179 const char *Name,
1180 CGBuilderTy &Builder) {
1181 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1182 llvm::Type *VTy = V->getType();
1183 if (!VSigned) {
1184 // If the value is unsigned, then it is never negative.
1185 return llvm::ConstantInt::getFalse(Context&: VTy->getContext());
1186 }
1187 llvm::Constant *Zero = llvm::ConstantInt::get(Ty: VTy, V: 0);
1188 return Builder.CreateICmp(P: llvm::ICmpInst::ICMP_SLT, LHS: V, RHS: Zero,
1189 Name: llvm::Twine(Name) + "." + V->getName() +
1190 ".negativitycheck");
1191}
1192
1193// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1194// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1195static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1196 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1197EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1198 QualType DstType, CGBuilderTy &Builder) {
1199 llvm::Type *SrcTy = Src->getType();
1200 llvm::Type *DstTy = Dst->getType();
1201
1202 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1203 "non-integer llvm type");
1204
1205 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1206 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1207 (void)SrcSigned; // Only used in assert()
1208 (void)DstSigned; // Only used in assert()
1209 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1210 unsigned DstBits = DstTy->getScalarSizeInBits();
1211 (void)SrcBits; // Only used in assert()
1212 (void)DstBits; // Only used in assert()
1213
1214 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1215 "either the widths should be different, or the signednesses.");
1216
1217 // 1. Was the old Value negative?
1218 llvm::Value *SrcIsNegative =
1219 EmitIsNegativeTestHelper(V: Src, VType: SrcType, Name: "src", Builder);
1220 // 2. Is the new Value negative?
1221 llvm::Value *DstIsNegative =
1222 EmitIsNegativeTestHelper(V: Dst, VType: DstType, Name: "dst", Builder);
1223 // 3. Now, was the 'negativity status' preserved during the conversion?
1224 // NOTE: conversion from negative to zero is considered to change the sign.
1225 // (We want to get 'false' when the conversion changed the sign)
1226 // So we should just equality-compare the negativity statuses.
1227 llvm::Value *Check = nullptr;
1228 Check = Builder.CreateICmpEQ(LHS: SrcIsNegative, RHS: DstIsNegative, Name: "signchangecheck");
1229 // If the comparison result is 'false', then the conversion changed the sign.
1230 return std::make_pair(
1231 x: ScalarExprEmitter::ICCK_IntegerSignChange,
1232 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitIntegerSignChange));
1233}
1234
1235void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1236 Value *Dst, QualType DstType,
1237 SourceLocation Loc) {
1238 if (!CGF.SanOpts.has(O: SanitizerKind::SO_ImplicitIntegerSignChange))
1239 return;
1240
1241 llvm::Type *SrcTy = Src->getType();
1242 llvm::Type *DstTy = Dst->getType();
1243
1244 // We only care about int->int conversions here.
1245 // We ignore conversions to/from pointer and/or bool.
1246 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1247 DstType))
1248 return;
1249
1250 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1251 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1252 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1253 unsigned DstBits = DstTy->getScalarSizeInBits();
1254
1255 // Now, we do not need to emit the check in *all* of the cases.
1256 // We can avoid emitting it in some obvious cases where it would have been
1257 // dropped by the opt passes (instcombine) always anyways.
1258 // If it's a cast between effectively the same type, no check.
1259 // NOTE: this is *not* equivalent to checking the canonical types.
1260 if (SrcSigned == DstSigned && SrcBits == DstBits)
1261 return;
1262 // At least one of the values needs to have signed type.
1263 // If both are unsigned, then obviously, neither of them can be negative.
1264 if (!SrcSigned && !DstSigned)
1265 return;
1266 // If the conversion is to *larger* *signed* type, then no check is needed.
1267 // Because either sign-extension happens (so the sign will remain),
1268 // or zero-extension will happen (the sign bit will be zero.)
1269 if ((DstBits > SrcBits) && DstSigned)
1270 return;
1271 if (CGF.SanOpts.has(K: SanitizerKind::ImplicitSignedIntegerTruncation) &&
1272 (SrcBits > DstBits) && SrcSigned) {
1273 // If the signed integer truncation sanitizer is enabled,
1274 // and this is a truncation from signed type, then no check is needed.
1275 // Because here sign change check is interchangeable with truncation check.
1276 return;
1277 }
1278 // Does an SSCL have an entry for the DstType under its respective sanitizer
1279 // section?
1280 if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
1281 Mask: SanitizerKind::ImplicitSignedIntegerTruncation, Ty: DstType))
1282 return;
1283 if (!DstSigned &&
1284 CGF.getContext().isTypeIgnoredBySanitizer(
1285 Mask: SanitizerKind::ImplicitUnsignedIntegerTruncation, Ty: DstType))
1286 return;
1287 // That's it. We can't rule out any more cases with the data we have.
1288
1289 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1290 SanitizerDebugLocation SanScope(
1291 &CGF,
1292 {SanitizerKind::SO_ImplicitIntegerSignChange,
1293 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1294 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1295 CheckHandler);
1296
1297 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1298 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1299 Check;
1300
1301 // Each of these checks needs to return 'false' when an issue was detected.
1302 ImplicitConversionCheckKind CheckKind;
1303 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
1304 2>
1305 Checks;
1306 // So we can 'and' all the checks together, and still get 'false',
1307 // if at least one of the checks detected an issue.
1308
1309 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1310 CheckKind = Check.first;
1311 Checks.emplace_back(Args&: Check.second);
1312
1313 if (CGF.SanOpts.has(K: SanitizerKind::ImplicitSignedIntegerTruncation) &&
1314 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1315 // If the signed integer truncation sanitizer was enabled,
1316 // and we are truncating from larger unsigned type to smaller signed type,
1317 // let's handle the case we skipped in that check.
1318 Check =
1319 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1320 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1321 Checks.emplace_back(Args&: Check.second);
1322 // If the comparison result is 'i1 false', then the truncation was lossy.
1323 }
1324
1325 llvm::Constant *StaticArgs[] = {
1326 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(T: SrcType),
1327 CGF.EmitCheckTypeDescriptor(T: DstType),
1328 llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: CheckKind),
1329 llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: 0)};
1330 // EmitCheck() will 'and' all the checks together.
1331 CGF.EmitCheck(Checked: Checks, Check: CheckHandler, StaticArgs, DynamicArgs: {Src, Dst});
1332}
1333
1334// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1335// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1336static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1337 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1338EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1339 QualType DstType, CGBuilderTy &Builder) {
1340 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1341 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1342
1343 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1344 if (!SrcSigned && !DstSigned)
1345 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1346 else
1347 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1348
1349 llvm::Value *Check = nullptr;
1350 // 1. Extend the truncated value back to the same width as the Src.
1351 Check = Builder.CreateIntCast(V: Dst, DestTy: Src->getType(), isSigned: DstSigned, Name: "bf.anyext");
1352 // 2. Equality-compare with the original source value
1353 Check = Builder.CreateICmpEQ(LHS: Check, RHS: Src, Name: "bf.truncheck");
1354 // If the comparison result is 'i1 false', then the truncation was lossy.
1355
1356 return std::make_pair(
1357 x&: Kind,
1358 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitBitfieldConversion));
1359}
1360
1361// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1362// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1363static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1364 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1365EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1366 QualType DstType, CGBuilderTy &Builder) {
1367 // 1. Was the old Value negative?
1368 llvm::Value *SrcIsNegative =
1369 EmitIsNegativeTestHelper(V: Src, VType: SrcType, Name: "bf.src", Builder);
1370 // 2. Is the new Value negative?
1371 llvm::Value *DstIsNegative =
1372 EmitIsNegativeTestHelper(V: Dst, VType: DstType, Name: "bf.dst", Builder);
1373 // 3. Now, was the 'negativity status' preserved during the conversion?
1374 // NOTE: conversion from negative to zero is considered to change the sign.
1375 // (We want to get 'false' when the conversion changed the sign)
1376 // So we should just equality-compare the negativity statuses.
1377 llvm::Value *Check = nullptr;
1378 Check =
1379 Builder.CreateICmpEQ(LHS: SrcIsNegative, RHS: DstIsNegative, Name: "bf.signchangecheck");
1380 // If the comparison result is 'false', then the conversion changed the sign.
1381 return std::make_pair(
1382 x: ScalarExprEmitter::ICCK_IntegerSignChange,
1383 y: std::make_pair(x&: Check, y: SanitizerKind::SO_ImplicitBitfieldConversion));
1384}
1385
1386void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
1387 Value *Dst, QualType DstType,
1388 const CGBitFieldInfo &Info,
1389 SourceLocation Loc) {
1390
1391 if (!SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion))
1392 return;
1393
1394 // We only care about int->int conversions here.
1395 // We ignore conversions to/from pointer and/or bool.
1396 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1397 DstType))
1398 return;
1399
1400 if (DstType->isBooleanType() || SrcType->isBooleanType())
1401 return;
1402
1403 // This should be truncation of integral types.
1404 assert(isa<llvm::IntegerType>(Src->getType()) &&
1405 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1406
  // TODO: Calculate src width to avoid emitting code
  // for unnecessary cases.
1409 unsigned SrcBits = ConvertType(T: SrcType)->getScalarSizeInBits();
1410 unsigned DstBits = Info.Size;
1411
1412 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1413 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1414
1415 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1416 SanitizerDebugLocation SanScope(
1417 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1418
1419 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1420 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1421 Check;
1422
1423 // Truncation
1424 bool EmitTruncation = DstBits < SrcBits;
  // If Dst is signed and Src unsigned, we want to be more specific
  // about the CheckKind we emit; in this case we want to emit
  // ICCK_SignedIntegerTruncationOrSignChange.
1428 bool EmitTruncationFromUnsignedToSigned =
1429 EmitTruncation && DstSigned && !SrcSigned;
1430 // Sign change
1431 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1432 bool BothUnsigned = !SrcSigned && !DstSigned;
1433 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
  // We can avoid emitting sign-change checks in some obvious cases (see the
  // examples below):
  // 1. If Src and Dst have the same signedness and size.
  // 2. If both are unsigned, a sign check is unnecessary.
  // 3. If Dst is signed and bigger than Src, either sign-extension or
  //    zero-extension will make sure the sign remains.
1440 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
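  // Illustrative examples: assigning an 'unsigned int' to an
  // 'unsigned long long bf : 40' bitfield hits case 2, and assigning an 'int'
  // to a 'long long bf : 40' bitfield hits case 3, so neither needs a
  // sign-change check.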
1441
1442 if (EmitTruncation)
1443 Check =
1444 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1445 else if (EmitSignChange) {
1446 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1447 "either the widths should be different, or the signednesses.");
1448 Check =
1449 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1450 } else
1451 return;
1452
1453 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1454 if (EmitTruncationFromUnsignedToSigned)
1455 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1456
1457 llvm::Constant *StaticArgs[] = {
1458 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(T: SrcType),
1459 EmitCheckTypeDescriptor(T: DstType),
1460 llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: CheckKind),
1461 llvm::ConstantInt::get(Ty: Builder.getInt32Ty(), V: Info.Size)};
1462
1463 EmitCheck(Checked: Check.second, Check: CheckHandler, StaticArgs, DynamicArgs: {Src, Dst});
1464}
1465
1466Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1467 QualType DstType, llvm::Type *SrcTy,
1468 llvm::Type *DstTy,
1469 ScalarConversionOpts Opts) {
1470 // The Element types determine the type of cast to perform.
1471 llvm::Type *SrcElementTy;
1472 llvm::Type *DstElementTy;
1473 QualType SrcElementType;
1474 QualType DstElementType;
1475 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1476 SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
1477 DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1478 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1479 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1480 } else {
1481 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1482 "cannot cast between matrix and non-matrix types");
1483 SrcElementTy = SrcTy;
1484 DstElementTy = DstTy;
1485 SrcElementType = SrcType;
1486 DstElementType = DstType;
1487 }
1488
1489 if (isa<llvm::IntegerType>(Val: SrcElementTy)) {
1490 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1491 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1492 InputSigned = true;
1493 }
1494
1495 if (isa<llvm::IntegerType>(Val: DstElementTy))
1496 return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
1497 if (InputSigned)
1498 return Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
1499 return Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
1500 }
1501
1502 if (isa<llvm::IntegerType>(Val: DstElementTy)) {
1503 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1504 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1505
1506 // If we can't recognize overflow as undefined behavior, assume that
1507 // overflow saturates. This protects against normal optimizations if we are
1508 // compiling with non-standard FP semantics.
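    // E.g. for float -> i32 under these options this emits:
    //   %conv = call i32 @llvm.fptosi.sat.i32.f32(float %src)
    // which clamps out-of-range inputs to [INT32_MIN, INT32_MAX] (and maps
    // NaN to 0) instead of producing poison.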
1509 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1510 llvm::Intrinsic::ID IID =
1511 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1512 return Builder.CreateCall(Callee: CGF.CGM.getIntrinsic(IID, Tys: {DstTy, SrcTy}), Args: Src);
1513 }
1514
1515 if (IsSigned)
1516 return Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
1517 return Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
1518 }
1519
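  // Conversions between the 16-bit FP types (half <-> bfloat) have no direct
  // LLVM instruction, so round-trip through float. E.g. __bf16 -> _Float16
  // becomes (roughly):
  //   %fpext   = fpext bfloat %src to float
  //   %fptrunc = fptrunc float %fpext to half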
  if (DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy()) {
1521 Value *FloatVal = Builder.CreateFPExt(V: Src, DestTy: Builder.getFloatTy(), Name: "fpext");
1522 return Builder.CreateFPTrunc(V: FloatVal, DestTy: DstTy, Name: "fptrunc");
1523 }
1524 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1525 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1526 return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1527}
1528
1529/// Emit a conversion from the specified type to the specified destination type,
1530/// both of which are LLVM scalar types.
1531Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1532 QualType DstType,
1533 SourceLocation Loc,
1534 ScalarConversionOpts Opts) {
1535 // All conversions involving fixed point types should be handled by the
1536 // EmitFixedPoint family functions. This is done to prevent bloating up this
1537 // function more, and although fixed point numbers are represented by
1538 // integers, we do not want to follow any logic that assumes they should be
1539 // treated as integers.
1540 // TODO(leonardchan): When necessary, add another if statement checking for
1541 // conversions to fixed point types from other types.
1542 if (SrcType->isFixedPointType()) {
1543 if (DstType->isBooleanType())
1544 // It is important that we check this before checking if the dest type is
1545 // an integer because booleans are technically integer types.
1546 // We do not need to check the padding bit on unsigned types if unsigned
1547 // padding is enabled because overflow into this bit is undefined
1548 // behavior.
1549 return Builder.CreateIsNotNull(Arg: Src, Name: "tobool");
1550 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1551 DstType->isRealFloatingType())
1552 return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);
1553
1554 llvm_unreachable(
1555 "Unhandled scalar conversion from a fixed point type to another type.");
1556 } else if (DstType->isFixedPointType()) {
1557 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1558 // This also includes converting booleans and enums to fixed point types.
1559 return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);
1560
1561 llvm_unreachable(
1562 "Unhandled scalar conversion to a fixed point type from another type.");
1563 }
1564
1565 QualType NoncanonicalSrcType = SrcType;
1566 QualType NoncanonicalDstType = DstType;
1567
1568 SrcType = CGF.getContext().getCanonicalType(T: SrcType);
1569 DstType = CGF.getContext().getCanonicalType(T: DstType);
1570 if (SrcType == DstType) return Src;
1571
1572 if (DstType->isVoidType()) return nullptr;
1573
1574 llvm::Value *OrigSrc = Src;
1575 QualType OrigSrcType = SrcType;
1576 llvm::Type *SrcTy = Src->getType();
1577
1578 // Handle conversions to bool first, they are special: comparisons against 0.
1579 if (DstType->isBooleanType())
1580 return EmitConversionToBool(Src, SrcType);
1581
1582 llvm::Type *DstTy = ConvertType(T: DstType);
1583
1584 // Cast from half through float if half isn't a native type.
1585 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1586 // Cast to FP using the intrinsic if the half type itself isn't supported.
1587 if (DstTy->isFloatingPointTy()) {
1588 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1589 return Builder.CreateCall(
1590 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_from_fp16, Tys: DstTy),
1591 Args: Src);
1592 } else {
1593 // Cast to other types through float, using either the intrinsic or FPExt,
1594 // depending on whether the half type itself is supported
1595 // (as opposed to operations on half, available with NativeHalfType).
1596 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1597 Src = Builder.CreateCall(
1598 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_from_fp16,
1599 Tys: CGF.CGM.FloatTy),
1600 Args: Src);
1601 } else {
1602 Src = Builder.CreateFPExt(V: Src, DestTy: CGF.CGM.FloatTy, Name: "conv");
1603 }
1604 SrcType = CGF.getContext().FloatTy;
1605 SrcTy = CGF.FloatTy;
1606 }
1607 }
1608
1609 // Ignore conversions like int -> uint.
1610 if (SrcTy == DstTy) {
1611 if (Opts.EmitImplicitIntegerSignChangeChecks)
1612 EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Src,
1613 DstType: NoncanonicalDstType, Loc);
1614
1615 return Src;
1616 }
1617
1618 // Handle pointer conversions next: pointers can only be converted to/from
1619 // other pointers and integers. Check for pointer types in terms of LLVM, as
1620 // some native types (like Obj-C id) may map to a pointer type.
1621 if (auto DstPT = dyn_cast<llvm::PointerType>(Val: DstTy)) {
1622 // The source value may be an integer, or a pointer.
1623 if (isa<llvm::PointerType>(Val: SrcTy))
1624 return Src;
1625
1626 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1627 // First, convert to the correct width so that we control the kind of
1628 // extension.
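    // E.g. converting a 'short' to a pointer on a 64-bit target emits
    // (roughly):
    //   %conv  = sext i16 %src to i64
    //   %conv1 = inttoptr i64 %conv to ptr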
1629 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1630 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1631 llvm::Value* IntResult =
1632 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
1633 // Then, cast to pointer.
1634 return Builder.CreateIntToPtr(V: IntResult, DestTy: DstTy, Name: "conv");
1635 }
1636
1637 if (isa<llvm::PointerType>(Val: SrcTy)) {
    // Must be a ptr-to-int cast.
1639 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1640 return Builder.CreatePtrToInt(V: Src, DestTy: DstTy, Name: "conv");
1641 }
1642
1643 // A scalar can be splatted to an extended vector of the same element type
1644 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1645 // Sema should add casts to make sure that the source expression's type is
1646 // the same as the vector's element type (sans qualifiers)
1647 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1648 SrcType.getTypePtr() &&
1649 "Splatted expr doesn't match with vector element type?");
1650
1651 // Splat the element across to all elements
1652 unsigned NumElements = cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements();
1653 return Builder.CreateVectorSplat(NumElts: NumElements, V: Src, Name: "splat");
1654 }
1655
1656 if (SrcType->isMatrixType() && DstType->isMatrixType())
1657 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1658
1659 if (isa<llvm::VectorType>(Val: SrcTy) || isa<llvm::VectorType>(Val: DstTy)) {
1660 // Allow bitcast from vector to integer/fp of the same size.
1661 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1662 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1663 if (SrcSize == DstSize)
1664 return Builder.CreateBitCast(V: Src, DestTy: DstTy, Name: "conv");
1665
1666 // Conversions between vectors of different sizes are not allowed except
1667 // when vectors of half are involved. Operations on storage-only half
1668 // vectors require promoting half vector operands to float vectors and
1669 // truncating the result, which is either an int or float vector, to a
1670 // short or half vector.
1671
1672 // Source and destination are both expected to be vectors.
1673 llvm::Type *SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
1674 llvm::Type *DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1675 (void)DstElementTy;
1676
1677 assert(((SrcElementTy->isIntegerTy() &&
1678 DstElementTy->isIntegerTy()) ||
1679 (SrcElementTy->isFloatingPointTy() &&
1680 DstElementTy->isFloatingPointTy())) &&
1681 "unexpected conversion between a floating-point vector and an "
1682 "integer vector");
1683
1684 // Truncate an i32 vector to an i16 vector.
1685 if (SrcElementTy->isIntegerTy())
1686 return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: false, Name: "conv");
1687
1688 // Truncate a float vector to a half vector.
1689 if (SrcSize > DstSize)
1690 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1691
1692 // Promote a half vector to a float vector.
1693 return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1694 }
1695
1696 // Finally, we have the arithmetic types: real int/float.
1697 Value *Res = nullptr;
1698 llvm::Type *ResTy = DstTy;
1699
1700 // An overflowing conversion has undefined behavior if either the source type
1701 // or the destination type is a floating-point type. However, we consider the
1702 // range of representable values for all floating-point types to be
1703 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1704 // floating-point type.
1705 if (CGF.SanOpts.has(K: SanitizerKind::FloatCastOverflow) &&
1706 OrigSrcType->isFloatingType())
1707 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1708 Loc);
1709
1710 // Cast to half through float if half isn't a native type.
1711 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1712 // Make sure we cast in a single step if from another FP type.
1713 if (SrcTy->isFloatingPointTy()) {
1714 // Use the intrinsic if the half type itself isn't supported
1715 // (as opposed to operations on half, available with NativeHalfType).
1716 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1717 return Builder.CreateCall(
1718 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_to_fp16, Tys: SrcTy), Args: Src);
1719 // If the half type is supported, just use an fptrunc.
1720 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy);
1721 }
1722 DstTy = CGF.FloatTy;
1723 }
1724
1725 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1726
1727 if (DstTy != ResTy) {
1728 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1729 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1730 Res = Builder.CreateCall(
1731 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_to_fp16, Tys: CGF.CGM.FloatTy),
1732 Args: Res);
1733 } else {
1734 Res = Builder.CreateFPTrunc(V: Res, DestTy: ResTy, Name: "conv");
1735 }
1736 }
1737
1738 if (Opts.EmitImplicitIntegerTruncationChecks)
1739 EmitIntegerTruncationCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1740 DstType: NoncanonicalDstType, Loc);
1741
1742 if (Opts.EmitImplicitIntegerSignChangeChecks)
1743 EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1744 DstType: NoncanonicalDstType, Loc);
1745
1746 return Res;
1747}
1748
1749Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1750 QualType DstTy,
1751 SourceLocation Loc) {
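  // Everything below delegates to llvm::FixedPointBuilder. E.g. a fixed-point
  // to integer conversion is (roughly) an arithmetic shift right by the
  // number of fractional bits of the source semantics, resized to the
  // destination width.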
1752 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1753 llvm::Value *Result;
1754 if (SrcTy->isRealFloatingType())
1755 Result = FPBuilder.CreateFloatingToFixed(Src,
1756 DstSema: CGF.getContext().getFixedPointSemantics(Ty: DstTy));
1757 else if (DstTy->isRealFloatingType())
1758 Result = FPBuilder.CreateFixedToFloating(Src,
1759 SrcSema: CGF.getContext().getFixedPointSemantics(Ty: SrcTy),
1760 DstTy: ConvertType(T: DstTy));
1761 else {
1762 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(Ty: SrcTy);
1763 auto DstFPSema = CGF.getContext().getFixedPointSemantics(Ty: DstTy);
1764
1765 if (DstTy->isIntegerType())
1766 Result = FPBuilder.CreateFixedToInteger(Src, SrcSema: SrcFPSema,
1767 DstWidth: DstFPSema.getWidth(),
1768 DstIsSigned: DstFPSema.isSigned());
1769 else if (SrcTy->isIntegerType())
1770 Result = FPBuilder.CreateIntegerToFixed(Src, SrcIsSigned: SrcFPSema.isSigned(),
1771 DstSema: DstFPSema);
1772 else
1773 Result = FPBuilder.CreateFixedToFixed(Src, SrcSema: SrcFPSema, DstSema: DstFPSema);
1774 }
1775 return Result;
1776}
1777
1778/// Emit a conversion from the specified complex type to the specified
1779/// destination type, where the destination type is an LLVM scalar type.
1780Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1781 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1782 SourceLocation Loc) {
1783 // Get the source element type.
1784 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1785
1786 // Handle conversions to bool first, they are special: comparisons against 0.
1787 if (DstTy->isBooleanType()) {
1788 // Complex != 0 -> (Real != 0) | (Imag != 0)
1789 Src.first = EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1790 Src.second = EmitScalarConversion(Src: Src.second, SrcType: SrcTy, DstType: DstTy, Loc);
1791 return Builder.CreateOr(LHS: Src.first, RHS: Src.second, Name: "tobool");
1792 }
1793
  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type."
1798 return EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1799}
1800
1801Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1802 return CGF.EmitFromMemory(Value: CGF.CGM.EmitNullConstant(T: Ty), Ty);
1803}
1804
1805/// Emit a sanitization check for the given "binary" operation (which
1806/// might actually be a unary increment which has been lowered to a binary
1807/// operation). The check passes if all values in \p Checks (which are \c i1),
1808/// are \c true.
1809void ScalarExprEmitter::EmitBinOpCheck(
1810 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
1811 const BinOpInfo &Info) {
1812 assert(CGF.IsSanitizerScope);
1813 SanitizerHandler Check;
1814 SmallVector<llvm::Constant *, 4> StaticData;
1815 SmallVector<llvm::Value *, 2> DynamicData;
1816
1817 BinaryOperatorKind Opcode = Info.Opcode;
1818 if (BinaryOperator::isCompoundAssignmentOp(Opc: Opcode))
1819 Opcode = BinaryOperator::getOpForCompoundAssignment(Opc: Opcode);
1820
1821 StaticData.push_back(Elt: CGF.EmitCheckSourceLocation(Loc: Info.E->getExprLoc()));
1822 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: Info.E);
1823 if (UO && UO->getOpcode() == UO_Minus) {
1824 Check = SanitizerHandler::NegateOverflow;
1825 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: UO->getType()));
1826 DynamicData.push_back(Elt: Info.RHS);
1827 } else {
1828 if (BinaryOperator::isShiftOp(Opc: Opcode)) {
1829 // Shift LHS negative or too large, or RHS out of bounds.
1830 Check = SanitizerHandler::ShiftOutOfBounds;
1831 const BinaryOperator *BO = cast<BinaryOperator>(Val: Info.E);
1832 StaticData.push_back(
1833 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getLHS()->getType()));
1834 StaticData.push_back(
1835 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getRHS()->getType()));
1836 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1837 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1838 Check = SanitizerHandler::DivremOverflow;
1839 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1840 } else {
1841 // Arithmetic overflow (+, -, *).
1842 switch (Opcode) {
1843 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1844 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1845 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1846 default: llvm_unreachable("unexpected opcode for bin op check");
1847 }
1848 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1849 }
1850 DynamicData.push_back(Elt: Info.LHS);
1851 DynamicData.push_back(Elt: Info.RHS);
1852 }
1853
1854 CGF.EmitCheck(Checked: Checks, Check, StaticArgs: StaticData, DynamicArgs: DynamicData);
1855}
1856
1857//===----------------------------------------------------------------------===//
1858// Visitor Methods
1859//===----------------------------------------------------------------------===//
1860
1861Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1862 CGF.ErrorUnsupported(S: E, Type: "scalar expression");
1863 if (E->getType()->isVoidType())
1864 return nullptr;
1865 return llvm::PoisonValue::get(T: CGF.ConvertType(T: E->getType()));
1866}
1867
1868Value *
1869ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1870 ASTContext &Context = CGF.getContext();
1871 unsigned AddrSpace =
1872 Context.getTargetAddressSpace(AS: CGF.CGM.GetGlobalConstantAddressSpace());
1873 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
1874 Str: E->ComputeName(Context), Name: "__usn_str", AddressSpace: AddrSpace);
1875
1876 llvm::Type *ExprTy = ConvertType(T: E->getType());
1877 return Builder.CreatePointerBitCastOrAddrSpaceCast(V: GlobalConstStr, DestTy: ExprTy,
1878 Name: "usn_addr_cast");
1879}
1880
1881Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1882 assert(E->getDataElementCount() == 1);
1883 auto It = E->begin();
1884 return Builder.getInt(AI: (*It)->getValue());
1885}
1886
1887Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1888 // Vector Mask Case
1889 if (E->getNumSubExprs() == 2) {
1890 Value *LHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1891 Value *RHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1892 Value *Mask;
1893
1894 auto *LTy = cast<llvm::FixedVectorType>(Val: LHS->getType());
1895 unsigned LHSElts = LTy->getNumElements();
1896
1897 Mask = RHS;
1898
1899 auto *MTy = cast<llvm::FixedVectorType>(Val: Mask->getType());
1900
1901 // Mask off the high bits of each shuffle index.
1902 Value *MaskBits =
1903 llvm::ConstantInt::get(Ty: MTy, V: llvm::NextPowerOf2(A: LHSElts - 1) - 1);
1904 Mask = Builder.CreateAnd(LHS: Mask, RHS: MaskBits, Name: "mask");
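    // E.g. with a 4-element LHS, MaskBits is 3 (llvm::NextPowerOf2(3) - 1),
    // so only the low bits of each index are used and out-of-range indices
    // wrap around instead of selecting out of bounds.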
1905
1906 // newv = undef
1907 // mask = mask & maskbits
1908 // for each elt
1909 // n = extract mask i
1910 // x = extract val n
1911 // newv = insert newv, x, i
1912 auto *RTy = llvm::FixedVectorType::get(ElementType: LTy->getElementType(),
1913 NumElts: MTy->getNumElements());
1914 Value* NewV = llvm::PoisonValue::get(T: RTy);
1915 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1916 Value *IIndx = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: i);
1917 Value *Indx = Builder.CreateExtractElement(Vec: Mask, Idx: IIndx, Name: "shuf_idx");
1918
1919 Value *VExt = Builder.CreateExtractElement(Vec: LHS, Idx: Indx, Name: "shuf_elt");
1920 NewV = Builder.CreateInsertElement(Vec: NewV, NewElt: VExt, Idx: IIndx, Name: "shuf_ins");
1921 }
1922 return NewV;
1923 }
1924
1925 Value* V1 = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1926 Value* V2 = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1927
1928 SmallVector<int, 32> Indices;
1929 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1930 llvm::APSInt Idx = E->getShuffleMaskIdx(N: i - 2);
1931 // Check for -1 and output it as undef in the IR.
1932 if (Idx.isSigned() && Idx.isAllOnes())
1933 Indices.push_back(Elt: -1);
1934 else
1935 Indices.push_back(Elt: Idx.getZExtValue());
1936 }
1937
1938 return Builder.CreateShuffleVector(V1, V2, Mask: Indices, Name: "shuffle");
1939}
1940
1941Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1942 QualType SrcType = E->getSrcExpr()->getType(),
1943 DstType = E->getType();
1944
1945 Value *Src = CGF.EmitScalarExpr(E: E->getSrcExpr());
1946
1947 SrcType = CGF.getContext().getCanonicalType(T: SrcType);
1948 DstType = CGF.getContext().getCanonicalType(T: DstType);
1949 if (SrcType == DstType) return Src;
1950
1951 assert(SrcType->isVectorType() &&
1952 "ConvertVector source type must be a vector");
1953 assert(DstType->isVectorType() &&
1954 "ConvertVector destination type must be a vector");
1955
1956 llvm::Type *SrcTy = Src->getType();
1957 llvm::Type *DstTy = ConvertType(T: DstType);
1958
1959 // Ignore conversions like int -> uint.
1960 if (SrcTy == DstTy)
1961 return Src;
1962
1963 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1964 DstEltType = DstType->castAs<VectorType>()->getElementType();
1965
1966 assert(SrcTy->isVectorTy() &&
1967 "ConvertVector source IR type must be a vector");
1968 assert(DstTy->isVectorTy() &&
1969 "ConvertVector destination IR type must be a vector");
1970
1971 llvm::Type *SrcEltTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType(),
1972 *DstEltTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1973
1974 if (DstEltType->isBooleanType()) {
1975 assert((SrcEltTy->isFloatingPointTy() ||
1976 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1977
1978 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: SrcTy);
1979 if (SrcEltTy->isFloatingPointTy()) {
1980 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
1981 return Builder.CreateFCmpUNE(LHS: Src, RHS: Zero, Name: "tobool");
1982 } else {
1983 return Builder.CreateICmpNE(LHS: Src, RHS: Zero, Name: "tobool");
1984 }
1985 }
1986
1987 // We have the arithmetic types: real int/float.
1988 Value *Res = nullptr;
1989
1990 if (isa<llvm::IntegerType>(Val: SrcEltTy)) {
1991 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1992 if (isa<llvm::IntegerType>(Val: DstEltTy))
1993 Res = Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
1994 else {
1995 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
1996 if (InputSigned)
1997 Res = Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
1998 else
1999 Res = Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
2000 }
2001 } else if (isa<llvm::IntegerType>(Val: DstEltTy)) {
2002 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2003 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2004 if (DstEltType->isSignedIntegerOrEnumerationType())
2005 Res = Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
2006 else
2007 Res = Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
2008 } else {
2009 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2010 "Unknown real conversion");
2011 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2012 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2013 Res = Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
2014 else
2015 Res = Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
2016 }
2017
2018 return Res;
2019}
2020
2021Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
2022 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(ME: E)) {
2023 CGF.EmitIgnoredExpr(E: E->getBase());
2024 return CGF.emitScalarConstant(Constant, E);
2025 } else {
2026 Expr::EvalResult Result;
2027 if (E->EvaluateAsInt(Result, Ctx: CGF.getContext(), AllowSideEffects: Expr::SE_AllowSideEffects)) {
2028 llvm::APSInt Value = Result.Val.getInt();
2029 CGF.EmitIgnoredExpr(E: E->getBase());
2030 return Builder.getInt(AI: Value);
2031 }
2032 }
2033
2034 llvm::Value *Result = EmitLoadOfLValue(E);
2035
2036 // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
2037 // debug info for the pointer, even if there is no variable associated with
2038 // the pointer's expression.
2039 if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
2040 if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Val: Result)) {
2041 if (llvm::GetElementPtrInst *GEP =
2042 dyn_cast<llvm::GetElementPtrInst>(Val: Load->getPointerOperand())) {
2043 if (llvm::Instruction *Pointer =
2044 dyn_cast<llvm::Instruction>(Val: GEP->getPointerOperand())) {
2045 QualType Ty = E->getBase()->getType();
2046 if (!E->isArrow())
2047 Ty = CGF.getContext().getPointerType(T: Ty);
2048 CGF.getDebugInfo()->EmitPseudoVariable(Builder, Value: Pointer, Ty);
2049 }
2050 }
2051 }
2052 }
2053 return Result;
2054}
2055
2056Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
2057 TestAndClearIgnoreResultAssign();
2058
  // Emit subscript expressions in rvalue contexts. For most cases, this just
2060 // loads the lvalue formed by the subscript expr. However, we have to be
2061 // careful, because the base of a vector subscript is occasionally an rvalue,
2062 // so we can't get it as an lvalue.
2063 if (!E->getBase()->getType()->isVectorType() &&
2064 !E->getBase()->getType()->isSveVLSBuiltinType())
2065 return EmitLoadOfLValue(E);
2066
2067 // Handle the vector case. The base must be a vector, the index must be an
2068 // integer value.
2069 Value *Base = Visit(E: E->getBase());
2070 Value *Idx = Visit(E: E->getIdx());
2071 QualType IdxTy = E->getIdx()->getType();
2072
2073 if (CGF.SanOpts.has(K: SanitizerKind::ArrayBounds))
2074 CGF.EmitBoundsCheck(E, Base: E->getBase(), Index: Idx, IndexType: IdxTy, /*Accessed*/true);
2075
2076 return Builder.CreateExtractElement(Vec: Base, Idx, Name: "vecext");
2077}
2078
2079Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2080 TestAndClearIgnoreResultAssign();
2081
  // Handle the matrix case. The base must be a matrix, and the row and column
  // indices must be integer values.
2084 Value *RowIdx = CGF.EmitMatrixIndexExpr(E: E->getRowIdx());
2085 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E: E->getColumnIdx());
2086
2087 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2088 unsigned NumRows = MatrixTy->getNumRows();
2089 llvm::MatrixBuilder MB(Builder);
2090 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
2091 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2092 MB.CreateIndexAssumption(Idx, NumElements: MatrixTy->getNumElementsFlattened());
2093
2094 Value *Matrix = Visit(E: E->getBase());
2095
2096 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2097 return Builder.CreateExtractElement(Vec: Matrix, Idx, Name: "matrixext");
2098}
2099
2100static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2101 unsigned Off) {
2102 int MV = SVI->getMaskValue(Elt: Idx);
2103 if (MV == -1)
2104 return -1;
2105 return Off + MV;
2106}
2107
2108static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2109 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2110 "Index operand too large for shufflevector mask!");
2111 return C->getZExtValue();
2112}
2113
2114Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2115 bool Ignore = TestAndClearIgnoreResultAssign();
2116 (void)Ignore;
  assert(Ignore == false && "init list ignored");
2118 unsigned NumInitElements = E->getNumInits();
2119
2120 // HLSL initialization lists in the AST are an expansion which can contain
2121 // side-effecting expressions wrapped in opaque value expressions. To properly
2122 // emit these we need to emit the opaque values before we emit the argument
  // expressions themselves. This is a little hacky, but it prevents us from
  // needing to do a bigger AST-level change for a language feature that we
  // need to deprecate in the near future. See the related HLSL language
  // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
2127 // * 0005-strict-initializer-lists.md
2128 // * 0032-constructors.md
2129 if (CGF.getLangOpts().HLSL)
2130 CGF.CGM.getHLSLRuntime().emitInitListOpaqueValues(CGF, E);
2131
2132 if (E->hadArrayRangeDesignator())
2133 CGF.ErrorUnsupported(S: E, Type: "GNU array range designator extension");
2134
2135 llvm::VectorType *VType =
2136 dyn_cast<llvm::VectorType>(Val: ConvertType(T: E->getType()));
2137
2138 if (!VType) {
2139 if (NumInitElements == 0) {
2140 // C++11 value-initialization for the scalar.
2141 return EmitNullValue(Ty: E->getType());
2142 }
2143 // We have a scalar in braces. Just use the first element.
2144 return Visit(E: E->getInit(Init: 0));
2145 }
2146
2147 if (isa<llvm::ScalableVectorType>(Val: VType)) {
2148 if (NumInitElements == 0) {
2149 // C++11 value-initialization for the vector.
2150 return EmitNullValue(Ty: E->getType());
2151 }
2152
2153 if (NumInitElements == 1) {
2154 Expr *InitVector = E->getInit(Init: 0);
2155
2156 // Initialize from another scalable vector of the same type.
2157 if (InitVector->getType().getCanonicalType() ==
2158 E->getType().getCanonicalType())
2159 return Visit(E: InitVector);
2160 }
2161
2162 llvm_unreachable("Unexpected initialization of a scalable vector!");
2163 }
2164
2165 unsigned ResElts = cast<llvm::FixedVectorType>(Val: VType)->getNumElements();
2166
2167 // Loop over initializers collecting the Value for each, and remembering
  // whether the source was a swizzle (ExtVectorElementExpr). This will allow
2169 // us to fold the shuffle for the swizzle into the shuffle for the vector
2170 // initializer, since LLVM optimizers generally do not want to touch
2171 // shuffles.
2172 unsigned CurIdx = 0;
2173 bool VIsPoisonShuffle = false;
2174 llvm::Value *V = llvm::PoisonValue::get(T: VType);
2175 for (unsigned i = 0; i != NumInitElements; ++i) {
2176 Expr *IE = E->getInit(Init: i);
2177 Value *Init = Visit(E: IE);
2178 SmallVector<int, 16> Args;
2179
2180 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Val: Init->getType());
2181
2182 // Handle scalar elements. If the scalar initializer is actually one
2183 // element of a different vector of the same width, use shuffle instead of
2184 // extract+insert.
2185 if (!VVT) {
2186 if (isa<ExtVectorElementExpr>(Val: IE)) {
2187 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Val: Init);
2188
2189 if (cast<llvm::FixedVectorType>(Val: EI->getVectorOperandType())
2190 ->getNumElements() == ResElts) {
2191 llvm::ConstantInt *C = cast<llvm::ConstantInt>(Val: EI->getIndexOperand());
2192 Value *LHS = nullptr, *RHS = nullptr;
2193 if (CurIdx == 0) {
2194 // insert into poison -> shuffle (src, poison)
2195 // shufflemask must use an i32
2196 Args.push_back(Elt: getAsInt32(C, I32Ty: CGF.Int32Ty));
2197 Args.resize(N: ResElts, NV: -1);
2198
2199 LHS = EI->getVectorOperand();
2200 RHS = V;
2201 VIsPoisonShuffle = true;
2202 } else if (VIsPoisonShuffle) {
2203 // insert into poison shuffle && size match -> shuffle (v, src)
2204 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(Val: V);
2205 for (unsigned j = 0; j != CurIdx; ++j)
2206 Args.push_back(Elt: getMaskElt(SVI: SVV, Idx: j, Off: 0));
2207 Args.push_back(Elt: ResElts + C->getZExtValue());
2208 Args.resize(N: ResElts, NV: -1);
2209
2210 LHS = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
2211 RHS = EI->getVectorOperand();
2212 VIsPoisonShuffle = false;
2213 }
2214 if (!Args.empty()) {
2215 V = Builder.CreateShuffleVector(V1: LHS, V2: RHS, Mask: Args);
2216 ++CurIdx;
2217 continue;
2218 }
2219 }
2220 }
2221 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx: Builder.getInt32(C: CurIdx),
2222 Name: "vecinit");
2223 VIsPoisonShuffle = false;
2224 ++CurIdx;
2225 continue;
2226 }
2227
2228 unsigned InitElts = cast<llvm::FixedVectorType>(Val: VVT)->getNumElements();
2229
2230 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2231 // input is the same width as the vector being constructed, generate an
2232 // optimized shuffle of the swizzle input into the result.
2233 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2234 if (isa<ExtVectorElementExpr>(Val: IE)) {
2235 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Val: Init);
2236 Value *SVOp = SVI->getOperand(i_nocapture: 0);
2237 auto *OpTy = cast<llvm::FixedVectorType>(Val: SVOp->getType());
2238
2239 if (OpTy->getNumElements() == ResElts) {
2240 for (unsigned j = 0; j != CurIdx; ++j) {
2241 // If the current vector initializer is a shuffle with poison, merge
2242 // this shuffle directly into it.
2243 if (VIsPoisonShuffle) {
2244 Args.push_back(Elt: getMaskElt(SVI: cast<llvm::ShuffleVectorInst>(Val: V), Idx: j, Off: 0));
2245 } else {
2246 Args.push_back(Elt: j);
2247 }
2248 }
2249 for (unsigned j = 0, je = InitElts; j != je; ++j)
2250 Args.push_back(Elt: getMaskElt(SVI, Idx: j, Off: Offset));
2251 Args.resize(N: ResElts, NV: -1);
2252
2253 if (VIsPoisonShuffle)
2254 V = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
2255
2256 Init = SVOp;
2257 }
2258 }
2259
2260 // Extend init to result vector length, and then shuffle its contribution
2261 // to the vector initializer into V.
2262 if (Args.empty()) {
2263 for (unsigned j = 0; j != InitElts; ++j)
2264 Args.push_back(Elt: j);
2265 Args.resize(N: ResElts, NV: -1);
2266 Init = Builder.CreateShuffleVector(V: Init, Mask: Args, Name: "vext");
2267
2268 Args.clear();
2269 for (unsigned j = 0; j != CurIdx; ++j)
2270 Args.push_back(Elt: j);
2271 for (unsigned j = 0; j != InitElts; ++j)
2272 Args.push_back(Elt: j + Offset);
2273 Args.resize(N: ResElts, NV: -1);
2274 }
2275
2276 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2277 // merging subsequent shuffles into this one.
2278 if (CurIdx == 0)
2279 std::swap(a&: V, b&: Init);
2280 V = Builder.CreateShuffleVector(V1: V, V2: Init, Mask: Args, Name: "vecinit");
2281 VIsPoisonShuffle = isa<llvm::PoisonValue>(Val: Init);
2282 CurIdx += InitElts;
2283 }
2284
  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers.
2290 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2291 Value *Idx = Builder.getInt32(C: CurIdx);
2292 llvm::Value *Init = llvm::Constant::getNullValue(Ty: EltTy);
2293 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx, Name: "vecinit");
2294 }
2295 return V;
2296}
2297
2298static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D) {
2299 return !D->isWeak();
2300}
2301
2302static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2303 E = E->IgnoreParens();
2304
2305 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2306 if (UO->getOpcode() == UO_Deref)
2307 return CGF.isPointerKnownNonNull(E: UO->getSubExpr());
2308
2309 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E))
2310 return isDeclRefKnownNonNull(CGF, D: DRE->getDecl());
2311
2312 if (const auto *ME = dyn_cast<MemberExpr>(Val: E)) {
2313 if (isa<FieldDecl>(Val: ME->getMemberDecl()))
2314 return true;
2315 return isDeclRefKnownNonNull(CGF, D: ME->getMemberDecl());
2316 }
2317
2318 // Array subscripts? Anything else?
2319
2320 return false;
2321}
2322
2323bool CodeGenFunction::isPointerKnownNonNull(const Expr *E) {
2324 assert(E->getType()->isSignableType(getContext()));
2325
2326 E = E->IgnoreParens();
2327
2328 if (isa<CXXThisExpr>(Val: E))
2329 return true;
2330
2331 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2332 if (UO->getOpcode() == UO_AddrOf)
2333 return isLValueKnownNonNull(CGF&: *this, E: UO->getSubExpr());
2334
2335 if (const auto *CE = dyn_cast<CastExpr>(Val: E))
2336 if (CE->getCastKind() == CK_FunctionToPointerDecay ||
2337 CE->getCastKind() == CK_ArrayToPointerDecay)
2338 return isLValueKnownNonNull(CGF&: *this, E: CE->getSubExpr());
2339
2340 // Maybe honor __nonnull?
2341
2342 return false;
2343}
2344
2345bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
2346 const Expr *E = CE->getSubExpr();
2347
2348 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2349 return false;
2350
2351 if (isa<CXXThisExpr>(Val: E->IgnoreParens())) {
2352 // We always assume that 'this' is never null.
2353 return false;
2354 }
2355
2356 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2357 // And that glvalue casts are never null.
2358 if (ICE->isGLValue())
2359 return false;
2360 }
2361
2362 return true;
2363}
2364
2365// RHS is an aggregate type
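// E.g. (illustrative HLSL) 'float4 v = (float4)s;', where 's' is a struct
// with four scalar fields: the struct is flattened in memory order and each
// leaf value is converted to the vector's element type.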
2366static Value *EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address RHSVal,
2367 QualType RHSTy, QualType LHSTy,
2368 SourceLocation Loc) {
2369 SmallVector<std::pair<Address, llvm::Value *>, 16> LoadGEPList;
2370 SmallVector<QualType, 16> SrcTypes; // Flattened type
2371 CGF.FlattenAccessAndType(Addr: RHSVal, AddrTy: RHSTy, AccessList&: LoadGEPList, FlatTypes&: SrcTypes);
  // LHS is either a vector or a builtin type.
  // If it's a vector, create a temp alloca to store into and return that.
2374 if (auto *VecTy = LHSTy->getAs<VectorType>()) {
    assert(SrcTypes.size() >= VecTy->getNumElements() &&
           "Flattened type on RHS must have at least as many elements as the "
           "vector on LHS.");
2377 llvm::Value *V =
2378 CGF.Builder.CreateLoad(Addr: CGF.CreateIRTemp(T: LHSTy, Name: "flatcast.tmp"));
2379 // write to V.
2380 for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
2381 llvm::Value *Load = CGF.Builder.CreateLoad(Addr: LoadGEPList[I].first, Name: "load");
2382 llvm::Value *Idx = LoadGEPList[I].second;
2383 Load = Idx ? CGF.Builder.CreateExtractElement(Vec: Load, Idx, Name: "vec.extract")
2384 : Load;
2385 llvm::Value *Cast = CGF.EmitScalarConversion(
2386 Src: Load, SrcTy: SrcTypes[I], DstTy: VecTy->getElementType(), Loc);
2387 V = CGF.Builder.CreateInsertElement(Vec: V, NewElt: Cast, Idx: I);
2388 }
2389 return V;
2390 }
  // If it's a builtin type, just do an extractelement or a load.
2392 assert(LHSTy->isBuiltinType() &&
2393 "Destination type must be a vector or builtin type.");
2394 llvm::Value *Load = CGF.Builder.CreateLoad(Addr: LoadGEPList[0].first, Name: "load");
2395 llvm::Value *Idx = LoadGEPList[0].second;
2396 Load =
2397 Idx ? CGF.Builder.CreateExtractElement(Vec: Load, Idx, Name: "vec.extract") : Load;
  return CGF.EmitScalarConversion(Load, SrcTypes[0], LHSTy, Loc);
2399}
2400
// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function-to-pointer-to-function decay etc.
2404Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2405 Expr *E = CE->getSubExpr();
2406 QualType DestTy = CE->getType();
2407 CastKind Kind = CE->getCastKind();
2408 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2409
2410 // These cases are generally not written to ignore the result of
2411 // evaluating their sub-expressions, so we clear this now.
2412 bool Ignored = TestAndClearIgnoreResultAssign();
2413
2414 // Since almost all cast kinds apply to scalars, this switch doesn't have
2415 // a default case, so the compiler will warn on a missing case. The cases
2416 // are in the same order as in the CastKind enum.
2417 switch (Kind) {
2418 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2419 case CK_BuiltinFnToFnPtr:
2420 llvm_unreachable("builtin functions are handled elsewhere");
2421
2422 case CK_LValueBitCast:
2423 case CK_ObjCObjectLValueCast: {
2424 Address Addr = EmitLValue(E).getAddress();
2425 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2426 LValue LV = CGF.MakeAddrLValue(Addr, T: DestTy);
2427 return EmitLoadOfLValue(LV, Loc: CE->getExprLoc());
2428 }
2429
2430 case CK_LValueToRValueBitCast: {
2431 LValue SourceLVal = CGF.EmitLValue(E);
2432 Address Addr =
2433 SourceLVal.getAddress().withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2434 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2435 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2436 return EmitLoadOfLValue(LV: DestLV, Loc: CE->getExprLoc());
2437 }
2438
2439 case CK_CPointerToObjCPointerCast:
2440 case CK_BlockPointerToObjCPointerCast:
2441 case CK_AnyPointerToBlockPointerCast:
2442 case CK_BitCast: {
2443 Value *Src = Visit(E);
2444 llvm::Type *SrcTy = Src->getType();
2445 llvm::Type *DstTy = ConvertType(T: DestTy);
2446
    // FIXME: this is a gross but seemingly necessary workaround for an issue
    // manifesting when a target uses a non-default AS for indirect sret args,
    // but the source HLL is generic, wherein a valid C-cast or
    // reinterpret_cast on the address of a local struct that gets returned by
    // value yields an invalid bitcast from a pointer to the IndirectAS to a
    // pointer to the DefaultAS. We can only do this subversive thing because
    // sret args are manufactured, and their residing in the IndirectAS is a
    // target-specific detail, and doing an AS cast here still retains the
    // semantics the user expects. It is desirable to remove this iff a better
    // solution is found.
2456 if (auto A = dyn_cast<llvm::Argument>(Val: Src); A && A->hasStructRetAttr())
2457 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2458 CGF, V: Src, SrcAddr: E->getType().getAddressSpace(), DestTy: DstTy);
2459
2460 assert(
2461 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2462 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2463 "Address-space cast must be used to convert address spaces");
2464
2465 if (CGF.SanOpts.has(K: SanitizerKind::CFIUnrelatedCast)) {
2466 if (auto *PT = DestTy->getAs<PointerType>()) {
2467 CGF.EmitVTablePtrCheckForCast(
2468 T: PT->getPointeeType(),
2469 Derived: Address(Src,
2470 CGF.ConvertTypeForMem(
2471 T: E->getType()->castAs<PointerType>()->getPointeeType()),
2472 CGF.getPointerAlign()),
2473 /*MayBeNull=*/true, TCK: CodeGenFunction::CFITCK_UnrelatedCast,
2474 Loc: CE->getBeginLoc());
2475 }
2476 }
2477
2478 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2479 const QualType SrcType = E->getType();
2480
2481 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2482 // Casting to pointer that could carry dynamic information (provided by
2483 // invariant.group) requires launder.
2484 Src = Builder.CreateLaunderInvariantGroup(Ptr: Src);
2485 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2486 // Casting to pointer that does not carry dynamic information (provided
2487 // by invariant.group) requires stripping it. Note that we don't do it
2488 // if the source could not be dynamic type and destination could be
2489 // dynamic because dynamic information is already laundered. It is
2490 // because launder(strip(src)) == launder(src), so there is no need to
2491 // add extra strip before launder.
2492 Src = Builder.CreateStripInvariantGroup(Ptr: Src);
2493 }
2494 }
2495
2496 // Update heapallocsite metadata when there is an explicit pointer cast.
2497 if (auto *CI = dyn_cast<llvm::CallBase>(Val: Src)) {
2498 if (CI->getMetadata(Kind: "heapallocsite") && isa<ExplicitCastExpr>(Val: CE) &&
2499 !isa<CastExpr>(Val: E)) {
2500 QualType PointeeType = DestTy->getPointeeType();
2501 if (!PointeeType.isNull())
2502 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CallSite: CI, AllocatedTy: PointeeType,
2503 Loc: CE->getExprLoc());
2504 }
2505 }
2506
2507 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2508 // same element type, use the llvm.vector.insert intrinsic to perform the
2509 // bitcast.
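    // E.g. a <4 x i32> -> <vscale x 4 x i32> cast emits (roughly):
    //   %cast.scalable = call <vscale x 4 x i32>
    //       @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison,
    //                                         <4 x i32> %src, i64 0)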
2510 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy)) {
2511 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Val: DstTy)) {
2512 // If we are casting a fixed i8 vector to a scalable i1 predicate
2513 // vector, use a vector insert and bitcast the result.
2514 if (ScalableDstTy->getElementType()->isIntegerTy(Bitwidth: 1) &&
2515 FixedSrcTy->getElementType()->isIntegerTy(Bitwidth: 8)) {
2516 ScalableDstTy = llvm::ScalableVectorType::get(
2517 ElementType: FixedSrcTy->getElementType(),
2518 MinNumElts: llvm::divideCeil(
2519 Numerator: ScalableDstTy->getElementCount().getKnownMinValue(), Denominator: 8));
2520 }
2521 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2522 llvm::Value *PoisonVec = llvm::PoisonValue::get(T: ScalableDstTy);
2523 llvm::Value *Result = Builder.CreateInsertVector(
2524 DstType: ScalableDstTy, SrcVec: PoisonVec, SubVec: Src, Idx: uint64_t(0), Name: "cast.scalable");
2525 ScalableDstTy = cast<llvm::ScalableVectorType>(
2526 Val: llvm::VectorType::getWithSizeAndScalar(SizeTy: ScalableDstTy, EltTy: DstTy));
2527 if (Result->getType() != ScalableDstTy)
2528 Result = Builder.CreateBitCast(V: Result, DestTy: ScalableDstTy);
2529 if (Result->getType() != DstTy)
2530 Result = Builder.CreateExtractVector(DstType: DstTy, SrcVec: Result, Idx: uint64_t(0));
2531 return Result;
2532 }
2533 }
2534 }
2535
2536 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2537 // same element type, use the llvm.vector.extract intrinsic to perform the
2538 // bitcast.
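    // E.g. a <vscale x 4 x i32> -> <4 x i32> cast emits (roughly):
    //   %cast.fixed = call <4 x i32>
    //       @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %src,
    //                                          i64 0)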
2539 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(Val: SrcTy)) {
2540 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(Val: DstTy)) {
2541 // If we are casting a scalable i1 predicate vector to a fixed i8
2542 // vector, bitcast the source and use a vector extract.
2543 if (ScalableSrcTy->getElementType()->isIntegerTy(Bitwidth: 1) &&
2544 FixedDstTy->getElementType()->isIntegerTy(Bitwidth: 8)) {
2545 if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(RHS: 8)) {
2546 ScalableSrcTy = llvm::ScalableVectorType::get(
2547 ElementType: ScalableSrcTy->getElementType(),
2548 MinNumElts: llvm::alignTo<8>(
2549 Value: ScalableSrcTy->getElementCount().getKnownMinValue()));
2550 llvm::Value *ZeroVec = llvm::Constant::getNullValue(Ty: ScalableSrcTy);
2551 Src = Builder.CreateInsertVector(DstType: ScalableSrcTy, SrcVec: ZeroVec, SubVec: Src,
2552 Idx: uint64_t(0));
2553 }
2554
2555 ScalableSrcTy = llvm::ScalableVectorType::get(
2556 ElementType: FixedDstTy->getElementType(),
2557 MinNumElts: ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2558 Src = Builder.CreateBitCast(V: Src, DestTy: ScalableSrcTy);
2559 }
2560 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
2561 return Builder.CreateExtractVector(DstType: DstTy, SrcVec: Src, Idx: uint64_t(0),
2562 Name: "cast.fixed");
2563 }
2564 }
2565
2566 // Perform VLAT <-> VLST bitcast through memory.
2567 // TODO: since the llvm.vector.{insert,extract} intrinsics
2568 // require the element types of the vectors to be the same, we
2569 // need to keep this around for bitcasts between VLAT <-> VLST where
2570 // the element types of the vectors are not the same, until we figure
2571 // out a better way of doing these casts.
2572 if ((isa<llvm::FixedVectorType>(Val: SrcTy) &&
2573 isa<llvm::ScalableVectorType>(Val: DstTy)) ||
2574 (isa<llvm::ScalableVectorType>(Val: SrcTy) &&
2575 isa<llvm::FixedVectorType>(Val: DstTy))) {
2576 Address Addr = CGF.CreateDefaultAlignTempAlloca(Ty: SrcTy, Name: "saved-value");
2577 LValue LV = CGF.MakeAddrLValue(Addr, T: E->getType());
2578 CGF.EmitStoreOfScalar(value: Src, lvalue: LV);
2579 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2580 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2581 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2582 return EmitLoadOfLValue(LV: DestLV, Loc: CE->getExprLoc());
2583 }
2584
2585 llvm::Value *Result = Builder.CreateBitCast(V: Src, DestTy: DstTy);
2586 return CGF.authPointerToPointerCast(ResultPtr: Result, SourceType: E->getType(), DestType: DestTy);
2587 }
2588 case CK_AddressSpaceConversion: {
2589 Expr::EvalResult Result;
2590 if (E->EvaluateAsRValue(Result, Ctx: CGF.getContext()) &&
2591 Result.Val.isNullPointer()) {
      // If E has side effects, it is emitted even if its final result is a
      // null pointer. In that case, a DCE pass should be able to
      // eliminate the useless instructions emitted while translating E.
2595 if (Result.HasSideEffects)
2596 Visit(E);
2597 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(
2598 Val: ConvertType(T: DestTy)), QT: DestTy);
2599 }
    // Since the target may map different address spaces in the AST to the
    // same address space, an address space conversion may end up as a bitcast.
2602 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2603 CGF, V: Visit(E), SrcAddr: E->getType()->getPointeeType().getAddressSpace(),
2604 DestTy: ConvertType(T: DestTy));
2605 }
2606 case CK_AtomicToNonAtomic:
2607 case CK_NonAtomicToAtomic:
2608 case CK_UserDefinedConversion:
2609 return Visit(E);
2610
2611 case CK_NoOp: {
2612 return CE->changesVolatileQualification() ? EmitLoadOfLValue(E: CE) : Visit(E);
2613 }
2614
2615 case CK_BaseToDerived: {
2616 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2617 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2618
2619 Address Base = CGF.EmitPointerWithAlignment(Addr: E);
2620 Address Derived =
2621 CGF.GetAddressOfDerivedClass(Value: Base, Derived: DerivedClassDecl,
2622 PathBegin: CE->path_begin(), PathEnd: CE->path_end(),
2623 NullCheckValue: CGF.ShouldNullCheckClassCastValue(CE));
2624
2625 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2626 // performed and the object is not of the derived type.
2627 if (CGF.sanitizePerformTypeCheck())
2628 CGF.EmitTypeCheck(TCK: CodeGenFunction::TCK_DowncastPointer, Loc: CE->getExprLoc(),
2629 Addr: Derived, Type: DestTy->getPointeeType());
2630
2631 if (CGF.SanOpts.has(K: SanitizerKind::CFIDerivedCast))
2632 CGF.EmitVTablePtrCheckForCast(T: DestTy->getPointeeType(), Derived,
2633 /*MayBeNull=*/true,
2634 TCK: CodeGenFunction::CFITCK_DerivedCast,
2635 Loc: CE->getBeginLoc());
2636
2637 return CGF.getAsNaturalPointerTo(Addr: Derived, PointeeType: CE->getType()->getPointeeType());
2638 }
2639 case CK_UncheckedDerivedToBase:
2640 case CK_DerivedToBase: {
2641 // The EmitPointerWithAlignment path does this fine; just discard
2642 // the alignment.
2643 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitPointerWithAlignment(Addr: CE),
2644 PointeeType: CE->getType()->getPointeeType());
2645 }
2646
2647 case CK_Dynamic: {
2648 Address V = CGF.EmitPointerWithAlignment(Addr: E);
2649 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(Val: CE);
2650 return CGF.EmitDynamicCast(V, DCE);
2651 }
2652
2653 case CK_ArrayToPointerDecay:
2654 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitArrayToPointerDecay(Array: E),
2655 PointeeType: CE->getType()->getPointeeType());
2656 case CK_FunctionToPointerDecay:
2657 return EmitLValue(E).getPointer(CGF);
2658
2659 case CK_NullToPointer:
2660 if (MustVisitNullValue(E))
2661 CGF.EmitIgnoredExpr(E);
2662
2663 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(Val: ConvertType(T: DestTy)),
2664 QT: DestTy);
2665
2666 case CK_NullToMemberPointer: {
2667 if (MustVisitNullValue(E))
2668 CGF.EmitIgnoredExpr(E);
2669
2670 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2671 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2672 }
2673
2674 case CK_ReinterpretMemberPointer:
2675 case CK_BaseToDerivedMemberPointer:
2676 case CK_DerivedToBaseMemberPointer: {
2677 Value *Src = Visit(E);
2678
2679 // Note that the AST doesn't distinguish between checked and
2680 // unchecked member pointer conversions, so we always have to
2681 // implement checked conversions here. This is inefficient when
2682 // actual control flow may be required in order to perform the
2683 // check, which it is for data member pointers (but not member
2684 // function pointers on Itanium and ARM).
2685 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, E: CE, Src);
2686 }
2687
2688 case CK_ARCProduceObject:
2689 return CGF.EmitARCRetainScalarExpr(expr: E);
2690 case CK_ARCConsumeObject:
2691 return CGF.EmitObjCConsumeObject(T: E->getType(), Ptr: Visit(E));
2692 case CK_ARCReclaimReturnedObject:
2693 return CGF.EmitARCReclaimReturnedObject(e: E, /*allowUnsafe*/ allowUnsafeClaim: Ignored);
2694 case CK_ARCExtendBlockObject:
2695 return CGF.EmitARCExtendBlockObject(expr: E);
2696
2697 case CK_CopyAndAutoreleaseBlockObject:
2698 return CGF.EmitBlockCopyAndAutorelease(Block: Visit(E), Ty: E->getType());
2699
2700 case CK_FloatingRealToComplex:
2701 case CK_FloatingComplexCast:
2702 case CK_IntegralRealToComplex:
2703 case CK_IntegralComplexCast:
2704 case CK_IntegralComplexToFloatingComplex:
2705 case CK_FloatingComplexToIntegralComplex:
2706 case CK_ConstructorConversion:
2707 case CK_ToUnion:
2708 case CK_HLSLArrayRValue:
2709 llvm_unreachable("scalar cast to non-scalar value");
2710
2711 case CK_LValueToRValue:
2712 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2713 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2714 return Visit(E);
2715
2716 case CK_IntegralToPointer: {
2717 Value *Src = Visit(E);
2718
2719 // First, convert to the correct width so that we control the kind of
2720 // extension.
2721 auto DestLLVMTy = ConvertType(T: DestTy);
2722 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2723 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2724 llvm::Value* IntResult =
2725 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
2726
2727 auto *IntToPtr = Builder.CreateIntToPtr(V: IntResult, DestTy: DestLLVMTy);
2728
2729 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2730      // Going from an integer to a pointer to a possibly-dynamic class
2731      // requires reloading the dynamic information from invariant.group.
2732 if (DestTy.mayBeDynamicClass())
2733 IntToPtr = Builder.CreateLaunderInvariantGroup(Ptr: IntToPtr);
2734 }
2735
2736 IntToPtr = CGF.authPointerToPointerCast(ResultPtr: IntToPtr, SourceType: E->getType(), DestType: DestTy);
2737 return IntToPtr;
2738 }
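  // For illustration (a sketch, assuming a 64-bit target and ignoring
  // pointer authentication and -fstrict-vtable-pointers), the
  // CK_IntegralToPointer case above turns
  //   char *p = (char *)(short)s;
  // into roughly:
  //   %conv = sext i16 %s to i64
  //   %p    = inttoptr i64 %conv to ptr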
2739 case CK_PointerToIntegral: {
2740 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2741 auto *PtrExpr = Visit(E);
2742
2743 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2744 const QualType SrcType = E->getType();
2745
2746      // Casting to an integer requires stripping dynamic information,
2747      // since integers do not carry it.
2748 if (SrcType.mayBeDynamicClass())
2749 PtrExpr = Builder.CreateStripInvariantGroup(Ptr: PtrExpr);
2750 }
2751
2752 PtrExpr = CGF.authPointerToPointerCast(ResultPtr: PtrExpr, SourceType: E->getType(), DestType: DestTy);
2753 return Builder.CreatePtrToInt(V: PtrExpr, DestTy: ConvertType(T: DestTy));
2754 }
2755 case CK_ToVoid: {
2756 CGF.EmitIgnoredExpr(E);
2757 return nullptr;
2758 }
2759 case CK_MatrixCast: {
2760 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2761 Loc: CE->getExprLoc());
2762 }
2763  // CK_HLSLAggregateSplatCast only handles splatting to vectors from a
2764  // vec1. Sema inserted casts to convert the source expression to a scalar
2765  // and to perform any necessary scalar conversion, so this cast can be
2766  // handled by the regular vector-splat code below.
2767 case CK_HLSLAggregateSplatCast:
2768 case CK_VectorSplat: {
2769 llvm::Type *DstTy = ConvertType(T: DestTy);
2770 Value *Elt = Visit(E);
2771 // Splat the element across to all elements
2772 llvm::ElementCount NumElements =
2773 cast<llvm::VectorType>(Val: DstTy)->getElementCount();
2774 return Builder.CreateVectorSplat(EC: NumElements, V: Elt, Name: "splat");
2775 }
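  // The vector-splat lowering above is, roughly (a sketch; exact value names
  // vary), an insertelement plus a zero-mask shufflevector. For example, an
  // OpenCL-style splat such as
  //   float4 v = (float4)(f);
  // becomes:
  //   %splat.splatinsert = insertelement <4 x float> poison, float %f, i64 0
  //   %splat = shufflevector <4 x float> %splat.splatinsert,
  //                          <4 x float> poison, <4 x i32> zeroinitializer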
2776
2777 case CK_FixedPointCast:
2778 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2779 Loc: CE->getExprLoc());
2780
2781 case CK_FixedPointToBoolean:
2782 assert(E->getType()->isFixedPointType() &&
2783 "Expected src type to be fixed point type");
2784 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2785 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2786 Loc: CE->getExprLoc());
2787
2788 case CK_FixedPointToIntegral:
2789 assert(E->getType()->isFixedPointType() &&
2790 "Expected src type to be fixed point type");
2791 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2792 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2793 Loc: CE->getExprLoc());
2794
2795 case CK_IntegralToFixedPoint:
2796 assert(E->getType()->isIntegerType() &&
2797 "Expected src type to be an integer");
2798 assert(DestTy->isFixedPointType() &&
2799 "Expected dest type to be fixed point type");
2800 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2801 Loc: CE->getExprLoc());
2802
2803 case CK_IntegralCast: {
2804 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2805 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2806 return Builder.CreateIntCast(V: Visit(E), DestTy: ConvertType(T: DestTy),
2807 isSigned: SrcElTy->isSignedIntegerOrEnumerationType(),
2808 Name: "conv");
2809 }
2810 ScalarConversionOpts Opts;
2811 if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2812 if (!ICE->isPartOfExplicitCast())
2813 Opts = ScalarConversionOpts(CGF.SanOpts);
2814 }
2815 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2816 Loc: CE->getExprLoc(), Opts);
2817 }
2818 case CK_IntegralToFloating: {
2819 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2820 // TODO: Support constrained FP intrinsics.
2821 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2822 if (SrcElTy->isSignedIntegerOrEnumerationType())
2823 return Builder.CreateSIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2824 return Builder.CreateUIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2825 }
2826 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2827 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2828 Loc: CE->getExprLoc());
2829 }
2830 case CK_FloatingToIntegral: {
2831 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2832 // TODO: Support constrained FP intrinsics.
2833 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2834 if (DstElTy->isSignedIntegerOrEnumerationType())
2835 return Builder.CreateFPToSI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2836 return Builder.CreateFPToUI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2837 }
2838 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2839 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2840 Loc: CE->getExprLoc());
2841 }
2842 case CK_FloatingCast: {
2843 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2844 // TODO: Support constrained FP intrinsics.
2845 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2846 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2847 if (DstElTy->castAs<BuiltinType>()->getKind() <
2848 SrcElTy->castAs<BuiltinType>()->getKind())
2849 return Builder.CreateFPTrunc(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2850 return Builder.CreateFPExt(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2851 }
2852 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2853 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2854 Loc: CE->getExprLoc());
2855 }
2856 case CK_FixedPointToFloating:
2857 case CK_FloatingToFixedPoint: {
2858 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2859 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2860 Loc: CE->getExprLoc());
2861 }
2862 case CK_BooleanToSignedIntegral: {
2863 ScalarConversionOpts Opts;
2864 Opts.TreatBooleanAsSigned = true;
2865 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2866 Loc: CE->getExprLoc(), Opts);
2867 }
2868 case CK_IntegralToBoolean:
2869 return EmitIntToBoolConversion(V: Visit(E));
2870 case CK_PointerToBoolean:
2871 return EmitPointerToBoolConversion(V: Visit(E), QT: E->getType());
2872 case CK_FloatingToBoolean: {
2873 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2874 return EmitFloatToBoolConversion(V: Visit(E));
2875 }
2876 case CK_MemberPointerToBoolean: {
2877 llvm::Value *MemPtr = Visit(E);
2878 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2879 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2880 }
2881
2882 case CK_FloatingComplexToReal:
2883 case CK_IntegralComplexToReal:
2884 return CGF.EmitComplexExpr(E, IgnoreReal: false, IgnoreImag: true).first;
2885
2886 case CK_FloatingComplexToBoolean:
2887 case CK_IntegralComplexToBoolean: {
2888 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2889
2890 // TODO: kill this function off, inline appropriate case here
2891 return EmitComplexToScalarConversion(Src: V, SrcTy: E->getType(), DstTy: DestTy,
2892 Loc: CE->getExprLoc());
2893 }
2894
2895 case CK_ZeroToOCLOpaqueType: {
2896 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2897 DestTy->isOCLIntelSubgroupAVCType()) &&
2898           "CK_ZeroToOCLOpaqueType cast on a non-OpenCL opaque type");
2899 return llvm::Constant::getNullValue(Ty: ConvertType(T: DestTy));
2900 }
2901
2902 case CK_IntToOCLSampler:
2903 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2904
2905 case CK_HLSLVectorTruncation: {
2906 assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
2907 "Destination type must be a vector or builtin type.");
2908 Value *Vec = Visit(E);
2909 if (auto *VecTy = DestTy->getAs<VectorType>()) {
2910 SmallVector<int> Mask;
2911 unsigned NumElts = VecTy->getNumElements();
2912 for (unsigned I = 0; I != NumElts; ++I)
2913 Mask.push_back(Elt: I);
2914
2915 return Builder.CreateShuffleVector(V: Vec, Mask, Name: "trunc");
2916 }
2917 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGF.SizeTy);
2918 return Builder.CreateExtractElement(Vec, Idx: Zero, Name: "cast.vtrunc");
2919 }
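  // For illustration (a sketch; exact IR may vary), the vector-truncation
  // case above turns a float4-to-float2 truncation into
  //   %trunc = shufflevector <4 x float> %v, <4 x float> poison,
  //                          <2 x i32> <i32 0, i32 1>
  // while truncation to a scalar extracts element zero:
  //   %cast.vtrunc = extractelement <4 x float> %v, i64 0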
2920 case CK_HLSLElementwiseCast: {
2921 RValue RV = CGF.EmitAnyExpr(E);
2922 SourceLocation Loc = CE->getExprLoc();
2923 QualType SrcTy = E->getType();
2924
2925 assert(RV.isAggregate() && "Not a valid HLSL Elementwise Cast.");
2926 // RHS is an aggregate
2927 Address SrcVal = RV.getAggregateAddress();
2928 return EmitHLSLElementwiseCast(CGF, RHSVal: SrcVal, RHSTy: SrcTy, LHSTy: DestTy, Loc);
2929 }
2930 } // end of switch
2931
2932 llvm_unreachable("unknown scalar cast");
2933}
2934
2935Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2936 CodeGenFunction::StmtExprEvaluation eval(CGF);
2937 Address RetAlloca = CGF.EmitCompoundStmt(S: *E->getSubStmt(),
2938 GetLast: !E->getType()->isVoidType());
2939 if (!RetAlloca.isValid())
2940 return nullptr;
2941 return CGF.EmitLoadOfScalar(lvalue: CGF.MakeAddrLValue(Addr: RetAlloca, T: E->getType()),
2942 Loc: E->getExprLoc());
2943}
2944
2945Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2946 CodeGenFunction::RunCleanupsScope Scope(CGF);
2947 Value *V = Visit(E: E->getSubExpr());
2948 // Defend against dominance problems caused by jumps out of expression
2949 // evaluation through the shared cleanup block.
2950 Scope.ForceCleanup(ValuesToReload: {&V});
2951 return V;
2952}
2953
2954//===----------------------------------------------------------------------===//
2955// Unary Operators
2956//===----------------------------------------------------------------------===//
2957
2958static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2959 llvm::Value *InVal, bool IsInc,
2960 FPOptions FPFeatures) {
2961 BinOpInfo BinOp;
2962 BinOp.LHS = InVal;
2963 BinOp.RHS = llvm::ConstantInt::get(Ty: InVal->getType(), V: 1, IsSigned: false);
2964 BinOp.Ty = E->getType();
2965 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2966 BinOp.FPFeatures = FPFeatures;
2967 BinOp.E = E;
2968 return BinOp;
2969}
2970
2971llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2972 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2973 llvm::Value *Amount =
2974 llvm::ConstantInt::get(Ty: InVal->getType(), V: IsInc ? 1 : -1, IsSigned: true);
2975 StringRef Name = IsInc ? "inc" : "dec";
2976 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2977 case LangOptions::SOB_Defined:
2978 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
2979 return Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
2980 [[fallthrough]];
2981 case LangOptions::SOB_Undefined:
2982 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
2983 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2984 [[fallthrough]];
2985 case LangOptions::SOB_Trapping:
2986 BinOpInfo Info = createBinOpInfoFromIncDec(
2987 E, InVal, IsInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
2988 if (!E->canOverflow() || CanElideOverflowCheck(Ctx: CGF.getContext(), Op: Info))
2989 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2990 return EmitOverflowCheckedBinOp(Ops: Info);
2991 }
2992 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2993}
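
// For illustration of EmitIncDecConsiderOverflowBehavior above: given
// 'int x; ++x;', the emitted arithmetic depends on the signed-overflow mode
// (a sketch; sanitizers can change this):
//   -fwrapv (SOB_Defined):    %inc = add i32 %x, 1
//   default (SOB_Undefined):  %inc = add nsw i32 %x, 1
//   -ftrapv (SOB_Trapping):   a call of @llvm.sadd.with.overflow.i32 plus a
//                             branch to the overflow handler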
2994
2995/// For the purposes of overflow pattern exclusion, does this match the
2996/// "while(i--)" pattern?
2997static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc,
2998 bool isPre, ASTContext &Ctx) {
2999 if (isInc || isPre)
3000 return false;
3001
3002 // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
3003 if (!Ctx.getLangOpts().isOverflowPatternExcluded(
3004 Kind: LangOptions::OverflowPatternExclusionKind::PostDecrInWhile))
3005 return false;
3006
3007  // All parents (usually just one) must be a WhileStmt.
3008 for (const auto &Parent : Ctx.getParentMapContext().getParents(Node: *UO))
3009 if (!Parent.get<WhileStmt>())
3010 return false;
3011
3012 return true;
3013}
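
// For example, with
//   -fsanitize=unsigned-integer-overflow
//   -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
// the decrement in
//   unsigned i = n;
//   while (i--) { /* ... */ }
// is not instrumented, even though 'i' wraps to UINT_MAX when the loop
// terminates.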
3014
3015namespace {
3016/// Handles check and update for lastprivate conditional variables.
3017class OMPLastprivateConditionalUpdateRAII {
3018private:
3019 CodeGenFunction &CGF;
3020 const UnaryOperator *E;
3021
3022public:
3023 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
3024 const UnaryOperator *E)
3025 : CGF(CGF), E(E) {}
3026 ~OMPLastprivateConditionalUpdateRAII() {
3027 if (CGF.getLangOpts().OpenMP)
3028 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
3029 CGF, LHS: E->getSubExpr());
3030 }
3031};
3032} // namespace
3033
3034llvm::Value *
3035ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3036 bool isInc, bool isPre) {
3037 ApplyAtomGroup Grp(CGF.getDebugInfo());
3038 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3039 QualType type = E->getSubExpr()->getType();
3040 llvm::PHINode *atomicPHI = nullptr;
3041 llvm::Value *value;
3042 llvm::Value *input;
3043 llvm::Value *Previous = nullptr;
3044 QualType SrcType = E->getType();
3045
3046 int amount = (isInc ? 1 : -1);
3047 bool isSubtraction = !isInc;
3048
3049 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3050 type = atomicTy->getValueType();
3051 if (isInc && type->isBooleanType()) {
3052 llvm::Value *True = CGF.EmitToMemory(Value: Builder.getTrue(), Ty: type);
3053 if (isPre) {
3054 Builder.CreateStore(Val: True, Addr: LV.getAddress(), IsVolatile: LV.isVolatileQualified())
3055 ->setAtomic(Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3056 return Builder.getTrue();
3057 }
3058      // For preincrement we stored true above and returned it; for
3059      // postincrement, do an atomic swap with true and return the old value.
3060 return Builder.CreateAtomicRMW(
3061 Op: llvm::AtomicRMWInst::Xchg, Addr: LV.getAddress(), Val: True,
3062 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3063 }
3064    // Special case for atomic increment/decrement on integers: emit
3065    // atomicrmw instructions. We skip this when overflow checking is
3066    // requested and fall into the slow path with the atomic cmpxchg loop.
3067 if (!type->isBooleanType() && type->isIntegerType() &&
3068 !(type->isUnsignedIntegerType() &&
3069 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
3070 CGF.getLangOpts().getSignedOverflowBehavior() !=
3071 LangOptions::SOB_Trapping) {
3072 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3073 llvm::AtomicRMWInst::Sub;
3074 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3075 llvm::Instruction::Sub;
3076 llvm::Value *amt = CGF.EmitToMemory(
3077 Value: llvm::ConstantInt::get(Ty: ConvertType(T: type), V: 1, IsSigned: true), Ty: type);
3078 llvm::Value *old =
3079 Builder.CreateAtomicRMW(Op: aop, Addr: LV.getAddress(), Val: amt,
3080 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3081 return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
3082 }
3083 // Special case for atomic increment/decrement on floats.
3084    // Bail out on non-power-of-2-sized floating point types (e.g., x86_fp80).
3085 if (type->isFloatingType()) {
3086 llvm::Type *Ty = ConvertType(T: type);
3087 if (llvm::has_single_bit(Value: Ty->getScalarSizeInBits())) {
3088 llvm::AtomicRMWInst::BinOp aop =
3089 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3090 llvm::Instruction::BinaryOps op =
3091 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3092 llvm::Value *amt = llvm::ConstantFP::get(Ty, V: 1.0);
3093 llvm::AtomicRMWInst *old =
3094 CGF.emitAtomicRMWInst(Op: aop, Addr: LV.getAddress(), Val: amt,
3095 Order: llvm::AtomicOrdering::SequentiallyConsistent);
3096
3097 return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
3098 }
3099 }
3100 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
3101 input = value;
3102    // For every other atomic operation, we need a load-op-cmpxchg loop.
3103 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3104 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
3105 value = CGF.EmitToMemory(Value: value, Ty: type);
3106 Builder.CreateBr(Dest: opBB);
3107 Builder.SetInsertPoint(opBB);
3108 atomicPHI = Builder.CreatePHI(Ty: value->getType(), NumReservedValues: 2);
3109 atomicPHI->addIncoming(V: value, BB: startBB);
3110 value = atomicPHI;
3111 } else {
3112 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
3113 input = value;
3114 }
3115
3116 // Special case of integer increment that we have to check first: bool++.
3117 // Due to promotion rules, we get:
3118 // bool++ -> bool = bool + 1
3119 // -> bool = (int)bool + 1
3120 // -> bool = ((int)bool + 1 != 0)
3121 // An interesting aspect of this is that increment is always true.
3122 // Decrement does not have this property.
3123 if (isInc && type->isBooleanType()) {
3124 value = Builder.getTrue();
3125
3126 // Most common case by far: integer increment.
3127 } else if (type->isIntegerType()) {
3128 QualType promotedType;
3129 bool canPerformLossyDemotionCheck = false;
3130
3131 bool excludeOverflowPattern =
3132 matchesPostDecrInWhile(UO: E, isInc, isPre, Ctx&: CGF.getContext());
3133
3134 if (CGF.getContext().isPromotableIntegerType(T: type)) {
3135 promotedType = CGF.getContext().getPromotedIntegerType(PromotableType: type);
3136 assert(promotedType != type && "Shouldn't promote to the same type.");
3137 canPerformLossyDemotionCheck = true;
3138 canPerformLossyDemotionCheck &=
3139 CGF.getContext().getCanonicalType(T: type) !=
3140 CGF.getContext().getCanonicalType(T: promotedType);
3141 canPerformLossyDemotionCheck &=
3142 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
3143 SrcType: type, DstType: promotedType);
3144 assert((!canPerformLossyDemotionCheck ||
3145 type->isSignedIntegerOrEnumerationType() ||
3146 promotedType->isSignedIntegerOrEnumerationType() ||
3147 ConvertType(type)->getScalarSizeInBits() ==
3148 ConvertType(promotedType)->getScalarSizeInBits()) &&
3149 "The following check expects that if we do promotion to different "
3150 "underlying canonical type, at least one of the types (either "
3151 "base or promoted) will be signed, or the bitwidths will match.");
3152 }
3153 if (CGF.SanOpts.hasOneOf(
3154 K: SanitizerKind::ImplicitIntegerArithmeticValueChange |
3155 SanitizerKind::ImplicitBitfieldConversion) &&
3156 canPerformLossyDemotionCheck) {
3157      // While `x += 1` (for `x` with width less than int) is modeled as
3158      // promotion+arithmetic+demotion, where we can catch lossy demotion
3159      // with ease, inc/dec with width less than int can't overflow because
3160      // of promotion rules, so we normally omit the promotion and demotion
3161      // and therefore cannot catch lossy "demotion". Because we still want
3162      // to catch these cases when the sanitizer is enabled, we perform the
3163      // promotion, then the increment/decrement in the wider type, and
3164      // finally the demotion. This will catch lossy demotions.
3165
3166 // We have a special case for bitfields defined using all the bits of the
3167 // type. In this case we need to do the same trick as for the integer
3168 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
3169
3170 value = EmitScalarConversion(Src: value, SrcType: type, DstType: promotedType, Loc: E->getExprLoc());
3171 Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
3172 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
3173      // Pass non-default ScalarConversionOpts so that the sanitizer check
3174      // is emitted if LV is not a bitfield; otherwise, the bitfield
3175      // sanitizer checks will take care of the conversion.
3176 ScalarConversionOpts Opts;
3177 if (!LV.isBitField())
3178 Opts = ScalarConversionOpts(CGF.SanOpts);
3179 else if (CGF.SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion)) {
3180 Previous = value;
3181 SrcType = promotedType;
3182 }
3183
3184 value = EmitScalarConversion(Src: value, SrcType: promotedType, DstType: type, Loc: E->getExprLoc(),
3185 Opts);
3186
3187 // Note that signed integer inc/dec with width less than int can't
3188 // overflow because of promotion rules; we're just eliding a few steps
3189 // here.
3190 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
3191 value = EmitIncDecConsiderOverflowBehavior(E, InVal: value, IsInc: isInc);
3192 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
3193 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
3194 !excludeOverflowPattern &&
3195 !CGF.getContext().isTypeIgnoredBySanitizer(
3196 Mask: SanitizerKind::UnsignedIntegerOverflow, Ty: E->getType())) {
3197 value = EmitOverflowCheckedBinOp(Ops: createBinOpInfoFromIncDec(
3198 E, InVal: value, IsInc: isInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts())));
3199 } else {
3200 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
3201 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
3202 }
3203
3204 // Next most common: pointer increment.
3205 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3206 QualType type = ptr->getPointeeType();
3207
3208 // VLA types don't have constant size.
3209 if (const VariableArrayType *vla
3210 = CGF.getContext().getAsVariableArrayType(T: type)) {
3211 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3212 if (!isInc) numElts = Builder.CreateNSWNeg(V: numElts, Name: "vla.negsize");
3213 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: vla->getElementType());
3214 if (CGF.getLangOpts().PointerOverflowDefined)
3215 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: numElts, Name: "vla.inc");
3216 else
3217 value = CGF.EmitCheckedInBoundsGEP(
3218 ElemTy: elemTy, Ptr: value, IdxList: numElts, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3219 Loc: E->getExprLoc(), Name: "vla.inc");
3220
3221 // Arithmetic on function pointers (!) is just +-1.
3222 } else if (type->isFunctionType()) {
3223 llvm::Value *amt = Builder.getInt32(C: amount);
3224
3225 if (CGF.getLangOpts().PointerOverflowDefined)
3226 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: amt, Name: "incdec.funcptr");
3227 else
3228 value =
3229 CGF.EmitCheckedInBoundsGEP(ElemTy: CGF.Int8Ty, Ptr: value, IdxList: amt,
3230 /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3231 Loc: E->getExprLoc(), Name: "incdec.funcptr");
3232
3233 // For everything else, we can just do a simple increment.
3234 } else {
3235 llvm::Value *amt = Builder.getInt32(C: amount);
3236 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: type);
3237 if (CGF.getLangOpts().PointerOverflowDefined)
3238 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: amt, Name: "incdec.ptr");
3239 else
3240 value = CGF.EmitCheckedInBoundsGEP(
3241 ElemTy: elemTy, Ptr: value, IdxList: amt, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3242 Loc: E->getExprLoc(), Name: "incdec.ptr");
3243 }
3244
3245 // Vector increment/decrement.
3246 } else if (type->isVectorType()) {
3247 if (type->hasIntegerRepresentation()) {
3248 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount);
3249
3250 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
3251 } else {
3252 value = Builder.CreateFAdd(
3253 L: value,
3254 R: llvm::ConstantFP::get(Ty: value->getType(), V: amount),
3255 Name: isInc ? "inc" : "dec");
3256 }
3257
3258 // Floating point.
3259 } else if (type->isRealFloatingType()) {
3260 // Add the inc/dec to the real part.
3261 llvm::Value *amt;
3262 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3263
3264 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3265 // Another special case: half FP increment should be done via float
3266 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
3267 value = Builder.CreateCall(
3268 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_from_fp16,
3269 Tys: CGF.CGM.FloatTy),
3270 Args: input, Name: "incdec.conv");
3271 } else {
3272 value = Builder.CreateFPExt(V: input, DestTy: CGF.CGM.FloatTy, Name: "incdec.conv");
3273 }
3274 }
3275
3276 if (value->getType()->isFloatTy())
3277 amt = llvm::ConstantFP::get(Context&: VMContext,
3278 V: llvm::APFloat(static_cast<float>(amount)));
3279 else if (value->getType()->isDoubleTy())
3280 amt = llvm::ConstantFP::get(Context&: VMContext,
3281 V: llvm::APFloat(static_cast<double>(amount)));
3282 else {
3283 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3284 // Convert from float.
3285 llvm::APFloat F(static_cast<float>(amount));
3286 bool ignored;
3287 const llvm::fltSemantics *FS;
3288 // Don't use getFloatTypeSemantics because Half isn't
3289 // necessarily represented using the "half" LLVM type.
3290 if (value->getType()->isFP128Ty())
3291 FS = &CGF.getTarget().getFloat128Format();
3292 else if (value->getType()->isHalfTy())
3293 FS = &CGF.getTarget().getHalfFormat();
3294 else if (value->getType()->isBFloatTy())
3295 FS = &CGF.getTarget().getBFloat16Format();
3296 else if (value->getType()->isPPC_FP128Ty())
3297 FS = &CGF.getTarget().getIbm128Format();
3298 else
3299 FS = &CGF.getTarget().getLongDoubleFormat();
3300 F.convert(ToSemantics: *FS, RM: llvm::APFloat::rmTowardZero, losesInfo: &ignored);
3301 amt = llvm::ConstantFP::get(Context&: VMContext, V: F);
3302 }
3303 value = Builder.CreateFAdd(L: value, R: amt, Name: isInc ? "inc" : "dec");
3304
3305 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3306 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
3307 value = Builder.CreateCall(
3308 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_to_fp16,
3309 Tys: CGF.CGM.FloatTy),
3310 Args: value, Name: "incdec.conv");
3311 } else {
3312 value = Builder.CreateFPTrunc(V: value, DestTy: input->getType(), Name: "incdec.conv");
3313 }
3314 }
3315
3316 // Fixed-point types.
3317 } else if (type->isFixedPointType()) {
3318 // Fixed-point types are tricky. In some cases, it isn't possible to
3319 // represent a 1 or a -1 in the type at all. Piggyback off of
3320 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3321 BinOpInfo Info;
3322 Info.E = E;
3323 Info.Ty = E->getType();
3324 Info.Opcode = isInc ? BO_Add : BO_Sub;
3325 Info.LHS = value;
3326 Info.RHS = llvm::ConstantInt::get(Ty: value->getType(), V: 1, IsSigned: false);
3327 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3328 // since -1 is guaranteed to be representable.
3329 if (type->isSignedFixedPointType()) {
3330 Info.Opcode = isInc ? BO_Sub : BO_Add;
3331 Info.RHS = Builder.CreateNeg(V: Info.RHS);
3332 }
3333 // Now, convert from our invented integer literal to the type of the unary
3334 // op. This will upscale and saturate if necessary. This value can become
3335 // undef in some cases.
3336 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3337 auto DstSema = CGF.getContext().getFixedPointSemantics(Ty: Info.Ty);
3338 Info.RHS = FPBuilder.CreateIntegerToFixed(Src: Info.RHS, SrcIsSigned: true, DstSema);
3339 value = EmitFixedPointBinOp(Ops: Info);
3340
3341 // Objective-C pointer types.
3342 } else {
3343 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3344
3345 CharUnits size = CGF.getContext().getTypeSizeInChars(T: OPT->getObjectType());
3346 if (!isInc) size = -size;
3347 llvm::Value *sizeValue =
3348 llvm::ConstantInt::get(Ty: CGF.SizeTy, V: size.getQuantity());
3349
3350 if (CGF.getLangOpts().PointerOverflowDefined)
3351 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, Name: "incdec.objptr");
3352 else
3353 value = CGF.EmitCheckedInBoundsGEP(
3354 ElemTy: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3355 Loc: E->getExprLoc(), Name: "incdec.objptr");
3356 value = Builder.CreateBitCast(V: value, DestTy: input->getType());
3357 }
3358
3359 if (atomicPHI) {
3360 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3361 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
3362 auto Pair = CGF.EmitAtomicCompareExchange(
3363 Obj: LV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: value), Loc: E->getExprLoc());
3364 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: type);
3365 llvm::Value *success = Pair.second;
3366 atomicPHI->addIncoming(V: old, BB: curBlock);
3367 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
3368 Builder.SetInsertPoint(contBB);
3369 return isPre ? value : input;
3370 }
3371
3372 // Store the updated result through the lvalue.
3373 if (LV.isBitField()) {
3374 Value *Src = Previous ? Previous : value;
3375 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: value), Dst: LV, Result: &value);
3376 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: value, DstType: E->getType(),
3377 Info: LV.getBitFieldInfo(), Loc: E->getExprLoc());
3378 } else
3379 CGF.EmitStoreThroughLValue(Src: RValue::get(V: value), Dst: LV);
3380
3381 // If this is a postinc, return the value read from memory, otherwise use the
3382 // updated value.
3383 return isPre ? value : input;
3384}
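
// As an illustration of the pointer case above (a sketch; exact IR depends
// on options): for 'int *p; ++p;' we emit
//   %incdec.ptr = getelementptr inbounds i32, ptr %p, i32 1
// and under PointerOverflowDefined (e.g. -fwrapv-pointer) the same GEP
// without the inbounds flag.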
3385
3386
3387Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3388 QualType PromotionType) {
3389 QualType promotionTy = PromotionType.isNull()
3390 ? getPromotionType(Ty: E->getSubExpr()->getType())
3391 : PromotionType;
3392 Value *result = VisitPlus(E, PromotionType: promotionTy);
3393 if (result && !promotionTy.isNull())
3394 result = EmitUnPromotedValue(result, ExprType: E->getType());
3395 return result;
3396}
3397
3398Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3399 QualType PromotionType) {
3400 // This differs from gcc, though, most likely due to a bug in gcc.
3401 TestAndClearIgnoreResultAssign();
3402 if (!PromotionType.isNull())
3403 return CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3404 return Visit(E: E->getSubExpr());
3405}
3406
3407Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3408 QualType PromotionType) {
3409 QualType promotionTy = PromotionType.isNull()
3410 ? getPromotionType(Ty: E->getSubExpr()->getType())
3411 : PromotionType;
3412 Value *result = VisitMinus(E, PromotionType: promotionTy);
3413 if (result && !promotionTy.isNull())
3414 result = EmitUnPromotedValue(result, ExprType: E->getType());
3415 return result;
3416}
3417
3418Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3419 QualType PromotionType) {
3420 TestAndClearIgnoreResultAssign();
3421 Value *Op;
3422 if (!PromotionType.isNull())
3423 Op = CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3424 else
3425 Op = Visit(E: E->getSubExpr());
3426
3427 // Generate a unary FNeg for FP ops.
3428 if (Op->getType()->isFPOrFPVectorTy())
3429 return Builder.CreateFNeg(V: Op, Name: "fneg");
3430
3431 // Emit unary minus with EmitSub so we handle overflow cases etc.
3432 BinOpInfo BinOp;
3433 BinOp.RHS = Op;
3434 BinOp.LHS = llvm::Constant::getNullValue(Ty: BinOp.RHS->getType());
3435 BinOp.Ty = E->getType();
3436 BinOp.Opcode = BO_Sub;
3437 BinOp.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3438 BinOp.E = E;
3439 return EmitSub(Ops: BinOp);
3440}
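
// For illustration (a sketch): '-f' on a float emits a single fneg,
//   %fneg = fneg float %f
// while '-i' on a signed int goes through EmitSub as '0 - i' so that nsw,
// -ftrapv, and sanitizer handling match binary subtraction:
//   %sub = sub nsw i32 0, %i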
3441
3442Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3443 TestAndClearIgnoreResultAssign();
3444 Value *Op = Visit(E: E->getSubExpr());
3445 return Builder.CreateNot(V: Op, Name: "not");
3446}
3447
3448Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3449 // Perform vector logical not on comparison with zero vector.
3450 if (E->getType()->isVectorType() &&
3451 E->getType()->castAs<VectorType>()->getVectorKind() ==
3452 VectorKind::Generic) {
3453 Value *Oper = Visit(E: E->getSubExpr());
3454 Value *Zero = llvm::Constant::getNullValue(Ty: Oper->getType());
3455 Value *Result;
3456 if (Oper->getType()->isFPOrFPVectorTy()) {
3457 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3458 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
3459 Result = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_OEQ, LHS: Oper, RHS: Zero, Name: "cmp");
3460 } else
3461 Result = Builder.CreateICmp(P: llvm::CmpInst::ICMP_EQ, LHS: Oper, RHS: Zero, Name: "cmp");
3462 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
3463 }
3464
3465 // Compare operand to zero.
3466 Value *BoolVal = CGF.EvaluateExprAsBool(E: E->getSubExpr());
3467
3468 // Invert value.
3469 // TODO: Could dynamically modify easy computations here. For example, if
3470 // the operand is an icmp ne, turn into icmp eq.
3471 BoolVal = Builder.CreateNot(V: BoolVal, Name: "lnot");
3472
3473 // ZExt result to the expr type.
3474 return Builder.CreateZExt(V: BoolVal, DestTy: ConvertType(T: E->getType()), Name: "lnot.ext");
3475}
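
// For illustration (a sketch; exact value names vary): for a scalar 'int x',
// '!x' emits a comparison, a negation, and a zero-extension:
//   %tobool   = icmp ne i32 %x, 0
//   %lnot     = xor i1 %tobool, true
//   %lnot.ext = zext i1 %lnot to i32
// For a generic vector, the operand is instead compared against a zero
// vector and the i1 mask is sign-extended back to the element type.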
3476
3477Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3478 // Try folding the offsetof to a constant.
3479 Expr::EvalResult EVResult;
3480 if (E->EvaluateAsInt(Result&: EVResult, Ctx: CGF.getContext())) {
3481 llvm::APSInt Value = EVResult.Val.getInt();
3482 return Builder.getInt(AI: Value);
3483 }
3484
3485 // Loop over the components of the offsetof to compute the value.
3486 unsigned n = E->getNumComponents();
3487 llvm::Type* ResultType = ConvertType(T: E->getType());
3488 llvm::Value* Result = llvm::Constant::getNullValue(Ty: ResultType);
3489 QualType CurrentType = E->getTypeSourceInfo()->getType();
3490 for (unsigned i = 0; i != n; ++i) {
3491 OffsetOfNode ON = E->getComponent(Idx: i);
3492 llvm::Value *Offset = nullptr;
3493 switch (ON.getKind()) {
3494 case OffsetOfNode::Array: {
3495 // Compute the index
3496 Expr *IdxExpr = E->getIndexExpr(Idx: ON.getArrayExprIndex());
3497 llvm::Value* Idx = CGF.EmitScalarExpr(E: IdxExpr);
3498 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3499 Idx = Builder.CreateIntCast(V: Idx, DestTy: ResultType, isSigned: IdxSigned, Name: "conv");
3500
3501 // Save the element type
3502 CurrentType =
3503 CGF.getContext().getAsArrayType(T: CurrentType)->getElementType();
3504
3505 // Compute the element size
3506 llvm::Value* ElemSize = llvm::ConstantInt::get(Ty: ResultType,
3507 V: CGF.getContext().getTypeSizeInChars(T: CurrentType).getQuantity());
3508
3509 // Multiply out to compute the result
3510 Offset = Builder.CreateMul(LHS: Idx, RHS: ElemSize);
3511 break;
3512 }
3513
3514 case OffsetOfNode::Field: {
3515 FieldDecl *MemberDecl = ON.getField();
3516 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3517 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
3518
3519 // Compute the index of the field in its parent.
3520 unsigned i = 0;
3521 // FIXME: It would be nice if we didn't have to loop here!
3522 for (RecordDecl::field_iterator Field = RD->field_begin(),
3523 FieldEnd = RD->field_end();
3524 Field != FieldEnd; ++Field, ++i) {
3525 if (*Field == MemberDecl)
3526 break;
3527 }
3528 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3529
3530 // Compute the offset to the field
3531 int64_t OffsetInt = RL.getFieldOffset(FieldNo: i) /
3532 CGF.getContext().getCharWidth();
3533 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt);
3534
3535 // Save the element type.
3536 CurrentType = MemberDecl->getType();
3537 break;
3538 }
3539
3540 case OffsetOfNode::Identifier:
3541 llvm_unreachable("dependent __builtin_offsetof");
3542
3543 case OffsetOfNode::Base: {
3544 if (ON.getBase()->isVirtual()) {
3545 CGF.ErrorUnsupported(S: E, Type: "virtual base in offsetof");
3546 continue;
3547 }
3548
3549 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3550 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
3551
3552 // Save the element type.
3553 CurrentType = ON.getBase()->getType();
3554
3555 // Compute the offset to the base.
3556 auto *BaseRT = CurrentType->castAs<RecordType>();
3557 auto *BaseRD = cast<CXXRecordDecl>(Val: BaseRT->getDecl());
3558 CharUnits OffsetInt = RL.getBaseClassOffset(Base: BaseRD);
3559 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt.getQuantity());
3560 break;
3561 }
3562 }
3563 Result = Builder.CreateAdd(LHS: Result, RHS: Offset);
3564 }
3565 return Result;
3566}
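
// For illustration (a sketch): given 'struct S { int a[8]; double d; };',
// '__builtin_offsetof(struct S, a[i])' with a non-constant 'i' cannot be
// folded, so the loop above emits roughly
//   %conv   = sext i32 %i to i64   ; index widened to the result type
//   %offset = mul i64 %conv, 4     ; scaled by sizeof(int)
// with each component's offset accumulated into the result via adds.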
3567
3568/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type
3569/// of the argument of the sizeof expression as an integer.
3570Value *
3571ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3572 const UnaryExprOrTypeTraitExpr *E) {
3573 QualType TypeToSize = E->getTypeOfArgument();
3574 if (auto Kind = E->getKind();
3575 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
3576 if (const VariableArrayType *VAT =
3577 CGF.getContext().getAsVariableArrayType(T: TypeToSize)) {
3578 // For _Countof, we only want to evaluate if the extent is actually
3579 // variable as opposed to a multi-dimensional array whose extent is
3580 // constant but whose element type is variable.
3581 bool EvaluateExtent = true;
3582 if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
3583 EvaluateExtent =
3584 !VAT->getSizeExpr()->isIntegerConstantExpr(Ctx: CGF.getContext());
3585 }
3586 if (EvaluateExtent) {
3587 if (E->isArgumentType()) {
3588 // sizeof(type) - make sure to emit the VLA size.
3589 CGF.EmitVariablyModifiedType(Ty: TypeToSize);
3590 } else {
3591 // C99 6.5.3.4p2: If the argument is an expression of type
3592 // VLA, it is evaluated.
3593 CGF.EmitIgnoredExpr(E: E->getArgumentExpr());
3594 }
3595
3596 // For _Countof, we just want to return the size of a single dimension.
3597 if (Kind == UETT_CountOf)
3598 return CGF.getVLAElements1D(vla: VAT).NumElts;
3599
3600 // For sizeof and __datasizeof, we need to scale the number of elements
3601 // by the size of the array element type.
3602 auto VlaSize = CGF.getVLASize(vla: VAT);
3603
3604 // Scale the number of non-VLA elements by the non-VLA element size.
3605 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: VlaSize.Type);
3606 if (!eltSize.isOne())
3607 return CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize),
3608 RHS: VlaSize.NumElts);
3609 return VlaSize.NumElts;
3610 }
3611 }
3612 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3613 auto Alignment =
3614 CGF.getContext()
3615 .toCharUnitsFromBits(BitSize: CGF.getContext().getOpenMPDefaultSimdAlign(
3616 T: E->getTypeOfArgument()->getPointeeType()))
3617 .getQuantity();
3618 return llvm::ConstantInt::get(Ty: CGF.SizeTy, V: Alignment);
3619 } else if (E->getKind() == UETT_VectorElements) {
3620 auto *VecTy = cast<llvm::VectorType>(Val: ConvertType(T: E->getTypeOfArgument()));
3621 return Builder.CreateElementCount(Ty: CGF.SizeTy, EC: VecTy->getElementCount());
3622 }
3623
3624 // If this isn't sizeof(vla), the result must be constant; use the constant
3625 // folding logic so we don't have to duplicate it here.
3626 return Builder.getInt(AI: E->EvaluateKnownConstInt(Ctx: CGF.getContext()));
3627}
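
// For illustration (a sketch): given 'void f(int n) { int a[n]; ... }',
// 'sizeof(a)' takes the VLA path above and emits the saved element count
// scaled by the constant element size, roughly
//   %size = mul nuw i64 4, %n.ext
// where '%n.ext' stands for the count captured when the VLA type was
// emitted (actual value names differ).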
3628
3629Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3630 QualType PromotionType) {
3631 QualType promotionTy = PromotionType.isNull()
3632 ? getPromotionType(Ty: E->getSubExpr()->getType())
3633 : PromotionType;
3634 Value *result = VisitReal(E, PromotionType: promotionTy);
3635 if (result && !promotionTy.isNull())
3636 result = EmitUnPromotedValue(result, ExprType: E->getType());
3637 return result;
3638}
3639
3640Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3641 QualType PromotionType) {
3642 Expr *Op = E->getSubExpr();
3643 if (Op->getType()->isAnyComplexType()) {
3644 // If it's an l-value, load through the appropriate subobject l-value.
3645 // Note that we have to ask E because Op might be an l-value that
3646 // this won't work for, e.g. an Obj-C property.
3647 if (E->isGLValue()) {
3648 if (!PromotionType.isNull()) {
3649 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3650 E: Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3651 if (result.first)
3652 result.first = CGF.EmitPromotedValue(result, PromotionType).first;
3653 return result.first;
3654 } else {
3655 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3656 .getScalarVal();
3657 }
3658 }
3659 // Otherwise, calculate and project.
3660 return CGF.EmitComplexExpr(E: Op, IgnoreReal: false, IgnoreImag: true).first;
3661 }
3662
3663 if (!PromotionType.isNull())
3664 return CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3665 return Visit(E: Op);
3666}
3667
3668Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3669 QualType PromotionType) {
3670 QualType promotionTy = PromotionType.isNull()
3671 ? getPromotionType(Ty: E->getSubExpr()->getType())
3672 : PromotionType;
3673 Value *result = VisitImag(E, PromotionType: promotionTy);
3674 if (result && !promotionTy.isNull())
3675 result = EmitUnPromotedValue(result, ExprType: E->getType());
3676 return result;
3677}
3678
3679Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3680 QualType PromotionType) {
3681 Expr *Op = E->getSubExpr();
3682 if (Op->getType()->isAnyComplexType()) {
3683 // If it's an l-value, load through the appropriate subobject l-value.
3684 // Note that we have to ask E because Op might be an l-value that
3685 // this won't work for, e.g. an Obj-C property.
3686 if (Op->isGLValue()) {
3687 if (!PromotionType.isNull()) {
3688 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3689 E: Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3690 if (result.second)
3691 result.second = CGF.EmitPromotedValue(result, PromotionType).second;
3692 return result.second;
3693 } else {
3694 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3695 .getScalarVal();
3696 }
3697 }
3698 // Otherwise, calculate and project.
3699 return CGF.EmitComplexExpr(E: Op, IgnoreReal: true, IgnoreImag: false).second;
3700 }
3701
3702 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3703 // effects are evaluated, but not the actual value.
3704 if (Op->isGLValue())
3705 CGF.EmitLValue(E: Op);
3706 else if (!PromotionType.isNull())
3707 CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3708 else
3709 CGF.EmitScalarExpr(E: Op, IgnoreResultAssign: true);
3710 if (!PromotionType.isNull())
3711 return llvm::Constant::getNullValue(Ty: ConvertType(T: PromotionType));
3712 return llvm::Constant::getNullValue(Ty: ConvertType(T: E->getType()));
3713}
3714
3715//===----------------------------------------------------------------------===//
3716// Binary Operators
3717//===----------------------------------------------------------------------===//
3718
3719Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3720 QualType PromotionType) {
3721 return CGF.Builder.CreateFPExt(V: result, DestTy: ConvertType(T: PromotionType), Name: "ext");
3722}
3723
3724Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3725 QualType ExprType) {
3726 return CGF.Builder.CreateFPTrunc(V: result, DestTy: ConvertType(T: ExprType), Name: "unpromotion");
3727}
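
// For illustration (a sketch, e.g. under -ffloat16-excess-precision=standard
// on a target without native _Float16 arithmetic): '_Float16 h0, h1; h0 + h1'
// is promoted, computed, and unpromoted:
//   %ext  = fpext half %h0 to float
//   %ext1 = fpext half %h1 to float
//   %add  = fadd float %ext, %ext1
//   %unpromotion = fptrunc float %add to half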
3728
3729Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3730 E = E->IgnoreParens();
3731 if (auto BO = dyn_cast<BinaryOperator>(Val: E)) {
3732 switch (BO->getOpcode()) {
3733#define HANDLE_BINOP(OP) \
3734 case BO_##OP: \
3735 return Emit##OP(EmitBinOps(BO, PromotionType));
3736 HANDLE_BINOP(Add)
3737 HANDLE_BINOP(Sub)
3738 HANDLE_BINOP(Mul)
3739 HANDLE_BINOP(Div)
3740#undef HANDLE_BINOP
3741 default:
3742 break;
3743 }
3744 } else if (auto UO = dyn_cast<UnaryOperator>(Val: E)) {
3745 switch (UO->getOpcode()) {
3746 case UO_Imag:
3747 return VisitImag(E: UO, PromotionType);
3748 case UO_Real:
3749 return VisitReal(E: UO, PromotionType);
3750 case UO_Minus:
3751 return VisitMinus(E: UO, PromotionType);
3752 case UO_Plus:
3753 return VisitPlus(E: UO, PromotionType);
3754 default:
3755 break;
3756 }
3757 }
3758 auto result = Visit(E: const_cast<Expr *>(E));
3759 if (result) {
3760 if (!PromotionType.isNull())
3761 return EmitPromotedValue(result, PromotionType);
3762 else
3763 return EmitUnPromotedValue(result, ExprType: E->getType());
3764 }
3765 return result;
3766}
3767
3768BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3769 QualType PromotionType) {
3770 TestAndClearIgnoreResultAssign();
3771 BinOpInfo Result;
3772 Result.LHS = CGF.EmitPromotedScalarExpr(E: E->getLHS(), PromotionType);
3773 Result.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType);
3774 if (!PromotionType.isNull())
3775 Result.Ty = PromotionType;
3776 else
3777 Result.Ty = E->getType();
3778 Result.Opcode = E->getOpcode();
3779 Result.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3780 Result.E = E;
3781 return Result;
3782}
3783
3784LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3785 const CompoundAssignOperator *E,
3786 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3787 Value *&Result) {
3788 QualType LHSTy = E->getLHS()->getType();
3789 BinOpInfo OpInfo;
3790
3791 if (E->getComputationResultType()->isAnyComplexType())
3792 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3793
3794  // Emit the RHS first. __block variables need to have the RHS evaluated
3795  // first; it also tends to improve codegen a little.
3796
3797 QualType PromotionTypeCR;
3798 PromotionTypeCR = getPromotionType(Ty: E->getComputationResultType());
3799 if (PromotionTypeCR.isNull())
3800 PromotionTypeCR = E->getComputationResultType();
3801 QualType PromotionTypeLHS = getPromotionType(Ty: E->getComputationLHSType());
3802 QualType PromotionTypeRHS = getPromotionType(Ty: E->getRHS()->getType());
3803 if (!PromotionTypeRHS.isNull())
3804 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType: PromotionTypeRHS);
3805 else
3806 OpInfo.RHS = Visit(E: E->getRHS());
3807 OpInfo.Ty = PromotionTypeCR;
3808 OpInfo.Opcode = E->getOpcode();
3809 OpInfo.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3810 OpInfo.E = E;
3811 // Load/convert the LHS.
3812 LValue LHSLV = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
3813
3814 llvm::PHINode *atomicPHI = nullptr;
3815 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3816 QualType type = atomicTy->getValueType();
3817 if (!type->isBooleanType() && type->isIntegerType() &&
3818 !(type->isUnsignedIntegerType() &&
3819 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
3820 CGF.getLangOpts().getSignedOverflowBehavior() !=
3821 LangOptions::SOB_Trapping) {
3822 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3823 llvm::Instruction::BinaryOps Op;
3824 switch (OpInfo.Opcode) {
3825      // We don't have atomicrmw operations for *, %, /, <<, >>.
3826 case BO_MulAssign: case BO_DivAssign:
3827 case BO_RemAssign:
3828 case BO_ShlAssign:
3829 case BO_ShrAssign:
3830 break;
3831 case BO_AddAssign:
3832 AtomicOp = llvm::AtomicRMWInst::Add;
3833 Op = llvm::Instruction::Add;
3834 break;
3835 case BO_SubAssign:
3836 AtomicOp = llvm::AtomicRMWInst::Sub;
3837 Op = llvm::Instruction::Sub;
3838 break;
3839 case BO_AndAssign:
3840 AtomicOp = llvm::AtomicRMWInst::And;
3841 Op = llvm::Instruction::And;
3842 break;
3843 case BO_XorAssign:
3844 AtomicOp = llvm::AtomicRMWInst::Xor;
3845 Op = llvm::Instruction::Xor;
3846 break;
3847 case BO_OrAssign:
3848 AtomicOp = llvm::AtomicRMWInst::Or;
3849 Op = llvm::Instruction::Or;
3850 break;
3851 default:
3852 llvm_unreachable("Invalid compound assignment type");
3853 }
3854 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3855 llvm::Value *Amt = CGF.EmitToMemory(
3856 Value: EmitScalarConversion(Src: OpInfo.RHS, SrcType: E->getRHS()->getType(), DstType: LHSTy,
3857 Loc: E->getExprLoc()),
3858 Ty: LHSTy);
3859
3860 llvm::AtomicRMWInst *OldVal =
3861 CGF.emitAtomicRMWInst(Op: AtomicOp, Addr: LHSLV.getAddress(), Val: Amt);
3862
3863        // Since the operation is atomic, the result type is guaranteed
3864        // to be the same as the input type in LLVM terms.
3865 Result = Builder.CreateBinOp(Opc: Op, LHS: OldVal, RHS: Amt);
3866 return LHSLV;
3867 }
3868 }
3869 // FIXME: For floating point types, we should be saving and restoring the
3870 // floating point environment in the loop.
3871 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3872 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
3873 OpInfo.LHS = EmitLoadOfLValue(LV: LHSLV, Loc: E->getExprLoc());
3874 OpInfo.LHS = CGF.EmitToMemory(Value: OpInfo.LHS, Ty: type);
3875 Builder.CreateBr(Dest: opBB);
3876 Builder.SetInsertPoint(opBB);
3877 atomicPHI = Builder.CreatePHI(Ty: OpInfo.LHS->getType(), NumReservedValues: 2);
3878 atomicPHI->addIncoming(V: OpInfo.LHS, BB: startBB);
3879 OpInfo.LHS = atomicPHI;
3880 }
3881 else
3882 OpInfo.LHS = EmitLoadOfLValue(LV: LHSLV, Loc: E->getExprLoc());
3883
3884 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3885 SourceLocation Loc = E->getExprLoc();
3886 if (!PromotionTypeLHS.isNull())
3887 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy, DstType: PromotionTypeLHS,
3888 Loc: E->getExprLoc());
3889 else
3890 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy,
3891 DstType: E->getComputationLHSType(), Loc);
3892
3893 // Expand the binary operator.
3894 Result = (this->*Func)(OpInfo);
3895
3896  // Convert the result back to the LHS type,
3897  // potentially with an implicit-conversion sanitizer check.
3898  // If LHSLV is a bitfield, use default ScalarConversionOpts
3899  // to avoid emitting any implicit integer checks.
3900 Value *Previous = nullptr;
3901 if (LHSLV.isBitField()) {
3902 Previous = Result;
3903 Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc);
3904 } else
3905 Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc,
3906 Opts: ScalarConversionOpts(CGF.SanOpts));
3907
3908 if (atomicPHI) {
3909 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3910 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
3911 auto Pair = CGF.EmitAtomicCompareExchange(
3912 Obj: LHSLV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: Result), Loc: E->getExprLoc());
3913 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: LHSTy);
3914 llvm::Value *success = Pair.second;
3915 atomicPHI->addIncoming(V: old, BB: curBlock);
3916 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
3917 Builder.SetInsertPoint(contBB);
3918 return LHSLV;
3919 }
3920
3921 // Store the result value into the LHS lvalue. Bit-fields are handled
3922 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3923 // 'An assignment expression has the value of the left operand after the
3924 // assignment...'.
3925 if (LHSLV.isBitField()) {
3926 Value *Src = Previous ? Previous : Result;
3927 QualType SrcType = E->getRHS()->getType();
3928 QualType DstType = E->getLHS()->getType();
3929 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: Result), Dst: LHSLV, Result: &Result);
3930 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: Result, DstType,
3931 Info: LHSLV.getBitFieldInfo(), Loc: E->getExprLoc());
3932 } else
3933 CGF.EmitStoreThroughLValue(Src: RValue::get(V: Result), Dst: LHSLV);
3934
3935 if (CGF.getLangOpts().OpenMP)
3936 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3937 LHS: E->getLHS());
3938 return LHSLV;
3939}
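
// For illustration of the atomicrmw fast path above (a sketch): for
// '_Atomic int a; a += 2;' we emit
//   %old = atomicrmw add ptr %a, i32 2 seq_cst
//   %res = add i32 %old, 2   ; value of the expression, if used
// Operators with no atomicrmw equivalent (e.g. '*=') fall back to the
// cmpxchg loop.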
3940
3941Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3942 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3943 bool Ignore = TestAndClearIgnoreResultAssign();
3944 Value *RHS = nullptr;
3945 LValue LHS = EmitCompoundAssignLValue(E, Func, Result&: RHS);
3946
3947 // If the result is clearly ignored, return now.
3948 if (Ignore)
3949 return nullptr;
3950
3951 // The result of an assignment in C is the assigned r-value.
3952 if (!CGF.getLangOpts().CPlusPlus)
3953 return RHS;
3954
3955 // If the lvalue is non-volatile, return the computed value of the assignment.
3956 if (!LHS.isVolatileQualified())
3957 return RHS;
3958
3959 // Otherwise, reload the value.
3960 return EmitLoadOfLValue(LV: LHS, Loc: E->getExprLoc());
3961}
3962
3963void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3964 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3965 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
3966 Checks;
3967
3968 if (CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero)) {
3969 Checks.push_back(Elt: std::make_pair(x: Builder.CreateICmpNE(LHS: Ops.RHS, RHS: Zero),
3970 y: SanitizerKind::SO_IntegerDivideByZero));
3971 }
3972
3973 const auto *BO = cast<BinaryOperator>(Val: Ops.E);
3974 if (CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow) &&
3975 Ops.Ty->hasSignedIntegerRepresentation() &&
3976 !IsWidenedIntegerOp(Ctx: CGF.getContext(), E: BO->getLHS()) &&
3977 Ops.mayHaveIntegerOverflow()) {
3978 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Val: Zero->getType());
3979
3980 llvm::Value *IntMin =
3981 Builder.getInt(AI: llvm::APInt::getSignedMinValue(numBits: Ty->getBitWidth()));
3982 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3983
3984 llvm::Value *LHSCmp = Builder.CreateICmpNE(LHS: Ops.LHS, RHS: IntMin);
3985 llvm::Value *RHSCmp = Builder.CreateICmpNE(LHS: Ops.RHS, RHS: NegOne);
3986 llvm::Value *NotOverflow = Builder.CreateOr(LHS: LHSCmp, RHS: RHSCmp, Name: "or");
3987 Checks.push_back(
3988 Elt: std::make_pair(x&: NotOverflow, y: SanitizerKind::SO_SignedIntegerOverflow));
3989 }
3990
3991 if (Checks.size() > 0)
3992 EmitBinOpCheck(Checks, Info: Ops);
3993}
3994
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  {
    SanitizerDebugLocation SanScope(&CGF,
                                    {SanitizerKind::SO_IntegerDivideByZero,
                                     SanitizerKind::SO_SignedIntegerOverflow,
                                     SanitizerKind::SO_FloatDivideByZero},
                                    SanitizerHandler::DivremOverflow);
    if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
         CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
        Ops.Ty->isIntegerType() &&
        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
    } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
               Ops.Ty->isRealFloatingType() &&
               Ops.mayHaveFloatDivisionByZero()) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
      EmitBinOpCheck(
          std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops);
    }
  }

  if (Ops.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    // We need to check the types of the operands of the operator to get the
    // correct matrix dimensions.
    auto *BO = cast<BinaryOperator>(Ops.E);
    (void)BO;
    assert(
        isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
        "first operand must be a matrix");
    assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
           "second operand must be an arithmetic type");
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
                              Ops.Ty->hasUnsignedIntegerRepresentation());
  }

  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
    llvm::Value *Val;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
    CGF.SetDivFPAccuracy(Val);
    return Val;
  } else if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}

Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
       CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
      Ops.Ty->isIntegerType() &&
      (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
    SanitizerDebugLocation SanScope(&CGF,
                                    {SanitizerKind::SO_IntegerDivideByZero,
                                     SanitizerKind::SO_SignedIntegerOverflow},
                                    SanitizerHandler::DivremOverflow);
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
    EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");

  if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
    return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");

  return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}

Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow
                   : llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow
                   : llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow
                   : llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
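  // Encode the operation in the upper bits and the signedness in bit 0 so the
  // runtime handler can decode which operation overflowed: e.g. signed add is
  // (1 << 1) | 1 == 3 and unsigned mul is (3 << 1) | 0 == 6 (illustrative).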
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  SanitizerDebugLocation SanScope(&CGF,
                                  {SanitizerKind::SO_SignedIntegerOverflow,
                                   SanitizerKind::SO_UnsignedIntegerOverflow},
                                  OverflowKind);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName = &CGF.getLangOpts().OverflowHandler;
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerKind::SanitizerOrdinal Ordinal =
          isSigned ? SanitizerKind::SO_SignedIntegerOverflow
                   : SanitizerKind::SO_UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Ordinal), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = {CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty};
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
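  //
  // For example (illustrative): a signed i32 add that overflowed calls
  //   handler(sext64(lhs), sext64(rhs), /*OpID=*/3, /*width=*/32).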
  llvm::Value *handlerArgs[] = {
      lhs,
      rhs,
      Builder.getInt8(OpID),
      Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())};
  llvm::Value *handlerResult =
      CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}

4184/// Emit pointer + index arithmetic.
4185static Value *emitPointerArithmetic(CodeGenFunction &CGF,
4186 const BinOpInfo &op,
4187 bool isSubtraction) {
4188 // Must have binary (not unary) expr here. Unary pointer
4189 // increment/decrement doesn't use this path.
4190 const BinaryOperator *expr = cast<BinaryOperator>(Val: op.E);
4191
4192 Value *pointer = op.LHS;
4193 Expr *pointerOperand = expr->getLHS();
4194 Value *index = op.RHS;
4195 Expr *indexOperand = expr->getRHS();
4196
4197 // In a subtraction, the LHS is always the pointer.
4198 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
4199 std::swap(a&: pointer, b&: index);
4200 std::swap(a&: pointerOperand, b&: indexOperand);
4201 }
4202
4203 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
4204
4205 unsigned width = cast<llvm::IntegerType>(Val: index->getType())->getBitWidth();
4206 auto &DL = CGF.CGM.getDataLayout();
4207 auto PtrTy = cast<llvm::PointerType>(Val: pointer->getType());
4208
4209 // Some versions of glibc and gcc use idioms (particularly in their malloc
4210 // routines) that add a pointer-sized integer (known to be a pointer value)
4211 // to a null pointer in order to cast the value back to an integer or as
4212 // part of a pointer alignment algorithm. This is undefined behavior, but
4213 // we'd like to be able to compile programs that use it.
4214 //
4215 // Normally, we'd generate a GEP with a null-pointer base here in response
4216 // to that code, but it's also UB to dereference a pointer created that
4217 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
4218 // generate a direct cast of the integer value to a pointer.
4219 //
4220 // The idiom (p = nullptr + N) is not met if any of the following are true:
4221 //
4222 // The operation is subtraction.
4223 // The index is not pointer-sized.
4224 // The pointer type is not byte-sized.
4225 //
4226 // Note that we do not suppress the pointer overflow check in this case.
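  //
  // As a concrete illustration (hypothetical code, not from any particular
  // project), the tolerated idiom looks like:
  //
  //   char *p = (char *)0 + (uintptr_t)q;  // round-trip q through an integer
  //
  // which is emitted below as a plain inttoptr of the integer operand.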
  if (BinaryOperator::isNullPointerArithmeticExtension(
          CGF.getContext(), op.Opcode, expr->getLHS(), expr->getRHS())) {
    Value *Ptr = CGF.Builder.CreateIntToPtr(index, pointer->getType());
    if (CGF.getLangOpts().PointerOverflowDefined ||
        !CGF.SanOpts.has(SanitizerKind::PointerOverflow) ||
        NullPointerIsDefined(CGF.Builder.GetInsertBlock()->getParent(),
                             PtrTy->getPointerAddressSpace()))
      return Ptr;
    // The inbounds GEP of null is valid iff the index is zero.
    auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
    auto CheckHandler = SanitizerHandler::PointerOverflow;
    SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
    Value *IsZeroIndex = CGF.Builder.CreateIsNull(index);
    llvm::Constant *StaticArgs[] = {
        CGF.EmitCheckSourceLocation(op.E->getExprLoc())};
    llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
    Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
    Value *ComputedGEP = CGF.Builder.CreateZExtOrTrunc(index, IntPtrTy);
    Value *DynamicArgs[] = {IntPtr, ComputedGEP};
    CGF.EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
                  DynamicArgs);
    return Ptr;
  }

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the index value according to
    // whether the index is signed or not.
    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  const PointerType *pointerType =
      pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    QualType objectType = pointerOperand->getType()
                              ->castAs<ObjCObjectPointerType>()
                              ->getPointeeType();
    llvm::Value *objectSize =
        CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result =
        CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla =
          CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
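    //
    // For example (illustrative): given `int (*p)[n]` and `p + i`, the index
    // is first scaled by the VLA element count (`mul nsw %i, %n`), and the
    // scaled index then feeds the checked inbounds GEP over the element type.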
    llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
    if (CGF.getLangOpts().PointerOverflowDefined) {
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
    } else {
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer = CGF.EmitCheckedInBoundsGEP(
          elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
          "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions.
  // The GNU void* casts amount to no-ops since our void* type is i8*, but
  // this is future proof.
  llvm::Type *elemTy;
  if (elementType->isVoidType() || elementType->isFunctionType())
    elemTy = CGF.Int8Ty;
  else
    elemTy = CGF.ConvertTypeForMem(elementType);

  if (CGF.getLangOpts().PointerOverflowDefined)
    return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");

  return CGF.EmitCheckedInBoundsGEP(
      elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
      "add.ptr");
}

4322// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4323// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4324// the add operand respectively. This allows fmuladd to represent a*b-c, or
4325// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4326// efficient operations.
static Value *buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
                           bool negMul, bool negAdd) {
  Value *MulOp0 = MulOp->getOperand(0);
  Value *MulOp1 = MulOp->getOperand(1);
  if (negMul)
    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
  if (negAdd)
    Addend = Builder.CreateFNeg(Addend, "neg");

  Value *FMulAdd = nullptr;
  if (Builder.getIsFPConstrained()) {
    assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
           "Only constrained operation should be created when Builder is in FP "
           "constrained mode");
    FMulAdd = Builder.CreateConstrainedFPCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
                             Addend->getType()),
        {MulOp0, MulOp1, Addend});
  } else {
    FMulAdd = Builder.CreateCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
        {MulOp0, MulOp1, Addend});
  }
  MulOp->eraseFromParent();

  return FMulAdd;
}

4356// Check whether it would be legal to emit an fmuladd intrinsic call to
4357// represent op and if so, build the fmuladd.
4358//
4359// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
4360// Does NOT check the type of the operation - it's assumed that this function
4361// will be called from contexts where it's known that the type is contractable.
4362static Value* tryEmitFMulAdd(const BinOpInfo &op,
4363 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4364 bool isSub=false) {
4365
4366 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
4367 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
4368 "Only fadd/fsub can be the root of an fmuladd.");
4369
4370 // Check whether this op is marked as fusable.
4371 if (!op.FPFeatures.allowFPContractWithinStatement())
4372 return nullptr;
4373
4374 Value *LHS = op.LHS;
4375 Value *RHS = op.RHS;
4376
4377 // Peek through fneg to look for fmul. Make sure fneg has no users, and that
4378 // it is the only use of its operand.
4379 bool NegLHS = false;
4380 if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: LHS)) {
4381 if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4382 LHSUnOp->use_empty() && LHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
4383 LHS = LHSUnOp->getOperand(i_nocapture: 0);
4384 NegLHS = true;
4385 }
4386 }
4387
4388 bool NegRHS = false;
4389 if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: RHS)) {
4390 if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4391 RHSUnOp->use_empty() && RHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
4392 RHS = RHSUnOp->getOperand(i_nocapture: 0);
4393 NegRHS = true;
4394 }
4395 }
4396
4397 // We have a potentially fusable op. Look for a mul on one of the operands.
4398 // Also, make sure that the mul result isn't used directly. In that case,
4399 // there's no point creating a muladd operation.
4400 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: LHS)) {
4401 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4402 (LHSBinOp->use_empty() || NegLHS)) {
4403 // If we looked through fneg, erase it.
4404 if (NegLHS)
4405 cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
4406 return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
4407 }
4408 }
4409 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: RHS)) {
4410 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4411 (RHSBinOp->use_empty() || NegRHS)) {
4412 // If we looked through fneg, erase it.
4413 if (NegRHS)
4414 cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
4415 return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
4416 }
4417 }
4418
4419 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(Val: LHS)) {
4420 if (LHSBinOp->getIntrinsicID() ==
4421 llvm::Intrinsic::experimental_constrained_fmul &&
4422 (LHSBinOp->use_empty() || NegLHS)) {
4423 // If we looked through fneg, erase it.
4424 if (NegLHS)
4425 cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
4426 return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
4427 }
4428 }
4429 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(Val: RHS)) {
4430 if (RHSBinOp->getIntrinsicID() ==
4431 llvm::Intrinsic::experimental_constrained_fmul &&
4432 (RHSBinOp->use_empty() || NegRHS)) {
4433 // If we looked through fneg, erase it.
4434 if (NegRHS)
4435 cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
4436 return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
4437 }
4438 }
4439
4440 return nullptr;
4441}
4442
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Undefined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Trapping:
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  // For vector and matrix adds, try to fold into a fmuladd.
  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}

4495/// The resulting value must be calculated with exact precision, so the operands
4496/// may not be the same type.
4497Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4498 using llvm::APSInt;
4499 using llvm::ConstantInt;
4500
4501 // This is either a binary operation where at least one of the operands is
4502 // a fixed-point type, or a unary operation where the operand is a fixed-point
4503 // type. The result type of a binary operation is determined by
4504 // Sema::handleFixedPointConversions().
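  //
  // For example (illustrative): for `_Accum + _Fract`, both operands are
  // brought to common fixed-point semantics wide enough to represent each
  // exactly, the add is performed there, and CreateFixedToFixed below then
  // converts the result to the result type's semantics.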
  QualType ResultTy = op.Ty;
  QualType LHSTy, RHSTy;
  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
    RHSTy = BinOp->getRHS()->getType();
    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
      // For compound assignment, the effective type of the LHS at this point
      // is the computation LHS type, not the actual LHS type, and the final
      // result type is not the type of the expression but rather the
      // computation result type.
      LHSTy = CAO->getComputationLHSType();
      ResultTy = CAO->getComputationResultType();
    } else
      LHSTy = BinOp->getLHS()->getType();
  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
    LHSTy = UnOp->getSubExpr()->getType();
    RHSTy = UnOp->getSubExpr()->getType();
  }
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);

  // Perform the actual operation.
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
    break;
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types
    // are zero'd out. They could be overwritten through non-saturating
    // operations that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
                 BinaryOperator::isShiftAssignOp(op.Opcode);
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Result,
                                      IsShift ? LHSFixedSema : CommonFixedSema,
                                      ResultFixedSema);
}

Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    // For vector and matrix subs, try to fold into a fmuladd.
    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return MB.CreateSub(op.LHS, op.RHS);
    }

    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), op))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS =
      Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS =
      Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla =
          CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

    // For everything else, we can just compute it, safe in the
    // assumption that Sema won't let anything through that we can't
    // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
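  //
  // For example (illustrative): with 4-byte elements, `&a[8] - &a[3]` yields
  // a byte difference of 20, and `sdiv exact` by 4 gives 5; "exact" is sound
  // because both pointers must point into the same array object.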
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}

Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
                                                bool RHSIsSigned) {
  llvm::IntegerType *Ty;
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());
  // For a given type of LHS the maximum shift amount is width(LHS)-1, however
  // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
  // this in ConstantInt::get, this results in the value getting truncated.
  // Constrain the return value to be max(RHS) in this case.
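  //
  // For example (illustrative): if LHS is i256 and RHS is a signed i8, then
  // width(LHS)-1 == 255 does not fit in i8 (signed max 127), so we return
  // 127 instead of a truncated constant.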
  llvm::Type *RHSTy = RHS->getType();
  llvm::APInt RHSMax =
      RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
                  : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
  if (RHSMax.ult(Ty->getBitWidth()))
    return llvm::ConstantInt::get(RHSTy, RHSMax);
  return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
}

Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
                                              const Twine &Name) {
  llvm::IntegerType *Ty;
  if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());

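  // For a power-of-2 bit width, x % width == x & (width - 1); e.g. for a
  // 32-bit LHS this emits `and %rhs, 31` (illustrative).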
  if (llvm::isPowerOf2_64(Ty->getBitWidth()))
    return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);

  return Builder.CreateURem(
      RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
}

Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().isSignedOverflowDefined() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
    if (SanitizeSignedBase)
      Ordinals.push_back(SanitizerKind::SO_ShiftBase);
    if (SanitizeUnsignedBase)
      Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
    if (SanitizeExponent)
      Ordinals.push_back(SanitizerKind::SO_ShiftExponent);

    SanitizerDebugLocation SanScope(&CGF, Ordinals,
                                    SanitizerHandler::ShiftOutOfBounds);
    SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *WidthMinusOne =
        GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if the exponent is valid - otherwise
      // the instructions below would themselves have undefined behavior.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
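        //
        // For example (illustrative): on 32-bit int, `1 << 31` is flagged in
        // C99 but accepted under C++11's rules, while `3 << 31` shifts a 1
        // bit out of the sign bit and is flagged in both modes.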
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
                                        : SanitizerKind::SO_UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}

Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
                                    SanitizerHandler::ShiftOutOfBounds);
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *Valid = Builder.CreateICmpULE(
        Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}

enum IntrinsicType { VCMPEQ, VCMPGT };
// Return the corresponding comparison intrinsic for the given vector element
// type.
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default:
    llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  case BuiltinType::Double:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p
                          : llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  case BuiltinType::UInt128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
  case BuiltinType::Int128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
  }
}

Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // On AltiVec, a scalar comparison of vectors produces a numeric result,
    // so we use intrinsics that compare the two vectors and return 0 or 1.
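    //
    // For example (illustrative): `==` on two vector int operands lowers to
    // @llvm.ppc.altivec.vcmpequw.p(i32 2 /* CR6_LT */, %lhs, %rhs), which
    // yields i32 1 when all lanes compare equal and 0 otherwise.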
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // Constants for mapping CR6 register bits to the predicate result.
      enum { CR6_EQ = 0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // In several cases the vector argument order will be reversed.
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      switch (E->getOpcode()) {
      default:
        llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of the intrinsic may not be the same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing; if ResultTy is not i1 at the same time, that would cause
      // a crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy,
                                  E->getType(), E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information must be stripped for comparisons, because it
        // could otherwise leak. Based on comparisons of pointers to dynamic
        // objects, the optimizer can replace one pointer with another, which
        // might be incorrect in the presence of invariant groups. Comparison
        // with null is safe because null does not carry any dynamic
        // information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the
    // appropriate vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}

llvm::Value *CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // In case we have the integer or bitfield sanitizer checks enabled,
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks, as E refers to a bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  return EmitScalarExpr(E->getRHS());
}

Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
    LValue LV = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    LV.getQuals().removePointerAuth();
    llvm::Value *RV =
        CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
    CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());
    CGF.EmitStoreThroughLValue(RValue::get(RV), LV);

    if (Ignore)
      return nullptr;
    RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
                                      LV.getAddress(), /*nonnull*/ false);
    return RV;
  }

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

  case Qualifiers::OCL_None:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if the LHS is a bitfield. If the RHS contains an implicit cast
    // expression, we want to extract that value and potentially (if the
    // bitfield sanitizer is enabled) use it to check for an implicit
    // conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the
  // assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
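  //
  // For example (illustrative): `0 && f()` folds to false with no call and
  // no branch, provided the RHS contains no label that could be jumped to.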
5228 bool LHSCondVal;
5229 if (CGF.ConstantFoldsToSimpleInteger(Cond: E->getLHS(), Result&: LHSCondVal)) {
5230 if (LHSCondVal) { // If we have 1 && X, just emit X.
5231 CGF.incrementProfileCounter(S: E);
5232
5233 // If the top of the logical operator nest, reset the MCDC temp to 0.
5234 if (CGF.MCDCLogOpStack.empty())
5235 CGF.maybeResetMCDCCondBitmap(E);
5236
5237 CGF.MCDCLogOpStack.push_back(Elt: E);
5238
5239 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
5240
5241 // If we're generating for profiling or coverage, generate a branch to a
5242 // block that increments the RHS counter needed to track branch condition
5243 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5244 // "FalseBlock" after the increment is done.
5245 if (InstrumentRegions &&
5246 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
5247 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
5248 llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "land.end");
5249 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "land.rhscnt");
5250 Builder.CreateCondBr(Cond: RHSCond, True: RHSBlockCnt, False: FBlock);
5251 CGF.EmitBlock(BB: RHSBlockCnt);
5252 CGF.incrementProfileCounter(S: E->getRHS());
5253 CGF.EmitBranch(Block: FBlock);
5254 CGF.EmitBlock(BB: FBlock);
5255 } else
5256 CGF.markStmtMaybeUsed(S: E->getRHS());
5257
5258 CGF.MCDCLogOpStack.pop_back();
5259 // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::Constant::getNullValue(ResTy);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(E);

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // All edges into ContBlock so far come from the (indeterminate number of)
  // branches out of this first condition, and all of those values are false.
  // Start setting up the PHI node in ContBlock for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit a line number for the unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // Use an artificial location to preserve the scope information.
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}

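// For intuition, a scalar `a || b` on the general (non-folded) path below
// lowers to a short-circuit CFG of roughly the following shape. This is an
// illustrative sketch only; the exact blocks vary with profiling, coverage,
// and MC/DC instrumentation:
//
//   entry:
//     br i1 %a, label %lor.end, label %lor.rhs
//   lor.rhs:
//     %b = ...
//     br label %lor.end
//   lor.end:
//     %r = phi i1 [ true, %entry ], [ %b, %lor.rhs ]
//     %lor.ext = zext i1 %r to i32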
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeResetMCDCCondBitmap(E);

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::ConstantInt::get(ResTy, 1);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(E);

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // All edges into ContBlock so far come from the (indeterminate number of)
  // branches out of this first condition, and all of those values are true.
  // Start setting up the PHI node in ContBlock for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}

Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
// Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  //
  // We deliberately accept nothing else: even non-volatile automatic variables
  // can't be evaluated unconditionally. Referencing a thread_local may cause
  // non-trivial initialization work to occur. If we're inside a lambda and one
  // of the variables is from the scope outside the lambda, that function may
  // have returned already. Reading its locals is a bad idea. Also, these reads
  // may introduce races that didn't exist in the source-level program.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());
}
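
// For example (illustrative): both arms of `cond ? 4 : 5` are integer
// constants, so VisitAbstractConditionalOperator below can lower the whole
// expression to a single `select i1 %cond, i32 4, i32 5` rather than a
// branch-and-phi diamond.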

Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool) {
        if (llvm::EnableSingleByteCoverage) {
          CGF.incrementProfileCounter(lhsExpr);
          CGF.incrementProfileCounter(rhsExpr);
        }
        CGF.incrementProfileCounter(E);
      }
      Value *Result = Visit(live);
      CGF.markStmtMaybeUsed(dead);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);
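    // At this point each lane of "tmp" is all-ones where the condition's MSB
    // was set (that lane takes the true/LHS value), and "tmp2" is its
    // complement (that lane takes the false/RHS value); the bitwise ANDs and
    // OR below implement a lane-wise select.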

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType() ||
      condExpr->getType()->isSveVLSBuiltinType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    if (llvm::EnableSingleByteCoverage) {
      CGF.incrementProfileCounter(lhsExpr);
      CGF.incrementProfileCounter(rhsExpr);
      CGF.incrementProfileCounter(E);
    } else
      CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(condExpr);

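  // From here on, `c ? a : b` lowers to a branch-and-phi diamond of roughly
  // the following shape (an illustrative sketch; profiling counters and MC/DC
  // updates are omitted):
  //
  //   entry:
  //     br i1 %c, label %cond.true, label %cond.false
  //   cond.true:
  //     %lhs = ...
  //     br label %cond.end
  //   cond.false:
  //     %rhs = ...
  //     br label %cond.end
  //   cond.end:
  //     %cond = phi <ty> [ %lhs, %cond.true ], [ %rhs, %cond.false ]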
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(lhsExpr);
  else
    CGF.incrementProfileCounter(E);

  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(rhsExpr);

  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the result.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);

  // When single byte coverage mode is enabled, add a counter to the
  // continuation block.
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E);

  return PN;
}

Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  return ArgPtr.getScalarVal();
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// Convert a vec3 to vec4, or vice versa.
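// E.g. vec3 -> vec4 uses all four mask elements, <0, 1, 2, -1> (the -1 lane
// is "don't care"), while vec4 -> vec3 uses only the first three, <0, 1, 2>.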
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
}

// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be a
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
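// For example (illustrative), case 3b for a pointer reinterpreted as a
// same-sized double emits:
//   %i = ptrtoint ptr %p to i64
//   %d = bitcast i64 %i to double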
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}

Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Use bit vector expansion for ext_vector_type boolean vectors.
  if (E->getType()->isExtVectorBoolType())
    return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       DstTy);

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    auto *Vec4Ty = llvm::FixedVectorType::get(
        cast<llvm::VectorType>(DstTy)->getElementType(), 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       Vec4Ty);

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}

Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type, returning
/// the result. If IgnoreResultAssign is set, the value of an assignment
/// expression need not be preserved for the caller.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}

Value *
CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
                                        QualType PromotionType) {
  if (!PromotionType.isNull())
    return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
  return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
}

llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isPRValue()) {
    llvm::Type *BaseTy =
        ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
    Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress();
  }

  // Cast the address to Class*.
  Addr = Addr.withElementType(ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}

LValue CodeGenFunction::EmitCompoundAssignmentLValue(
    const CompoundAssignOperator *E) {
  ApplyAtomGroup Grp(getDebugInfo());
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                           Result)
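  // For reference, COMPOUND_OP(Add) below expands to (illustrative):
  //   case BO_AddAssign:
  //     return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::EmitAdd,
  //                                            Result);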
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}

struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate the given GEPVal, which is either an inbounds GEP or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns the offset in bytes and a boolean flag indicating whether an
/// overflow happened during evaluation.
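/// For example (illustrative), for `getelementptr inbounds %struct.S, ptr %p,
/// i64 %i, i32 2` the total offset is `%i * sizeof(S) + offsetof(S, field 2)`,
/// with the multiply and add computed via llvm.smul.with.overflow and
/// llvm.sadd.with.overflow so that any intermediate overflow is recorded in
/// the returned flag.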
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize =
          llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}

Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();

  llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
  if (!SignedIndices && !IsSubtraction)
    NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

  Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform the nullptr-and-offset check unless nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded, and only in the
  // default address space.
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
  auto CheckHandler = SanitizerHandler::PointerOverflow;
  SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
                    2>
      Checks;

  if (PerformNullCheck) {
    // If the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have a non-null base pointer, we cannot get a null
    // pointer as a result, so the offset cannot be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, CheckOrdinal);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    //    pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // The GEP is computed as `unsigned base + signed offset`, therefore:
      // * If the offset was positive, the computed pointer cannot be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If the offset was negative, the computed pointer cannot be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // The GEP is computed as `unsigned base + unsigned offset`, therefore
      // the computed pointer cannot be [unsigned] less than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // The GEP is computed as `unsigned base - unsigned offset`, therefore
      // the computed pointer cannot be [unsigned] greater than the base
      // pointer, unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, CheckOrdinal);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);

  return GEPVal;
}

Address CodeGenFunction::EmitCheckedInBoundsGEP(
    Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
    bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
    const Twine &Name) {
  if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
    llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
    if (!SignedIndices && !IsSubtraction)
      NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

    return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
  }

  return RawAddress(
      EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
                             IdxList, SignedIndices, IsSubtraction, Loc, Name),
      elementType, Align);
}
