//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
#include <cstdarg>
#include <optional>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm
namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E;      // Entire expr, for error unsupported.  May not be binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases, this
  /// operation did not follow usual arithmetic conversion and both operands
  /// might not be of the same type.
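  /// (For example, a binary operation such as '_Accum + int' reaches this
  /// point with mixed operand types; the integer operand keeps its type.)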
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }

  /// Check if the RHS has a signed integer representation.
  bool rhsHasSignedIntegerRepresentation() const {
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType RHSType = BinOp->getRHS()->getType();
      return RHSType->hasSignedIntegerRepresentation();
    }
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++11 nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
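/// For example, in 'short a, b; ... a + b' each operand is an implicit cast to
/// 'int' wrapping a 'short' l-value, so the unwidened type here is 'short'.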
static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                       const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return std::nullopt;

  QualType BaseTy = Base->getType();
  if (!Ctx.isPromotableIntegerType(BaseTy) ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return std::nullopt;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types are less than half the size of the promoted type.
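  // For example, 'unsigned char' operands promoted to a 32-bit type cannot
  // overflow: the product is at most 0xFF * 0xFF == 0xFE01.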
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  // Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with scalar type that represents
  /// an l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again.  Optimize this common case.
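    // For example, '!x' yields an i1 that is zero-extended to 'int'; when that
    // int is converted back to bool we can simply reuse the original i1.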
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.EmitLoadOfScalar(
            Address(Result, CGF.convertTypeForLoadStore(E->getType()),
                    CGF.getContext().getTypeAlignInChars(E->getType())),
            /*Volatile*/ false, E->getType(), E->getExprLoc());
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                                  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  QualType getPromotionType(QualType Ty) {
    const auto &Ctx = CGF.getContext();
    if (auto *CT = Ty->getAs<ComplexType>()) {
      QualType ElementType = CT->getElementType();
      if (ElementType.UseExcessPrecision(Ctx))
        return Ctx.getComplexType(Ctx.FloatTy);
    }

    if (Ty.UseExcessPrecision(Ctx)) {
      if (auto *VT = Ty->getAs<VectorType>()) {
        unsigned NumElements = VT->getNumElements();
        return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
      }
      return Ctx.FloatTy;
    }

    return QualType();
  }

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP) \
  Value *VisitBin##OP(const BinaryOperator *E) { \
    QualType promotionTy = getPromotionType(E->getType()); \
    auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
    if (result && !promotionTy.isNull()) \
      result = EmitUnPromotedValue(result, E->getType()); \
    return result; \
  } \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value.  This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
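  // For example, truncating the i32 value 300 to i8 gives 44; extending 44
  // back to i32 yields 44 != 300, so the check is false (lossy), whereas
  // truncating -1 to a signed i8 and sign-extending it back gives -1 == -1.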
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};

  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
                                             const char *Name,
                                             CGBuilderTy &Builder) {
  bool VSigned = VType->isSignedIntegerOrEnumerationType();
  llvm::Type *VTy = V->getType();
  if (!VSigned) {
    // If the value is unsigned, then it is never negative.
    return llvm::ConstantInt::getFalse(VTy->getContext());
  }
  llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
  return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                            llvm::Twine(Name) + "." + V->getName() +
                                ".negativitycheck");
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  //    NOTE: conversion from negative to zero is considered to change the sign.
  //    (We want to get 'false' when the conversion changed the sign)
  //    So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
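  // (For example, 'int' and 'long' on an ILP32 target have the same width and
  // signedness here even though they are distinct canonical types.)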
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
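  // For example, i16 -> i32 sign-extends and u16 -> i32 zero-extends; neither
  // can change the sign of the value.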
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  if (!SrcSigned && !DstSigned)
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
  else
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.

  return std::make_pair(
      Kind, std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  //    NOTE: conversion from negative to zero is considered to change the sign.
  //    (We want to get 'false' when the conversion changed the sign)
  //    So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check =
      Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
}

void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
                                                  Value *Dst, QualType DstType,
                                                  const CGBitFieldInfo &Info,
                                                  SourceLocation Loc) {

  if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  if (DstType->isBooleanType() || SrcType->isBooleanType())
    return;

  // This should be truncation of integral types.
  assert(isa<llvm::IntegerType>(Src->getType()) &&
         isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");

  // TODO: Calculate src width to avoid emitting code
  // for unnecessary cases.
  unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
  unsigned DstBits = Info.Size;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  CodeGenFunction::SanitizerScope SanScope(this);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Truncation
  bool EmitTruncation = DstBits < SrcBits;
  // If Dst is signed and Src unsigned, we want to be more specific
  // about the CheckKind we emit, in this case we want to emit
  // ICCK_SignedIntegerTruncationOrSignChange.
  bool EmitTruncationFromUnsignedToSigned =
      EmitTruncation && DstSigned && !SrcSigned;
  // Sign change
  bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
  bool BothUnsigned = !SrcSigned && !DstSigned;
  bool LargerSigned = (DstBits > SrcBits) && DstSigned;
  // We can avoid emitting sign change checks in some obvious cases:
  //   1. If Src and Dst have the same signedness and size.
  //   2. If both are unsigned, the sign check is unnecessary.
  //   3. If Dst is signed and bigger than Src, either
  //      sign-extension or zero-extension will make sure
  //      the sign remains.
  bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;

  if (EmitTruncation)
    Check =
        EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  else if (EmitSignChange) {
    assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
           "either the widths should be different, or the signednesses.");
    Check =
        EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  } else
    return;

  ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
  if (EmitTruncationFromUnsignedToSigned)
    CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;

  llvm::Constant *StaticArgs[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(SrcType),
      EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};

  EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
            {Src, Dst});
}
1390
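/// Emit the actual cast instruction for a scalar (or matrix-of-scalar)
/// conversion, picking the int/FP trunc, ext, sitofp, fptosi, etc. from the
/// source and destination element types.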
1391Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1392 QualType DstType, llvm::Type *SrcTy,
1393 llvm::Type *DstTy,
1394 ScalarConversionOpts Opts) {
1395 // The Element types determine the type of cast to perform.
1396 llvm::Type *SrcElementTy;
1397 llvm::Type *DstElementTy;
1398 QualType SrcElementType;
1399 QualType DstElementType;
1400 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1401 SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
1402 DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1403 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1404 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1405 } else {
1406 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1407 "cannot cast between matrix and non-matrix types");
1408 SrcElementTy = SrcTy;
1409 DstElementTy = DstTy;
1410 SrcElementType = SrcType;
1411 DstElementType = DstType;
1412 }
1413
1414 if (isa<llvm::IntegerType>(Val: SrcElementTy)) {
1415 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1416 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1417 InputSigned = true;
1418 }
1419
1420 if (isa<llvm::IntegerType>(Val: DstElementTy))
1421 return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
1422 if (InputSigned)
1423 return Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
1424 return Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
1425 }
1426
1427 if (isa<llvm::IntegerType>(Val: DstElementTy)) {
1428 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1429 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1430
1431 // If we can't recognize overflow as undefined behavior, assume that
1432 // overflow saturates. This protects against normal optimizations if we are
1433 // compiling with non-standard FP semantics.
1434 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1435 llvm::Intrinsic::ID IID =
1436 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1437 return Builder.CreateCall(Callee: CGF.CGM.getIntrinsic(IID, Tys: {DstTy, SrcTy}), Args: Src);
1438 }
1439
1440 if (IsSigned)
1441 return Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
1442 return Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
1443 }
1444
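// Both element types are floating point at this point. LLVM's FP TypeIDs are
// ordered roughly from narrowest to widest (half, bfloat, float, double, ...),
// so a smaller destination TypeID indicates a narrowing conversion.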
1445 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1446 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1447 return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1448}
1449
1450/// Emit a conversion from the specified type to the specified destination type,
1451/// both of which are LLVM scalar types.
1452Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1453 QualType DstType,
1454 SourceLocation Loc,
1455 ScalarConversionOpts Opts) {
1456 // All conversions involving fixed point types should be handled by the
1457 // EmitFixedPoint family functions. This is done to prevent bloating up this
1458 // function more, and although fixed point numbers are represented by
1459 // integers, we do not want to follow any logic that assumes they should be
1460 // treated as integers.
1461 // TODO(leonardchan): When necessary, add another if statement checking for
1462 // conversions to fixed point types from other types.
1463 if (SrcType->isFixedPointType()) {
1464 if (DstType->isBooleanType())
1465 // It is important that we check this before checking if the dest type is
1466 // an integer because booleans are technically integer types.
1467 // We do not need to check the padding bit on unsigned types if unsigned
1468 // padding is enabled because overflow into this bit is undefined
1469 // behavior.
1470 return Builder.CreateIsNotNull(Arg: Src, Name: "tobool");
1471 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1472 DstType->isRealFloatingType())
1473 return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);
1474
1475 llvm_unreachable(
1476 "Unhandled scalar conversion from a fixed point type to another type.");
1477 } else if (DstType->isFixedPointType()) {
1478 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1479 // This also includes converting booleans and enums to fixed point types.
1480 return EmitFixedPointConversion(Src, SrcTy: SrcType, DstTy: DstType, Loc);
1481
1482 llvm_unreachable(
1483 "Unhandled scalar conversion to a fixed point type from another type.");
1484 }
1485
1486 QualType NoncanonicalSrcType = SrcType;
1487 QualType NoncanonicalDstType = DstType;
1488
1489 SrcType = CGF.getContext().getCanonicalType(T: SrcType);
1490 DstType = CGF.getContext().getCanonicalType(T: DstType);
1491 if (SrcType == DstType) return Src;
1492
1493 if (DstType->isVoidType()) return nullptr;
1494
1495 llvm::Value *OrigSrc = Src;
1496 QualType OrigSrcType = SrcType;
1497 llvm::Type *SrcTy = Src->getType();
1498
1499 // Handle conversions to bool first, they are special: comparisons against 0.
1500 if (DstType->isBooleanType())
1501 return EmitConversionToBool(Src, SrcType);
1502
1503 llvm::Type *DstTy = ConvertType(T: DstType);
1504
1505 // Cast from half through float if half isn't a native type.
1506 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1507 // Cast to FP using the intrinsic if the half type itself isn't supported.
1508 if (DstTy->isFloatingPointTy()) {
1509 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1510 return Builder.CreateCall(
1511 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_from_fp16, Tys: DstTy),
1512 Args: Src);
1513 } else {
1514 // Cast to other types through float, using either the intrinsic or FPExt,
1515 // depending on whether the half type itself is supported
1516 // (as opposed to operations on half, available with NativeHalfType).
1517 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1518 Src = Builder.CreateCall(
1519 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_from_fp16,
1520 Tys: CGF.CGM.FloatTy),
1521 Args: Src);
1522 } else {
1523 Src = Builder.CreateFPExt(V: Src, DestTy: CGF.CGM.FloatTy, Name: "conv");
1524 }
1525 SrcType = CGF.getContext().FloatTy;
1526 SrcTy = CGF.FloatTy;
1527 }
1528 }
1529
1530 // Ignore conversions like int -> uint.
1531 if (SrcTy == DstTy) {
1532 if (Opts.EmitImplicitIntegerSignChangeChecks)
1533 EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Src,
1534 DstType: NoncanonicalDstType, Loc);
1535
1536 return Src;
1537 }
1538
1539 // Handle pointer conversions next: pointers can only be converted to/from
1540 // other pointers and integers. Check for pointer types in terms of LLVM, as
1541 // some native types (like Obj-C id) may map to a pointer type.
1542 if (auto DstPT = dyn_cast<llvm::PointerType>(Val: DstTy)) {
1543 // The source value may be an integer, or a pointer.
1544 if (isa<llvm::PointerType>(Val: SrcTy))
1545 return Src;
1546
1547 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1548 // First, convert to the correct width so that we control the kind of
1549 // extension.
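// (e.g., on a 64-bit target a 32-bit signed source is sign-extended to i64
// here, whereas inttoptr alone would zero-extend).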
1550 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1551 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1552 llvm::Value* IntResult =
1553 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
1554 // Then, cast to pointer.
1555 return Builder.CreateIntToPtr(V: IntResult, DestTy: DstTy, Name: "conv");
1556 }
1557
1558 if (isa<llvm::PointerType>(Val: SrcTy)) {
1559 // Must be a ptr-to-int cast.
1560 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1561 return Builder.CreatePtrToInt(V: Src, DestTy: DstTy, Name: "conv");
1562 }
1563
1564 // A scalar can be splatted to an extended vector of the same element type
1565 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1566 // Sema should add casts to make sure that the source expression's type is
1567 // the same as the vector's element type (sans qualifiers)
1568 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1569 SrcType.getTypePtr() &&
1570 "Splatted expr doesn't match with vector element type?");
1571
1572 // Splat the element across to all elements
1573 unsigned NumElements = cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements();
1574 return Builder.CreateVectorSplat(NumElts: NumElements, V: Src, Name: "splat");
1575 }
1576
1577 if (SrcType->isMatrixType() && DstType->isMatrixType())
1578 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1579
1580 if (isa<llvm::VectorType>(Val: SrcTy) || isa<llvm::VectorType>(Val: DstTy)) {
1581 // Allow bitcast from vector to integer/fp of the same size.
1582 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1583 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1584 if (SrcSize == DstSize)
1585 return Builder.CreateBitCast(V: Src, DestTy: DstTy, Name: "conv");
1586
1587 // Conversions between vectors of different sizes are not allowed except
1588 // when vectors of half are involved. Operations on storage-only half
1589 // vectors require promoting half vector operands to float vectors and
1590 // truncating the result, which is either an int or float vector, to a
1591 // short or half vector.
1592
1593 // Source and destination are both expected to be vectors.
1594 llvm::Type *SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
1595 llvm::Type *DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1596 (void)DstElementTy;
1597
1598 assert(((SrcElementTy->isIntegerTy() &&
1599 DstElementTy->isIntegerTy()) ||
1600 (SrcElementTy->isFloatingPointTy() &&
1601 DstElementTy->isFloatingPointTy())) &&
1602 "unexpected conversion between a floating-point vector and an "
1603 "integer vector");
1604
1605 // Truncate an i32 vector to an i16 vector.
1606 if (SrcElementTy->isIntegerTy())
1607 return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: false, Name: "conv");
1608
1609 // Truncate a float vector to a half vector.
1610 if (SrcSize > DstSize)
1611 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1612
1613 // Promote a half vector to a float vector.
1614 return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1615 }
1616
1617 // Finally, we have the arithmetic types: real int/float.
1618 Value *Res = nullptr;
1619 llvm::Type *ResTy = DstTy;
1620
1621 // An overflowing conversion has undefined behavior if either the source type
1622 // or the destination type is a floating-point type. However, we consider the
1623 // range of representable values for all floating-point types to be
1624 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1625 // floating-point type.
1626 if (CGF.SanOpts.has(K: SanitizerKind::FloatCastOverflow) &&
1627 OrigSrcType->isFloatingType())
1628 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1629 Loc);
1630
1631 // Cast to half through float if half isn't a native type.
1632 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1633 // Make sure we cast in a single step if from another FP type.
1634 if (SrcTy->isFloatingPointTy()) {
1635 // Use the intrinsic if the half type itself isn't supported
1636 // (as opposed to operations on half, available with NativeHalfType).
1637 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1638 return Builder.CreateCall(
1639 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_to_fp16, Tys: SrcTy), Args: Src);
1640 // If the half type is supported, just use an fptrunc.
1641 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy);
1642 }
1643 DstTy = CGF.FloatTy;
1644 }
1645
1646 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1647
1648 if (DstTy != ResTy) {
1649 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1650 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1651 Res = Builder.CreateCall(
1652 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_to_fp16, Tys: CGF.CGM.FloatTy),
1653 Args: Res);
1654 } else {
1655 Res = Builder.CreateFPTrunc(V: Res, DestTy: ResTy, Name: "conv");
1656 }
1657 }
1658
1659 if (Opts.EmitImplicitIntegerTruncationChecks)
1660 EmitIntegerTruncationCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1661 DstType: NoncanonicalDstType, Loc);
1662
1663 if (Opts.EmitImplicitIntegerSignChangeChecks)
1664 EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1665 DstType: NoncanonicalDstType, Loc);
1666
1667 return Res;
1668}
1669
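/// Emit a conversion involving at least one fixed-point type, dispatching to
/// llvm::FixedPointBuilder for fixed<->floating, fixed<->integer, and
/// fixed<->fixed conversions.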
1670Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1671 QualType DstTy,
1672 SourceLocation Loc) {
1673 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1674 llvm::Value *Result;
1675 if (SrcTy->isRealFloatingType())
1676 Result = FPBuilder.CreateFloatingToFixed(Src,
1677 DstSema: CGF.getContext().getFixedPointSemantics(Ty: DstTy));
1678 else if (DstTy->isRealFloatingType())
1679 Result = FPBuilder.CreateFixedToFloating(Src,
1680 SrcSema: CGF.getContext().getFixedPointSemantics(Ty: SrcTy),
1681 DstTy: ConvertType(T: DstTy));
1682 else {
1683 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(Ty: SrcTy);
1684 auto DstFPSema = CGF.getContext().getFixedPointSemantics(Ty: DstTy);
1685
1686 if (DstTy->isIntegerType())
1687 Result = FPBuilder.CreateFixedToInteger(Src, SrcSema: SrcFPSema,
1688 DstWidth: DstFPSema.getWidth(),
1689 DstIsSigned: DstFPSema.isSigned());
1690 else if (SrcTy->isIntegerType())
1691 Result = FPBuilder.CreateIntegerToFixed(Src, SrcIsSigned: SrcFPSema.isSigned(),
1692 DstSema: DstFPSema);
1693 else
1694 Result = FPBuilder.CreateFixedToFixed(Src, SrcSema: SrcFPSema, DstSema: DstFPSema);
1695 }
1696 return Result;
1697}
1698
1699/// Emit a conversion from the specified complex type to the specified
1700/// destination type, where the destination type is an LLVM scalar type.
1701Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1702 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1703 SourceLocation Loc) {
1704 // Get the source element type.
1705 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1706
1707 // Handle conversions to bool first, they are special: comparisons against 0.
1708 if (DstTy->isBooleanType()) {
1709 // Complex != 0 -> (Real != 0) | (Imag != 0)
1710 Src.first = EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1711 Src.second = EmitScalarConversion(Src: Src.second, SrcType: SrcTy, DstType: DstTy, Loc);
1712 return Builder.CreateOr(LHS: Src.first, RHS: Src.second, Name: "tobool");
1713 }
1714
1715 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1716 // the imaginary part of the complex value is discarded and the value of the
1717 // real part is converted according to the conversion rules for the
1718 // corresponding real type."
1719 return EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1720}
1721
1722Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1723 return CGF.EmitFromMemory(Value: CGF.CGM.EmitNullConstant(T: Ty), Ty);
1724}
1725
1726/// Emit a sanitization check for the given "binary" operation (which
1727/// might actually be a unary increment which has been lowered to a binary
1728/// operation). The check passes if all values in \p Checks (which are \c i1),
1729/// are \c true.
1730void ScalarExprEmitter::EmitBinOpCheck(
1731 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1732 assert(CGF.IsSanitizerScope);
1733 SanitizerHandler Check;
1734 SmallVector<llvm::Constant *, 4> StaticData;
1735 SmallVector<llvm::Value *, 2> DynamicData;
1736
1737 BinaryOperatorKind Opcode = Info.Opcode;
1738 if (BinaryOperator::isCompoundAssignmentOp(Opc: Opcode))
1739 Opcode = BinaryOperator::getOpForCompoundAssignment(Opc: Opcode);
1740
1741 StaticData.push_back(Elt: CGF.EmitCheckSourceLocation(Loc: Info.E->getExprLoc()));
1742 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: Info.E);
1743 if (UO && UO->getOpcode() == UO_Minus) {
1744 Check = SanitizerHandler::NegateOverflow;
1745 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: UO->getType()));
1746 DynamicData.push_back(Elt: Info.RHS);
1747 } else {
1748 if (BinaryOperator::isShiftOp(Opc: Opcode)) {
1749 // Shift LHS negative or too large, or RHS out of bounds.
1750 Check = SanitizerHandler::ShiftOutOfBounds;
1751 const BinaryOperator *BO = cast<BinaryOperator>(Val: Info.E);
1752 StaticData.push_back(
1753 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getLHS()->getType()));
1754 StaticData.push_back(
1755 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getRHS()->getType()));
1756 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1757 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1758 Check = SanitizerHandler::DivremOverflow;
1759 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1760 } else {
1761 // Arithmetic overflow (+, -, *).
1762 switch (Opcode) {
1763 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1764 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1765 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1766 default: llvm_unreachable("unexpected opcode for bin op check");
1767 }
1768 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1769 }
1770 DynamicData.push_back(Elt: Info.LHS);
1771 DynamicData.push_back(Elt: Info.RHS);
1772 }
1773
1774 CGF.EmitCheck(Checked: Checks, Check, StaticArgs: StaticData, DynamicArgs: DynamicData);
1775}
1776
1777//===----------------------------------------------------------------------===//
1778// Visitor Methods
1779//===----------------------------------------------------------------------===//
1780
1781Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1782 CGF.ErrorUnsupported(S: E, Type: "scalar expression");
1783 if (E->getType()->isVoidType())
1784 return nullptr;
1785 return llvm::UndefValue::get(T: CGF.ConvertType(T: E->getType()));
1786}
1787
1788Value *
1789ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1790 ASTContext &Context = CGF.getContext();
1791 unsigned AddrSpace =
1792 Context.getTargetAddressSpace(AS: CGF.CGM.GetGlobalConstantAddressSpace());
1793 llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
1794 Str: E->ComputeName(Context), Name: "__usn_str", AddressSpace: AddrSpace);
1795
1796 llvm::Type *ExprTy = ConvertType(T: E->getType());
1797 return Builder.CreatePointerBitCastOrAddrSpaceCast(V: GlobalConstStr, DestTy: ExprTy,
1798 Name: "usn_addr_cast");
1799}
1800
1801Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1802 assert(E->getDataElementCount() == 1);
1803 auto It = E->begin();
1804 return Builder.getInt(AI: (*It)->getValue());
1805}
1806
1807Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1808 // Vector Mask Case
1809 if (E->getNumSubExprs() == 2) {
1810 Value *LHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1811 Value *RHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1812 Value *Mask;
1813
1814 auto *LTy = cast<llvm::FixedVectorType>(Val: LHS->getType());
1815 unsigned LHSElts = LTy->getNumElements();
1816
1817 Mask = RHS;
1818
1819 auto *MTy = cast<llvm::FixedVectorType>(Val: Mask->getType());
1820
1821 // Mask off the high bits of each shuffle index.
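// (e.g., for a 4-element LHS the mask bits are 0b11, so each index is reduced
// modulo the power-of-two vector length).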
1822 Value *MaskBits =
1823 llvm::ConstantInt::get(Ty: MTy, V: llvm::NextPowerOf2(A: LHSElts - 1) - 1);
1824 Mask = Builder.CreateAnd(LHS: Mask, RHS: MaskBits, Name: "mask");
1825
1826 // newv = undef
1827 // mask = mask & maskbits
1828 // for each elt
1829 // n = extract mask i
1830 // x = extract val n
1831 // newv = insert newv, x, i
1832 auto *RTy = llvm::FixedVectorType::get(ElementType: LTy->getElementType(),
1833 NumElts: MTy->getNumElements());
1834 Value* NewV = llvm::PoisonValue::get(T: RTy);
1835 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1836 Value *IIndx = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: i);
1837 Value *Indx = Builder.CreateExtractElement(Vec: Mask, Idx: IIndx, Name: "shuf_idx");
1838
1839 Value *VExt = Builder.CreateExtractElement(Vec: LHS, Idx: Indx, Name: "shuf_elt");
1840 NewV = Builder.CreateInsertElement(Vec: NewV, NewElt: VExt, Idx: IIndx, Name: "shuf_ins");
1841 }
1842 return NewV;
1843 }
1844
1845 Value* V1 = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1846 Value* V2 = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1847
1848 SmallVector<int, 32> Indices;
1849 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1850 llvm::APSInt Idx = E->getShuffleMaskIdx(Ctx: CGF.getContext(), N: i-2);
1851 // Check for -1 and output it as undef in the IR.
1852 if (Idx.isSigned() && Idx.isAllOnes())
1853 Indices.push_back(Elt: -1);
1854 else
1855 Indices.push_back(Elt: Idx.getZExtValue());
1856 }
1857
1858 return Builder.CreateShuffleVector(V1, V2, Mask: Indices, Name: "shuffle");
1859}
1860
1861Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1862 QualType SrcType = E->getSrcExpr()->getType(),
1863 DstType = E->getType();
1864
1865 Value *Src = CGF.EmitScalarExpr(E: E->getSrcExpr());
1866
1867 SrcType = CGF.getContext().getCanonicalType(T: SrcType);
1868 DstType = CGF.getContext().getCanonicalType(T: DstType);
1869 if (SrcType == DstType) return Src;
1870
1871 assert(SrcType->isVectorType() &&
1872 "ConvertVector source type must be a vector");
1873 assert(DstType->isVectorType() &&
1874 "ConvertVector destination type must be a vector");
1875
1876 llvm::Type *SrcTy = Src->getType();
1877 llvm::Type *DstTy = ConvertType(T: DstType);
1878
1879 // Ignore conversions like int -> uint.
1880 if (SrcTy == DstTy)
1881 return Src;
1882
1883 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1884 DstEltType = DstType->castAs<VectorType>()->getElementType();
1885
1886 assert(SrcTy->isVectorTy() &&
1887 "ConvertVector source IR type must be a vector");
1888 assert(DstTy->isVectorTy() &&
1889 "ConvertVector destination IR type must be a vector");
1890
1891 llvm::Type *SrcEltTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType(),
1892 *DstEltTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1893
1894 if (DstEltType->isBooleanType()) {
1895 assert((SrcEltTy->isFloatingPointTy() ||
1896 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1897
1898 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: SrcTy);
1899 if (SrcEltTy->isFloatingPointTy()) {
1900 return Builder.CreateFCmpUNE(LHS: Src, RHS: Zero, Name: "tobool");
1901 } else {
1902 return Builder.CreateICmpNE(LHS: Src, RHS: Zero, Name: "tobool");
1903 }
1904 }
1905
1906 // We have the arithmetic types: real int/float.
1907 Value *Res = nullptr;
1908
1909 if (isa<llvm::IntegerType>(Val: SrcEltTy)) {
1910 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1911 if (isa<llvm::IntegerType>(Val: DstEltTy))
1912 Res = Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
1913 else if (InputSigned)
1914 Res = Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
1915 else
1916 Res = Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
1917 } else if (isa<llvm::IntegerType>(Val: DstEltTy)) {
1918 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1919 if (DstEltType->isSignedIntegerOrEnumerationType())
1920 Res = Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
1921 else
1922 Res = Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
1923 } else {
1924 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1925 "Unknown real conversion");
1926 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1927 Res = Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1928 else
1929 Res = Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1930 }
1931
1932 return Res;
1933}
1934
1935Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1936 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(ME: E)) {
1937 CGF.EmitIgnoredExpr(E: E->getBase());
1938 return CGF.emitScalarConstant(Constant, E);
1939 } else {
1940 Expr::EvalResult Result;
1941 if (E->EvaluateAsInt(Result, Ctx: CGF.getContext(), AllowSideEffects: Expr::SE_AllowSideEffects)) {
1942 llvm::APSInt Value = Result.Val.getInt();
1943 CGF.EmitIgnoredExpr(E: E->getBase());
1944 return Builder.getInt(AI: Value);
1945 }
1946 }
1947
1948 llvm::Value *Result = EmitLoadOfLValue(E);
1949
1950 // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
1951 // debug info for the pointer, even if there is no variable associated with
1952 // the pointer's expression.
1953 if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
1954 if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Val: Result)) {
1955 if (llvm::GetElementPtrInst *GEP =
1956 dyn_cast<llvm::GetElementPtrInst>(Val: Load->getPointerOperand())) {
1957 if (llvm::Instruction *Pointer =
1958 dyn_cast<llvm::Instruction>(Val: GEP->getPointerOperand())) {
1959 QualType Ty = E->getBase()->getType();
1960 if (!E->isArrow())
1961 Ty = CGF.getContext().getPointerType(T: Ty);
1962 CGF.getDebugInfo()->EmitPseudoVariable(Builder, Value: Pointer, Ty);
1963 }
1964 }
1965 }
1966 }
1967 return Result;
1968}
1969
1970Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1971 TestAndClearIgnoreResultAssign();
1972
1973 // Emit subscript expressions in rvalue contexts. For most cases, this just
1974 // loads the lvalue formed by the subscript expr. However, we have to be
1975 // careful, because the base of a vector subscript is occasionally an rvalue,
1976 // so we can't get it as an lvalue.
1977 if (!E->getBase()->getType()->isVectorType() &&
1978 !E->getBase()->getType()->isSveVLSBuiltinType())
1979 return EmitLoadOfLValue(E);
1980
1981 // Handle the vector case. The base must be a vector, the index must be an
1982 // integer value.
1983 Value *Base = Visit(E: E->getBase());
1984 Value *Idx = Visit(E: E->getIdx());
1985 QualType IdxTy = E->getIdx()->getType();
1986
1987 if (CGF.SanOpts.has(K: SanitizerKind::ArrayBounds))
1988 CGF.EmitBoundsCheck(E, Base: E->getBase(), Index: Idx, IndexType: IdxTy, /*Accessed*/true);
1989
1990 return Builder.CreateExtractElement(Vec: Base, Idx, Name: "vecext");
1991}
1992
1993Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1994 TestAndClearIgnoreResultAssign();
1995
1996 // Handle the vector case. The base must be a vector, the index must be an
1997 // integer value.
1998 Value *RowIdx = Visit(E: E->getRowIdx());
1999 Value *ColumnIdx = Visit(E: E->getColumnIdx());
2000
2001 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2002 unsigned NumRows = MatrixTy->getNumRows();
2003 llvm::MatrixBuilder MB(Builder);
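// Matrix values are laid out in column-major order, so the flattened element
// index computed here is RowIdx + ColumnIdx * NumRows.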
2004 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
2005 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2006 MB.CreateIndexAssumption(Idx, NumElements: MatrixTy->getNumElementsFlattened());
2007
2008 Value *Matrix = Visit(E: E->getBase());
2009
2010 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2011 return Builder.CreateExtractElement(Vec: Matrix, Idx, Name: "matrixext");
2012}
2013
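// Return the mask element of a shufflevector at index Idx, offset by Off;
// a -1 (undef/poison) lane is propagated unchanged.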
2014static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2015 unsigned Off) {
2016 int MV = SVI->getMaskValue(Elt: Idx);
2017 if (MV == -1)
2018 return -1;
2019 return Off + MV;
2020}
2021
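// Return a constant shuffle index as an int, asserting that it fits into an
// i32 mask element.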
2022static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2023 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2024 "Index operand too large for shufflevector mask!");
2025 return C->getZExtValue();
2026}
2027
2028Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2029 bool Ignore = TestAndClearIgnoreResultAssign();
2030 (void)Ignore;
2031 assert(Ignore == false && "init list ignored");
2032 unsigned NumInitElements = E->getNumInits();
2033
2034 if (E->hadArrayRangeDesignator())
2035 CGF.ErrorUnsupported(S: E, Type: "GNU array range designator extension");
2036
2037 llvm::VectorType *VType =
2038 dyn_cast<llvm::VectorType>(Val: ConvertType(T: E->getType()));
2039
2040 if (!VType) {
2041 if (NumInitElements == 0) {
2042 // C++11 value-initialization for the scalar.
2043 return EmitNullValue(Ty: E->getType());
2044 }
2045 // We have a scalar in braces. Just use the first element.
2046 return Visit(E: E->getInit(Init: 0));
2047 }
2048
2049 if (isa<llvm::ScalableVectorType>(Val: VType)) {
2050 if (NumInitElements == 0) {
2051 // C++11 value-initialization for the vector.
2052 return EmitNullValue(Ty: E->getType());
2053 }
2054
2055 if (NumInitElements == 1) {
2056 Expr *InitVector = E->getInit(Init: 0);
2057
2058 // Initialize from another scalable vector of the same type.
2059 if (InitVector->getType() == E->getType())
2060 return Visit(E: InitVector);
2061 }
2062
2063 llvm_unreachable("Unexpected initialization of a scalable vector!");
2064 }
2065
2066 unsigned ResElts = cast<llvm::FixedVectorType>(Val: VType)->getNumElements();
2067
2068 // Loop over initializers collecting the Value for each, and remembering
2069 // whether the source was a swizzle (ExtVectorElementExpr). This will allow
2070 // us to fold the shuffle for the swizzle into the shuffle for the vector
2071 // initializer, since LLVM optimizers generally do not want to touch
2072 // shuffles.
2073 unsigned CurIdx = 0;
2074 bool VIsPoisonShuffle = false;
2075 llvm::Value *V = llvm::PoisonValue::get(T: VType);
2076 for (unsigned i = 0; i != NumInitElements; ++i) {
2077 Expr *IE = E->getInit(Init: i);
2078 Value *Init = Visit(E: IE);
2079 SmallVector<int, 16> Args;
2080
2081 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Val: Init->getType());
2082
2083 // Handle scalar elements. If the scalar initializer is actually one
2084 // element of a different vector of the same width, use shuffle instead of
2085 // extract+insert.
2086 if (!VVT) {
2087 if (isa<ExtVectorElementExpr>(Val: IE)) {
2088 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Val: Init);
2089
2090 if (cast<llvm::FixedVectorType>(Val: EI->getVectorOperandType())
2091 ->getNumElements() == ResElts) {
2092 llvm::ConstantInt *C = cast<llvm::ConstantInt>(Val: EI->getIndexOperand());
2093 Value *LHS = nullptr, *RHS = nullptr;
2094 if (CurIdx == 0) {
2095 // insert into poison -> shuffle (src, poison)
2096 // shufflemask must use an i32
2097 Args.push_back(Elt: getAsInt32(C, I32Ty: CGF.Int32Ty));
2098 Args.resize(N: ResElts, NV: -1);
2099
2100 LHS = EI->getVectorOperand();
2101 RHS = V;
2102 VIsPoisonShuffle = true;
2103 } else if (VIsPoisonShuffle) {
2104 // insert into poison shuffle && size match -> shuffle (v, src)
2105 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(Val: V);
2106 for (unsigned j = 0; j != CurIdx; ++j)
2107 Args.push_back(Elt: getMaskElt(SVI: SVV, Idx: j, Off: 0));
2108 Args.push_back(Elt: ResElts + C->getZExtValue());
2109 Args.resize(N: ResElts, NV: -1);
2110
2111 LHS = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
2112 RHS = EI->getVectorOperand();
2113 VIsPoisonShuffle = false;
2114 }
2115 if (!Args.empty()) {
2116 V = Builder.CreateShuffleVector(V1: LHS, V2: RHS, Mask: Args);
2117 ++CurIdx;
2118 continue;
2119 }
2120 }
2121 }
2122 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx: Builder.getInt32(C: CurIdx),
2123 Name: "vecinit");
2124 VIsPoisonShuffle = false;
2125 ++CurIdx;
2126 continue;
2127 }
2128
2129 unsigned InitElts = cast<llvm::FixedVectorType>(Val: VVT)->getNumElements();
2130
2131 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2132 // input is the same width as the vector being constructed, generate an
2133 // optimized shuffle of the swizzle input into the result.
2134 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2135 if (isa<ExtVectorElementExpr>(Val: IE)) {
2136 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Val: Init);
2137 Value *SVOp = SVI->getOperand(i_nocapture: 0);
2138 auto *OpTy = cast<llvm::FixedVectorType>(Val: SVOp->getType());
2139
2140 if (OpTy->getNumElements() == ResElts) {
2141 for (unsigned j = 0; j != CurIdx; ++j) {
2142 // If the current vector initializer is a shuffle with poison, merge
2143 // this shuffle directly into it.
2144 if (VIsPoisonShuffle) {
2145 Args.push_back(Elt: getMaskElt(SVI: cast<llvm::ShuffleVectorInst>(Val: V), Idx: j, Off: 0));
2146 } else {
2147 Args.push_back(Elt: j);
2148 }
2149 }
2150 for (unsigned j = 0, je = InitElts; j != je; ++j)
2151 Args.push_back(Elt: getMaskElt(SVI, Idx: j, Off: Offset));
2152 Args.resize(N: ResElts, NV: -1);
2153
2154 if (VIsPoisonShuffle)
2155 V = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
2156
2157 Init = SVOp;
2158 }
2159 }
2160
2161 // Extend init to result vector length, and then shuffle its contribution
2162 // to the vector initializer into V.
2163 if (Args.empty()) {
2164 for (unsigned j = 0; j != InitElts; ++j)
2165 Args.push_back(Elt: j);
2166 Args.resize(N: ResElts, NV: -1);
2167 Init = Builder.CreateShuffleVector(V: Init, Mask: Args, Name: "vext");
2168
2169 Args.clear();
2170 for (unsigned j = 0; j != CurIdx; ++j)
2171 Args.push_back(Elt: j);
2172 for (unsigned j = 0; j != InitElts; ++j)
2173 Args.push_back(Elt: j + Offset);
2174 Args.resize(N: ResElts, NV: -1);
2175 }
2176
2177 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2178 // merging subsequent shuffles into this one.
2179 if (CurIdx == 0)
2180 std::swap(a&: V, b&: Init);
2181 V = Builder.CreateShuffleVector(V1: V, V2: Init, Mask: Args, Name: "vecinit");
2182 VIsPoisonShuffle = isa<llvm::PoisonValue>(Val: Init);
2183 CurIdx += InitElts;
2184 }
2185
2186 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2187 // Emit remaining default initializers.
2188 llvm::Type *EltTy = VType->getElementType();
2189
2191 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2192 Value *Idx = Builder.getInt32(C: CurIdx);
2193 llvm::Value *Init = llvm::Constant::getNullValue(Ty: EltTy);
2194 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx, Name: "vecinit");
2195 }
2196 return V;
2197}
2198
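/// Decide whether a class pointer adjustment must guard against a null source
/// value: 'this' and glvalue casts are assumed non-null, and unchecked
/// derived-to-base casts are never null-checked.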
2199bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
2200 const Expr *E = CE->getSubExpr();
2201
2202 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2203 return false;
2204
2205 if (isa<CXXThisExpr>(Val: E->IgnoreParens())) {
2206 // We always assume that 'this' is never null.
2207 return false;
2208 }
2209
2210 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2211 // And that glvalue casts are never null.
2212 if (ICE->isGLValue())
2213 return false;
2214 }
2215
2216 return true;
2217}
2218
2219// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2220 // have to handle a broader range of conversions than explicit casts, as they
2221// handle things like function to ptr-to-function decay etc.
2222Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2223 Expr *E = CE->getSubExpr();
2224 QualType DestTy = CE->getType();
2225 CastKind Kind = CE->getCastKind();
2226 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2227
2228 // These cases are generally not written to ignore the result of
2229 // evaluating their sub-expressions, so we clear this now.
2230 bool Ignored = TestAndClearIgnoreResultAssign();
2231
2232 // Since almost all cast kinds apply to scalars, this switch doesn't have
2233 // a default case, so the compiler will warn on a missing case. The cases
2234 // are in the same order as in the CastKind enum.
2235 switch (Kind) {
2236 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2237 case CK_BuiltinFnToFnPtr:
2238 llvm_unreachable("builtin functions are handled elsewhere");
2239
2240 case CK_LValueBitCast:
2241 case CK_ObjCObjectLValueCast: {
2242 Address Addr = EmitLValue(E).getAddress();
2243 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2244 LValue LV = CGF.MakeAddrLValue(Addr, T: DestTy);
2245 return EmitLoadOfLValue(LV, Loc: CE->getExprLoc());
2246 }
2247
2248 case CK_LValueToRValueBitCast: {
2249 LValue SourceLVal = CGF.EmitLValue(E);
2250 Address Addr =
2251 SourceLVal.getAddress().withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2252 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2253 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2254 return EmitLoadOfLValue(LV: DestLV, Loc: CE->getExprLoc());
2255 }
2256
2257 case CK_CPointerToObjCPointerCast:
2258 case CK_BlockPointerToObjCPointerCast:
2259 case CK_AnyPointerToBlockPointerCast:
2260 case CK_BitCast: {
2261 Value *Src = Visit(E: const_cast<Expr*>(E));
2262 llvm::Type *SrcTy = Src->getType();
2263 llvm::Type *DstTy = ConvertType(T: DestTy);
2264 assert(
2265 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2266 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2267 "Address-space cast must be used to convert address spaces");
2268
2269 if (CGF.SanOpts.has(K: SanitizerKind::CFIUnrelatedCast)) {
2270 if (auto *PT = DestTy->getAs<PointerType>()) {
2271 CGF.EmitVTablePtrCheckForCast(
2272 T: PT->getPointeeType(),
2273 Derived: Address(Src,
2274 CGF.ConvertTypeForMem(
2275 T: E->getType()->castAs<PointerType>()->getPointeeType()),
2276 CGF.getPointerAlign()),
2277 /*MayBeNull=*/true, TCK: CodeGenFunction::CFITCK_UnrelatedCast,
2278 Loc: CE->getBeginLoc());
2279 }
2280 }
2281
2282 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2283 const QualType SrcType = E->getType();
2284
2285 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2286 // Casting to pointer that could carry dynamic information (provided by
2287 // invariant.group) requires launder.
2288 Src = Builder.CreateLaunderInvariantGroup(Ptr: Src);
2289 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2290 // Casting to pointer that does not carry dynamic information (provided
2291 // by invariant.group) requires stripping it. Note that we don't do it
2292 // if the source could not be dynamic type and destination could be
2293 // dynamic because dynamic information is already laundered. It is
2294 // because launder(strip(src)) == launder(src), so there is no need to
2295 // add extra strip before launder.
2296 Src = Builder.CreateStripInvariantGroup(Ptr: Src);
2297 }
2298 }
2299
2300 // Update heapallocsite metadata when there is an explicit pointer cast.
2301 if (auto *CI = dyn_cast<llvm::CallBase>(Val: Src)) {
2302 if (CI->getMetadata(Kind: "heapallocsite") && isa<ExplicitCastExpr>(Val: CE) &&
2303 !isa<CastExpr>(Val: E)) {
2304 QualType PointeeType = DestTy->getPointeeType();
2305 if (!PointeeType.isNull())
2306 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CallSite: CI, AllocatedTy: PointeeType,
2307 Loc: CE->getExprLoc());
2308 }
2309 }
2310
2311 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2312 // same element type, use the llvm.vector.insert intrinsic to perform the
2313 // bitcast.
2314 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy)) {
2315 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Val: DstTy)) {
2316 // If we are casting a fixed i8 vector to a scalable i1 predicate
2317 // vector, use a vector insert and bitcast the result.
2318 if (ScalableDstTy->getElementType()->isIntegerTy(Bitwidth: 1) &&
2319 ScalableDstTy->getElementCount().isKnownMultipleOf(RHS: 8) &&
2320 FixedSrcTy->getElementType()->isIntegerTy(Bitwidth: 8)) {
2321 ScalableDstTy = llvm::ScalableVectorType::get(
2322 ElementType: FixedSrcTy->getElementType(),
2323 MinNumElts: ScalableDstTy->getElementCount().getKnownMinValue() / 8);
2324 }
2325 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2326 llvm::Value *UndefVec = llvm::UndefValue::get(T: ScalableDstTy);
2327 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGF.CGM.Int64Ty);
2328 llvm::Value *Result = Builder.CreateInsertVector(
2329 DstType: ScalableDstTy, SrcVec: UndefVec, SubVec: Src, Idx: Zero, Name: "cast.scalable");
2330 if (Result->getType() != DstTy)
2331 Result = Builder.CreateBitCast(V: Result, DestTy: DstTy);
2332 return Result;
2333 }
2334 }
2335 }
2336
2337 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2338 // same element type, use the llvm.vector.extract intrinsic to perform the
2339 // bitcast.
2340 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(Val: SrcTy)) {
2341 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(Val: DstTy)) {
2342 // If we are casting a scalable i1 predicate vector to a fixed i8
2343 // vector, bitcast the source and use a vector extract.
2344 if (ScalableSrcTy->getElementType()->isIntegerTy(Bitwidth: 1) &&
2345 ScalableSrcTy->getElementCount().isKnownMultipleOf(RHS: 8) &&
2346 FixedDstTy->getElementType()->isIntegerTy(Bitwidth: 8)) {
2347 ScalableSrcTy = llvm::ScalableVectorType::get(
2348 ElementType: FixedDstTy->getElementType(),
2349 MinNumElts: ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2350 Src = Builder.CreateBitCast(V: Src, DestTy: ScalableSrcTy);
2351 }
2352 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) {
2353 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGF.CGM.Int64Ty);
2354 return Builder.CreateExtractVector(DstType: DstTy, SrcVec: Src, Idx: Zero, Name: "cast.fixed");
2355 }
2356 }
2357 }
2358
2359 // Perform VLAT <-> VLST bitcast through memory.
2360 // TODO: since the llvm.vector.{insert,extract} intrinsics
2361 // require the element types of the vectors to be the same, we
2362 // need to keep this around for bitcasts between VLAT <-> VLST where
2363 // the element types of the vectors are not the same, until we figure
2364 // out a better way of doing these casts.
2365 if ((isa<llvm::FixedVectorType>(Val: SrcTy) &&
2366 isa<llvm::ScalableVectorType>(Val: DstTy)) ||
2367 (isa<llvm::ScalableVectorType>(Val: SrcTy) &&
2368 isa<llvm::FixedVectorType>(Val: DstTy))) {
2369 Address Addr = CGF.CreateDefaultAlignTempAlloca(Ty: SrcTy, Name: "saved-value");
2370 LValue LV = CGF.MakeAddrLValue(Addr, T: E->getType());
2371 CGF.EmitStoreOfScalar(value: Src, lvalue: LV);
2372 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2373 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2374 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2375 return EmitLoadOfLValue(LV: DestLV, Loc: CE->getExprLoc());
2376 }
2377
2378 llvm::Value *Result = Builder.CreateBitCast(V: Src, DestTy: DstTy);
2379 return CGF.authPointerToPointerCast(ResultPtr: Result, SourceType: E->getType(), DestType: DestTy);
2380 }
2381 case CK_AddressSpaceConversion: {
2382 Expr::EvalResult Result;
2383 if (E->EvaluateAsRValue(Result, Ctx: CGF.getContext()) &&
2384 Result.Val.isNullPointer()) {
2385 // If E has side effect, it is emitted even if its final result is a
2386 // null pointer. In that case, a DCE pass should be able to
2387 // eliminate the useless instructions emitted during translating E.
2388 if (Result.HasSideEffects)
2389 Visit(E);
2390 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(
2391 Val: ConvertType(T: DestTy)), QT: DestTy);
2392 }
2393 // Since the target may map different address spaces in the AST to the same address
2394 // space, an address space conversion may end up as a bitcast.
2395 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2396 CGF, V: Visit(E), SrcAddr: E->getType()->getPointeeType().getAddressSpace(),
2397 DestAddr: DestTy->getPointeeType().getAddressSpace(), DestTy: ConvertType(T: DestTy));
2398 }
2399 case CK_AtomicToNonAtomic:
2400 case CK_NonAtomicToAtomic:
2401 case CK_UserDefinedConversion:
2402 return Visit(E: const_cast<Expr*>(E));
2403
2404 case CK_NoOp: {
2405 return CE->changesVolatileQualification() ? EmitLoadOfLValue(E: CE)
2406 : Visit(E: const_cast<Expr *>(E));
2407 }
2408
2409 case CK_BaseToDerived: {
2410 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2411 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2412
2413 Address Base = CGF.EmitPointerWithAlignment(Addr: E);
2414 Address Derived =
2415 CGF.GetAddressOfDerivedClass(Value: Base, Derived: DerivedClassDecl,
2416 PathBegin: CE->path_begin(), PathEnd: CE->path_end(),
2417 NullCheckValue: CGF.ShouldNullCheckClassCastValue(CE));
2418
2419 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2420 // performed and the object is not of the derived type.
2421 if (CGF.sanitizePerformTypeCheck())
2422 CGF.EmitTypeCheck(TCK: CodeGenFunction::TCK_DowncastPointer, Loc: CE->getExprLoc(),
2423 Addr: Derived, Type: DestTy->getPointeeType());
2424
2425 if (CGF.SanOpts.has(K: SanitizerKind::CFIDerivedCast))
2426 CGF.EmitVTablePtrCheckForCast(T: DestTy->getPointeeType(), Derived,
2427 /*MayBeNull=*/true,
2428 TCK: CodeGenFunction::CFITCK_DerivedCast,
2429 Loc: CE->getBeginLoc());
2430
2431 return CGF.getAsNaturalPointerTo(Addr: Derived, PointeeType: CE->getType()->getPointeeType());
2432 }
2433 case CK_UncheckedDerivedToBase:
2434 case CK_DerivedToBase: {
2435 // The EmitPointerWithAlignment path does this fine; just discard
2436 // the alignment.
2437 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitPointerWithAlignment(Addr: CE),
2438 PointeeType: CE->getType()->getPointeeType());
2439 }
2440
2441 case CK_Dynamic: {
2442 Address V = CGF.EmitPointerWithAlignment(Addr: E);
2443 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(Val: CE);
2444 return CGF.EmitDynamicCast(V, DCE);
2445 }
2446
2447 case CK_ArrayToPointerDecay:
2448 return CGF.getAsNaturalPointerTo(Addr: CGF.EmitArrayToPointerDecay(Array: E),
2449 PointeeType: CE->getType()->getPointeeType());
2450 case CK_FunctionToPointerDecay:
2451 return EmitLValue(E).getPointer(CGF);
2452
2453 case CK_NullToPointer:
2454 if (MustVisitNullValue(E))
2455 CGF.EmitIgnoredExpr(E);
2456
2457 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(Val: ConvertType(T: DestTy)),
2458 QT: DestTy);
2459
2460 case CK_NullToMemberPointer: {
2461 if (MustVisitNullValue(E))
2462 CGF.EmitIgnoredExpr(E);
2463
2464 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2465 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2466 }
2467
2468 case CK_ReinterpretMemberPointer:
2469 case CK_BaseToDerivedMemberPointer:
2470 case CK_DerivedToBaseMemberPointer: {
2471 Value *Src = Visit(E);
2472
2473 // Note that the AST doesn't distinguish between checked and
2474 // unchecked member pointer conversions, so we always have to
2475 // implement checked conversions here. This is inefficient when
2476 // actual control flow may be required in order to perform the
2477 // check, which it is for data member pointers (but not member
2478 // function pointers on Itanium and ARM).
2479 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, E: CE, Src);
2480 }
2481
2482 case CK_ARCProduceObject:
2483 return CGF.EmitARCRetainScalarExpr(expr: E);
2484 case CK_ARCConsumeObject:
2485 return CGF.EmitObjCConsumeObject(T: E->getType(), Ptr: Visit(E));
2486 case CK_ARCReclaimReturnedObject:
2487 return CGF.EmitARCReclaimReturnedObject(e: E, /*allowUnsafe*/ allowUnsafeClaim: Ignored);
2488 case CK_ARCExtendBlockObject:
2489 return CGF.EmitARCExtendBlockObject(expr: E);
2490
2491 case CK_CopyAndAutoreleaseBlockObject:
2492 return CGF.EmitBlockCopyAndAutorelease(Block: Visit(E), Ty: E->getType());
2493
2494 case CK_FloatingRealToComplex:
2495 case CK_FloatingComplexCast:
2496 case CK_IntegralRealToComplex:
2497 case CK_IntegralComplexCast:
2498 case CK_IntegralComplexToFloatingComplex:
2499 case CK_FloatingComplexToIntegralComplex:
2500 case CK_ConstructorConversion:
2501 case CK_ToUnion:
2502 case CK_HLSLArrayRValue:
2503 llvm_unreachable("scalar cast to non-scalar value");
2504
2505 case CK_LValueToRValue:
2506 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2507 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2508 return Visit(E: const_cast<Expr*>(E));
2509
2510 case CK_IntegralToPointer: {
2511 Value *Src = Visit(E: const_cast<Expr*>(E));
2512
2513 // First, convert to the correct width so that we control the kind of
2514 // extension.
2515 auto DestLLVMTy = ConvertType(T: DestTy);
2516 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2517 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2518 llvm::Value* IntResult =
2519 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
2520
2521 auto *IntToPtr = Builder.CreateIntToPtr(V: IntResult, DestTy: DestLLVMTy);
2522
2523 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2524 // Going from integer to pointer that could be dynamic requires reloading
2525 // dynamic information from invariant.group.
2526 if (DestTy.mayBeDynamicClass())
2527 IntToPtr = Builder.CreateLaunderInvariantGroup(Ptr: IntToPtr);
2528 }
2529
2530 IntToPtr = CGF.authPointerToPointerCast(ResultPtr: IntToPtr, SourceType: E->getType(), DestType: DestTy);
2531 return IntToPtr;
2532 }
2533 case CK_PointerToIntegral: {
2534 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2535 auto *PtrExpr = Visit(E);
2536
2537 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2538 const QualType SrcType = E->getType();
2539
2540 // Casting to integer requires stripping dynamic information as it does
2541 // not carry it.
2542 if (SrcType.mayBeDynamicClass())
2543 PtrExpr = Builder.CreateStripInvariantGroup(Ptr: PtrExpr);
2544 }
2545
2546 PtrExpr = CGF.authPointerToPointerCast(ResultPtr: PtrExpr, SourceType: E->getType(), DestType: DestTy);
2547 return Builder.CreatePtrToInt(V: PtrExpr, DestTy: ConvertType(T: DestTy));
2548 }
2549 case CK_ToVoid: {
2550 CGF.EmitIgnoredExpr(E);
2551 return nullptr;
2552 }
2553 case CK_MatrixCast: {
2554 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2555 Loc: CE->getExprLoc());
2556 }
2557 case CK_VectorSplat: {
2558 llvm::Type *DstTy = ConvertType(T: DestTy);
2559 Value *Elt = Visit(E: const_cast<Expr *>(E));
2560 // Splat the element across to all elements
2561 llvm::ElementCount NumElements =
2562 cast<llvm::VectorType>(Val: DstTy)->getElementCount();
2563 return Builder.CreateVectorSplat(EC: NumElements, V: Elt, Name: "splat");
2564 }
2565
2566 case CK_FixedPointCast:
2567 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2568 Loc: CE->getExprLoc());
2569
2570 case CK_FixedPointToBoolean:
2571 assert(E->getType()->isFixedPointType() &&
2572 "Expected src type to be fixed point type");
2573 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2574 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2575 Loc: CE->getExprLoc());
2576
2577 case CK_FixedPointToIntegral:
2578 assert(E->getType()->isFixedPointType() &&
2579 "Expected src type to be fixed point type");
2580 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2581 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2582 Loc: CE->getExprLoc());
2583
2584 case CK_IntegralToFixedPoint:
2585 assert(E->getType()->isIntegerType() &&
2586 "Expected src type to be an integer");
2587 assert(DestTy->isFixedPointType() &&
2588 "Expected dest type to be fixed point type");
2589 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2590 Loc: CE->getExprLoc());
2591
2592 case CK_IntegralCast: {
2593 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2594 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2595 return Builder.CreateIntCast(V: Visit(E), DestTy: ConvertType(T: DestTy),
2596 isSigned: SrcElTy->isSignedIntegerOrEnumerationType(),
2597 Name: "conv");
2598 }
2599 ScalarConversionOpts Opts;
2600 if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2601 if (!ICE->isPartOfExplicitCast())
2602 Opts = ScalarConversionOpts(CGF.SanOpts);
2603 }
2604 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2605 Loc: CE->getExprLoc(), Opts);
2606 }
2607 case CK_IntegralToFloating: {
2608 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2609 // TODO: Support constrained FP intrinsics.
2610 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2611 if (SrcElTy->isSignedIntegerOrEnumerationType())
2612 return Builder.CreateSIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2613 return Builder.CreateUIToFP(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2614 }
2615 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2616 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2617 Loc: CE->getExprLoc());
2618 }
2619 case CK_FloatingToIntegral: {
2620 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2621 // TODO: Support constrained FP intrinsics.
2622 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2623 if (DstElTy->isSignedIntegerOrEnumerationType())
2624 return Builder.CreateFPToSI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2625 return Builder.CreateFPToUI(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2626 }
2627 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2628 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2629 Loc: CE->getExprLoc());
2630 }
2631 case CK_FloatingCast: {
2632 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2633 // TODO: Support constrained FP intrinsics.
2634 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2635 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2636 if (DstElTy->castAs<BuiltinType>()->getKind() <
2637 SrcElTy->castAs<BuiltinType>()->getKind())
2638 return Builder.CreateFPTrunc(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2639 return Builder.CreateFPExt(V: Visit(E), DestTy: ConvertType(T: DestTy), Name: "conv");
2640 }
2641 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2642 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2643 Loc: CE->getExprLoc());
2644 }
2645 case CK_FixedPointToFloating:
2646 case CK_FloatingToFixedPoint: {
2647 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2648 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2649 Loc: CE->getExprLoc());
2650 }
2651 case CK_BooleanToSignedIntegral: {
2652 ScalarConversionOpts Opts;
2653 Opts.TreatBooleanAsSigned = true;
2654 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2655 Loc: CE->getExprLoc(), Opts);
2656 }
2657 case CK_IntegralToBoolean:
2658 return EmitIntToBoolConversion(V: Visit(E));
2659 case CK_PointerToBoolean:
2660 return EmitPointerToBoolConversion(V: Visit(E), QT: E->getType());
2661 case CK_FloatingToBoolean: {
2662 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2663 return EmitFloatToBoolConversion(V: Visit(E));
2664 }
2665 case CK_MemberPointerToBoolean: {
2666 llvm::Value *MemPtr = Visit(E);
2667 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2668 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2669 }
2670
2671 case CK_FloatingComplexToReal:
2672 case CK_IntegralComplexToReal:
2673 return CGF.EmitComplexExpr(E, IgnoreReal: false, IgnoreImag: true).first;
2674
2675 case CK_FloatingComplexToBoolean:
2676 case CK_IntegralComplexToBoolean: {
2677 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2678
2679 // TODO: kill this function off, inline appropriate case here
2680 return EmitComplexToScalarConversion(Src: V, SrcTy: E->getType(), DstTy: DestTy,
2681 Loc: CE->getExprLoc());
2682 }
2683
2684 case CK_ZeroToOCLOpaqueType: {
2685 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2686 DestTy->isOCLIntelSubgroupAVCType()) &&
2687 "CK_ZeroToOCLEvent cast on non-event type");
2688 return llvm::Constant::getNullValue(Ty: ConvertType(T: DestTy));
2689 }
2690
2691 case CK_IntToOCLSampler:
2692 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2693
2694 case CK_HLSLVectorTruncation: {
2695 assert(DestTy->isVectorType() && "Expected dest type to be vector type");
2696 Value *Vec = Visit(E: const_cast<Expr *>(E));
2697 SmallVector<int, 16> Mask;
2698 unsigned NumElts = DestTy->castAs<VectorType>()->getNumElements();
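// Build the identity prefix mask [0, 1, ..., NumElts-1]; the shuffle then
// keeps only the leading lanes of the wider source vector.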
2699 for (unsigned I = 0; I != NumElts; ++I)
2700 Mask.push_back(Elt: I);
2701
2702 return Builder.CreateShuffleVector(V: Vec, Mask, Name: "trunc");
2703 }
2704
2705 } // end of switch
2706
2707 llvm_unreachable("unknown scalar cast");
2708}
2709
2710Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2711 CodeGenFunction::StmtExprEvaluation eval(CGF);
2712 Address RetAlloca = CGF.EmitCompoundStmt(S: *E->getSubStmt(),
2713 GetLast: !E->getType()->isVoidType());
2714 if (!RetAlloca.isValid())
2715 return nullptr;
2716 return CGF.EmitLoadOfScalar(lvalue: CGF.MakeAddrLValue(Addr: RetAlloca, T: E->getType()),
2717 Loc: E->getExprLoc());
2718}
2719
2720Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2721 CodeGenFunction::RunCleanupsScope Scope(CGF);
2722 Value *V = Visit(E: E->getSubExpr());
2723 // Defend against dominance problems caused by jumps out of expression
2724 // evaluation through the shared cleanup block.
2725 Scope.ForceCleanup(ValuesToReload: {&V});
2726 return V;
2727}
2728
2729//===----------------------------------------------------------------------===//
2730// Unary Operators
2731//===----------------------------------------------------------------------===//
2732
2733static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2734 llvm::Value *InVal, bool IsInc,
2735 FPOptions FPFeatures) {
2736 BinOpInfo BinOp;
2737 BinOp.LHS = InVal;
2738 BinOp.RHS = llvm::ConstantInt::get(Ty: InVal->getType(), V: 1, IsSigned: false);
2739 BinOp.Ty = E->getType();
2740 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2741 BinOp.FPFeatures = FPFeatures;
2742 BinOp.E = E;
2743 return BinOp;
2744}
2745
2746llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2747 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2748 llvm::Value *Amount =
2749 llvm::ConstantInt::get(Ty: InVal->getType(), V: IsInc ? 1 : -1, IsSigned: true);
2750 StringRef Name = IsInc ? "inc" : "dec";
2751 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2752 case LangOptions::SOB_Defined:
2753 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
2754 return Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
2755 [[fallthrough]];
2756 case LangOptions::SOB_Undefined:
2757 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
2758 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2759 [[fallthrough]];
2760 case LangOptions::SOB_Trapping:
2761 if (!E->canOverflow())
2762 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2763 return EmitOverflowCheckedBinOp(Ops: createBinOpInfoFromIncDec(
2764 E, InVal, IsInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts())));
2765 }
2766 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2767}
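// Illustrative sketch (an assumption, not part of the original source): for
// `int i; ++i;` the three signed-overflow modes handled above correspond
// roughly to:
//   -fwrapv  (SOB_Defined):   %inc = add i32 %i.val, 1
//   default  (SOB_Undefined): %inc = add nsw i32 %i.val, 1
//   -ftrapv  (SOB_Trapping):  @llvm.sadd.with.overflow.i32 plus a branch to a
//                             trap/handler block when the overflow bit is set.
// Exact value names and block layout depend on the surrounding code.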
2768
2769namespace {
2770/// Handles check and update for lastprivate conditional variables.
2771class OMPLastprivateConditionalUpdateRAII {
2772private:
2773 CodeGenFunction &CGF;
2774 const UnaryOperator *E;
2775
2776public:
2777 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2778 const UnaryOperator *E)
2779 : CGF(CGF), E(E) {}
2780 ~OMPLastprivateConditionalUpdateRAII() {
2781 if (CGF.getLangOpts().OpenMP)
2782 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2783 CGF, LHS: E->getSubExpr());
2784 }
2785};
2786} // namespace
2787
2788llvm::Value *
2789ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2790 bool isInc, bool isPre) {
2791 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2792 QualType type = E->getSubExpr()->getType();
2793 llvm::PHINode *atomicPHI = nullptr;
2794 llvm::Value *value;
2795 llvm::Value *input;
2796 llvm::Value *Previous = nullptr;
2797 QualType SrcType = E->getType();
2798
2799 int amount = (isInc ? 1 : -1);
2800 bool isSubtraction = !isInc;
2801
2802 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2803 type = atomicTy->getValueType();
2804 if (isInc && type->isBooleanType()) {
2805 llvm::Value *True = CGF.EmitToMemory(Value: Builder.getTrue(), Ty: type);
2806 if (isPre) {
2807 Builder.CreateStore(Val: True, Addr: LV.getAddress(), IsVolatile: LV.isVolatileQualified())
2808 ->setAtomic(Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
2809 return Builder.getTrue();
2810 }
2811 // For atomic bool increment, preincrement just stores true (handled above);
2812 // for postincrement, do an atomic exchange with true and return the old value.
2813 return Builder.CreateAtomicRMW(
2814 Op: llvm::AtomicRMWInst::Xchg, Addr: LV.getAddress(), Val: True,
2815 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
2816 }
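      // Illustrative example (assumption): for `_Atomic _Bool b; b++;` the
      // post-increment path above emits roughly
      //   %old = atomicrmw xchg ptr %b, i8 1 seq_cst
      // and returns the old value, while pre-increment simply stores true with
      // a sequentially consistent atomic store.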
2817 // Special-case atomic increment/decrement on integers by emitting
2818 // atomicrmw instructions. Skip this when overflow checking is requested,
2819 // falling into the slow path with the atomic cmpxchg loop instead.
2820 if (!type->isBooleanType() && type->isIntegerType() &&
2821 !(type->isUnsignedIntegerType() &&
2822 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
2823 CGF.getLangOpts().getSignedOverflowBehavior() !=
2824 LangOptions::SOB_Trapping) {
2825 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2826 llvm::AtomicRMWInst::Sub;
2827 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2828 llvm::Instruction::Sub;
2829 llvm::Value *amt = CGF.EmitToMemory(
2830 Value: llvm::ConstantInt::get(Ty: ConvertType(T: type), V: 1, IsSigned: true), Ty: type);
2831 llvm::Value *old =
2832 Builder.CreateAtomicRMW(Op: aop, Addr: LV.getAddress(), Val: amt,
2833 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
2834 return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
2835 }
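      // Illustrative example (assumption): `_Atomic int x; x++;` typically
      // lowers to `%old = atomicrmw add ptr %x, i32 1 seq_cst`; for a
      // pre-increment the expression result is then recomputed as %old + 1.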
2836 // Special case for atomic increment/decrement on floats
2837 if (type->isFloatingType()) {
2838 llvm::AtomicRMWInst::BinOp aop =
2839 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
2840 llvm::Instruction::BinaryOps op =
2841 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
2842 llvm::Value *amt = llvm::ConstantFP::get(
2843 Context&: VMContext, V: llvm::APFloat(static_cast<float>(1.0)));
2844 llvm::Value *old =
2845 Builder.CreateAtomicRMW(Op: aop, Addr: LV.getAddress(), Val: amt,
2846 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
2847 return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
2848 }
2849 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
2850 input = value;
2851 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2852 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2853 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
2854 value = CGF.EmitToMemory(Value: value, Ty: type);
2855 Builder.CreateBr(Dest: opBB);
2856 Builder.SetInsertPoint(opBB);
2857 atomicPHI = Builder.CreatePHI(Ty: value->getType(), NumReservedValues: 2);
2858 atomicPHI->addIncoming(V: value, BB: startBB);
2859 value = atomicPHI;
2860 } else {
2861 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
2862 input = value;
2863 }
2864
2865 // Special case of integer increment that we have to check first: bool++.
2866 // Due to promotion rules, we get:
2867 // bool++ -> bool = bool + 1
2868 // -> bool = (int)bool + 1
2869 // -> bool = ((int)bool + 1 != 0)
2870 // An interesting aspect of this is that increment is always true.
2871 // Decrement does not have this property.
2872 if (isInc && type->isBooleanType()) {
2873 value = Builder.getTrue();
2874
2875 // Most common case by far: integer increment.
2876 } else if (type->isIntegerType()) {
2877 QualType promotedType;
2878 bool canPerformLossyDemotionCheck = false;
2879 if (CGF.getContext().isPromotableIntegerType(T: type)) {
2880 promotedType = CGF.getContext().getPromotedIntegerType(PromotableType: type);
2881 assert(promotedType != type && "Shouldn't promote to the same type.");
2882 canPerformLossyDemotionCheck = true;
2883 canPerformLossyDemotionCheck &=
2884 CGF.getContext().getCanonicalType(T: type) !=
2885 CGF.getContext().getCanonicalType(T: promotedType);
2886 canPerformLossyDemotionCheck &=
2887 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2888 SrcType: type, DstType: promotedType);
2889 assert((!canPerformLossyDemotionCheck ||
2890 type->isSignedIntegerOrEnumerationType() ||
2891 promotedType->isSignedIntegerOrEnumerationType() ||
2892 ConvertType(type)->getScalarSizeInBits() ==
2893 ConvertType(promotedType)->getScalarSizeInBits()) &&
2894 "The following check expects that if we do promotion to different "
2895 "underlying canonical type, at least one of the types (either "
2896 "base or promoted) will be signed, or the bitwidths will match.");
2897 }
2898 if (CGF.SanOpts.hasOneOf(
2899 K: SanitizerKind::ImplicitIntegerArithmeticValueChange |
2900 SanitizerKind::ImplicitBitfieldConversion) &&
2901 canPerformLossyDemotionCheck) {
2902 // `x += 1` (for `x` narrower than int) is modeled as
2903 // promotion + arithmetic + demotion, so lossy demotion is easy to catch
2904 // there. Inc/dec on a type narrower than int cannot overflow because of
2905 // promotion rules, so we normally omit the promotion and demotion, which
2906 // means we cannot catch a lossy "demotion". Because we still want to catch
2907 // these cases when the sanitizer is enabled, we perform the promotion,
2908 // then perform the increment/decrement in the wider type, and finally
2909 // perform the demotion. This will catch lossy demotions.
2910
2911 // We have a special case for bitfields defined using all the bits of the
2912 // type. In this case we need to do the same trick as for the integer
2913 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
2914
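      // Illustrative sketch (assumption): with the implicit-conversion
      // sanitizer enabled, `short s; s++;` is emitted roughly as
      //   %prom = sext i16 %s to i32
      //   %inc  = add i32 %prom, 1
      //   %dem  = trunc i32 %inc to i16   ; plus a lossy-truncation check
      // rather than as a bare 16-bit add.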
2915 value = EmitScalarConversion(Src: value, SrcType: type, DstType: promotedType, Loc: E->getExprLoc());
2916 Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
2917 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
2918 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2919 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
2920 // checks will take care of the conversion.
2921 ScalarConversionOpts Opts;
2922 if (!LV.isBitField())
2923 Opts = ScalarConversionOpts(CGF.SanOpts);
2924 else if (CGF.SanOpts.has(K: SanitizerKind::ImplicitBitfieldConversion)) {
2925 Previous = value;
2926 SrcType = promotedType;
2927 }
2928
2929 value = EmitScalarConversion(Src: value, SrcType: promotedType, DstType: type, Loc: E->getExprLoc(),
2930 Opts);
2931
2932 // Note that signed integer inc/dec with width less than int can't
2933 // overflow because of promotion rules; we're just eliding a few steps
2934 // here.
2935 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2936 value = EmitIncDecConsiderOverflowBehavior(E, InVal: value, IsInc: isInc);
2937 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2938 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) {
2939 value = EmitOverflowCheckedBinOp(Ops: createBinOpInfoFromIncDec(
2940 E, InVal: value, IsInc: isInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts())));
2941 } else {
2942 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
2943 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
2944 }
2945
2946 // Next most common: pointer increment.
2947 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2948 QualType type = ptr->getPointeeType();
2949
2950 // VLA types don't have constant size.
2951 if (const VariableArrayType *vla
2952 = CGF.getContext().getAsVariableArrayType(T: type)) {
2953 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2954 if (!isInc) numElts = Builder.CreateNSWNeg(V: numElts, Name: "vla.negsize");
2955 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: vla->getElementType());
2956 if (CGF.getLangOpts().isSignedOverflowDefined())
2957 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: numElts, Name: "vla.inc");
2958 else
2959 value = CGF.EmitCheckedInBoundsGEP(
2960 ElemTy: elemTy, Ptr: value, IdxList: numElts, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
2961 Loc: E->getExprLoc(), Name: "vla.inc");
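      // Illustrative example (assumption): for `int (*p)[n]; p++;` the GEP
      // index above is the runtime VLA element count n (negated for p--), so
      // the pointer advances by n * sizeof(int) bytes.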
2962
2963 // Arithmetic on function pointers (!) is just +-1.
2964 } else if (type->isFunctionType()) {
2965 llvm::Value *amt = Builder.getInt32(C: amount);
2966
2967 if (CGF.getLangOpts().isSignedOverflowDefined())
2968 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: amt, Name: "incdec.funcptr");
2969 else
2970 value =
2971 CGF.EmitCheckedInBoundsGEP(ElemTy: CGF.Int8Ty, Ptr: value, IdxList: amt,
2972 /*SignedIndices=*/false, IsSubtraction: isSubtraction,
2973 Loc: E->getExprLoc(), Name: "incdec.funcptr");
2974
2975 // For everything else, we can just do a simple increment.
2976 } else {
2977 llvm::Value *amt = Builder.getInt32(C: amount);
2978 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: type);
2979 if (CGF.getLangOpts().isSignedOverflowDefined())
2980 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: amt, Name: "incdec.ptr");
2981 else
2982 value = CGF.EmitCheckedInBoundsGEP(
2983 ElemTy: elemTy, Ptr: value, IdxList: amt, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
2984 Loc: E->getExprLoc(), Name: "incdec.ptr");
2985 }
2986
2987 // Vector increment/decrement.
2988 } else if (type->isVectorType()) {
2989 if (type->hasIntegerRepresentation()) {
2990 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount);
2991
2992 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
2993 } else {
2994 value = Builder.CreateFAdd(
2995 L: value,
2996 R: llvm::ConstantFP::get(Ty: value->getType(), V: amount),
2997 Name: isInc ? "inc" : "dec");
2998 }
2999
3000 // Floating point.
3001 } else if (type->isRealFloatingType()) {
3002 // Add the inc/dec to the real part.
3003 llvm::Value *amt;
3004 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3005
3006 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3007 // Another special case: half FP increment should be done via float
3008 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
3009 value = Builder.CreateCall(
3010 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_from_fp16,
3011 Tys: CGF.CGM.FloatTy),
3012 Args: input, Name: "incdec.conv");
3013 } else {
3014 value = Builder.CreateFPExt(V: input, DestTy: CGF.CGM.FloatTy, Name: "incdec.conv");
3015 }
3016 }
3017
3018 if (value->getType()->isFloatTy())
3019 amt = llvm::ConstantFP::get(Context&: VMContext,
3020 V: llvm::APFloat(static_cast<float>(amount)));
3021 else if (value->getType()->isDoubleTy())
3022 amt = llvm::ConstantFP::get(Context&: VMContext,
3023 V: llvm::APFloat(static_cast<double>(amount)));
3024 else {
3025 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3026 // Convert from float.
3027 llvm::APFloat F(static_cast<float>(amount));
3028 bool ignored;
3029 const llvm::fltSemantics *FS;
3030 // Don't use getFloatTypeSemantics because Half isn't
3031 // necessarily represented using the "half" LLVM type.
3032 if (value->getType()->isFP128Ty())
3033 FS = &CGF.getTarget().getFloat128Format();
3034 else if (value->getType()->isHalfTy())
3035 FS = &CGF.getTarget().getHalfFormat();
3036 else if (value->getType()->isBFloatTy())
3037 FS = &CGF.getTarget().getBFloat16Format();
3038 else if (value->getType()->isPPC_FP128Ty())
3039 FS = &CGF.getTarget().getIbm128Format();
3040 else
3041 FS = &CGF.getTarget().getLongDoubleFormat();
3042 F.convert(ToSemantics: *FS, RM: llvm::APFloat::rmTowardZero, losesInfo: &ignored);
3043 amt = llvm::ConstantFP::get(Context&: VMContext, V: F);
3044 }
3045 value = Builder.CreateFAdd(L: value, R: amt, Name: isInc ? "inc" : "dec");
3046
3047 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3048 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
3049 value = Builder.CreateCall(
3050 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::convert_to_fp16,
3051 Tys: CGF.CGM.FloatTy),
3052 Args: value, Name: "incdec.conv");
3053 } else {
3054 value = Builder.CreateFPTrunc(V: value, DestTy: input->getType(), Name: "incdec.conv");
3055 }
3056 }
3057
3058 // Fixed-point types.
3059 } else if (type->isFixedPointType()) {
3060 // Fixed-point types are tricky. In some cases, it isn't possible to
3061 // represent a 1 or a -1 in the type at all. Piggyback off of
3062 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3063 BinOpInfo Info;
3064 Info.E = E;
3065 Info.Ty = E->getType();
3066 Info.Opcode = isInc ? BO_Add : BO_Sub;
3067 Info.LHS = value;
3068 Info.RHS = llvm::ConstantInt::get(Ty: value->getType(), V: 1, IsSigned: false);
3069 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3070 // since -1 is guaranteed to be representable.
3071 if (type->isSignedFixedPointType()) {
3072 Info.Opcode = isInc ? BO_Sub : BO_Add;
3073 Info.RHS = Builder.CreateNeg(V: Info.RHS);
3074 }
3075 // Now, convert from our invented integer literal to the type of the unary
3076 // op. This will upscale and saturate if necessary. This value can become
3077 // undef in some cases.
3078 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3079 auto DstSema = CGF.getContext().getFixedPointSemantics(Ty: Info.Ty);
3080 Info.RHS = FPBuilder.CreateIntegerToFixed(Src: Info.RHS, SrcIsSigned: true, DstSema);
3081 value = EmitFixedPointBinOp(Ops: Info);
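    // Illustrative example (assumption): for a saturating signed type such as
    // `_Sat short _Fract f; f++;`, +1 is not representable in the type, so
    // the rewrite above turns the increment into `f - (-1)` and lets
    // EmitFixedPointBinOp handle the saturating arithmetic.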
3082
3083 // Objective-C pointer types.
3084 } else {
3085 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3086
3087 CharUnits size = CGF.getContext().getTypeSizeInChars(T: OPT->getObjectType());
3088 if (!isInc) size = -size;
3089 llvm::Value *sizeValue =
3090 llvm::ConstantInt::get(Ty: CGF.SizeTy, V: size.getQuantity());
3091
3092 if (CGF.getLangOpts().isSignedOverflowDefined())
3093 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, Name: "incdec.objptr");
3094 else
3095 value = CGF.EmitCheckedInBoundsGEP(
3096 ElemTy: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
3097 Loc: E->getExprLoc(), Name: "incdec.objptr");
3098 value = Builder.CreateBitCast(V: value, DestTy: input->getType());
3099 }
3100
3101 if (atomicPHI) {
3102 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3103 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
3104 auto Pair = CGF.EmitAtomicCompareExchange(
3105 Obj: LV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: value), Loc: E->getExprLoc());
3106 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: type);
3107 llvm::Value *success = Pair.second;
3108 atomicPHI->addIncoming(V: old, BB: curBlock);
3109 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
3110 Builder.SetInsertPoint(contBB);
3111 return isPre ? value : input;
3112 }
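  // Illustrative control flow for the generic atomic path above (assumption):
  //   entry:       %orig = <atomic load of the lvalue>
  //   atomic_op:   %phi = phi [ %orig, %entry ], [ %old, %atomic_op ]
  //                %new = <incremented/decremented %phi>
  //                %old, %ok = cmpxchg expecting %phi, storing %new
  //                br %ok ? %atomic_cont : %atomic_op
  //   atomic_cont: result is %new (pre) or %orig (post)
  // The exact IR depends on EmitAtomicCompareExchange and the value type.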
3113
3114 // Store the updated result through the lvalue.
3115 if (LV.isBitField()) {
3116 Value *Src = Previous ? Previous : value;
3117 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: value), Dst: LV, Result: &value);
3118 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: value, DstType: E->getType(),
3119 Info: LV.getBitFieldInfo(), Loc: E->getExprLoc());
3120 } else
3121 CGF.EmitStoreThroughLValue(Src: RValue::get(V: value), Dst: LV);
3122
3123 // If this is a postinc, return the value read from memory, otherwise use the
3124 // updated value.
3125 return isPre ? value : input;
3126}
3127
3128
3129Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3130 QualType PromotionType) {
3131 QualType promotionTy = PromotionType.isNull()
3132 ? getPromotionType(Ty: E->getSubExpr()->getType())
3133 : PromotionType;
3134 Value *result = VisitPlus(E, PromotionType: promotionTy);
3135 if (result && !promotionTy.isNull())
3136 result = EmitUnPromotedValue(result, ExprType: E->getType());
3137 return result;
3138}
3139
3140Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3141 QualType PromotionType) {
3142 // Note: this differs from gcc's behavior, most likely due to a bug in gcc.
3143 TestAndClearIgnoreResultAssign();
3144 if (!PromotionType.isNull())
3145 return CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3146 return Visit(E: E->getSubExpr());
3147}
3148
3149Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3150 QualType PromotionType) {
3151 QualType promotionTy = PromotionType.isNull()
3152 ? getPromotionType(Ty: E->getSubExpr()->getType())
3153 : PromotionType;
3154 Value *result = VisitMinus(E, PromotionType: promotionTy);
3155 if (result && !promotionTy.isNull())
3156 result = EmitUnPromotedValue(result, ExprType: E->getType());
3157 return result;
3158}
3159
3160Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3161 QualType PromotionType) {
3162 TestAndClearIgnoreResultAssign();
3163 Value *Op;
3164 if (!PromotionType.isNull())
3165 Op = CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
3166 else
3167 Op = Visit(E: E->getSubExpr());
3168
3169 // Generate a unary FNeg for FP ops.
3170 if (Op->getType()->isFPOrFPVectorTy())
3171 return Builder.CreateFNeg(V: Op, Name: "fneg");
3172
3173 // Emit unary minus with EmitSub so we handle overflow cases etc.
3174 BinOpInfo BinOp;
3175 BinOp.RHS = Op;
3176 BinOp.LHS = llvm::Constant::getNullValue(Ty: BinOp.RHS->getType());
3177 BinOp.Ty = E->getType();
3178 BinOp.Opcode = BO_Sub;
3179 BinOp.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3180 BinOp.E = E;
3181 return EmitSub(Ops: BinOp);
3182}
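// Illustrative example (assumption): for signed `int x`, `-x` is emitted as
// the subtraction `0 - x`, so with -fsanitize=signed-integer-overflow (or
// -ftrapv) the INT_MIN case goes through the same overflow-checked path as a
// binary '-'.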
3183
3184Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3185 TestAndClearIgnoreResultAssign();
3186 Value *Op = Visit(E: E->getSubExpr());
3187 return Builder.CreateNot(V: Op, Name: "not");
3188}
3189
3190Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3191 // Perform vector logical not on comparison with zero vector.
3192 if (E->getType()->isVectorType() &&
3193 E->getType()->castAs<VectorType>()->getVectorKind() ==
3194 VectorKind::Generic) {
3195 Value *Oper = Visit(E: E->getSubExpr());
3196 Value *Zero = llvm::Constant::getNullValue(Ty: Oper->getType());
3197 Value *Result;
3198 if (Oper->getType()->isFPOrFPVectorTy()) {
3199 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3200 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
3201 Result = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_OEQ, LHS: Oper, RHS: Zero, Name: "cmp");
3202 } else
3203 Result = Builder.CreateICmp(P: llvm::CmpInst::ICMP_EQ, LHS: Oper, RHS: Zero, Name: "cmp");
3204 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
3205 }
3206
3207 // Compare operand to zero.
3208 Value *BoolVal = CGF.EvaluateExprAsBool(E: E->getSubExpr());
3209
3210 // Invert value.
3211 // TODO: Could dynamically modify easy computations here. For example, if
3212 // the operand is an icmp ne, turn into icmp eq.
3213 BoolVal = Builder.CreateNot(V: BoolVal, Name: "lnot");
3214
3215 // ZExt result to the expr type.
3216 return Builder.CreateZExt(V: BoolVal, DestTy: ConvertType(T: E->getType()), Name: "lnot.ext");
3217}
3218
3219Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3220 // Try folding the offsetof to a constant.
3221 Expr::EvalResult EVResult;
3222 if (E->EvaluateAsInt(Result&: EVResult, Ctx: CGF.getContext())) {
3223 llvm::APSInt Value = EVResult.Val.getInt();
3224 return Builder.getInt(AI: Value);
3225 }
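  // Illustrative example (assumption): a non-constant case such as
  //   __builtin_offsetof(struct S, a[i].b)
  // is computed below as i * sizeof(a[0]) plus the constant offset of the
  // field b, by walking the Array, Field, and Base components in order.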
3226
3227 // Loop over the components of the offsetof to compute the value.
3228 unsigned n = E->getNumComponents();
3229 llvm::Type* ResultType = ConvertType(T: E->getType());
3230 llvm::Value* Result = llvm::Constant::getNullValue(Ty: ResultType);
3231 QualType CurrentType = E->getTypeSourceInfo()->getType();
3232 for (unsigned i = 0; i != n; ++i) {
3233 OffsetOfNode ON = E->getComponent(Idx: i);
3234 llvm::Value *Offset = nullptr;
3235 switch (ON.getKind()) {
3236 case OffsetOfNode::Array: {
3237 // Compute the index
3238 Expr *IdxExpr = E->getIndexExpr(Idx: ON.getArrayExprIndex());
3239 llvm::Value* Idx = CGF.EmitScalarExpr(E: IdxExpr);
3240 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3241 Idx = Builder.CreateIntCast(V: Idx, DestTy: ResultType, isSigned: IdxSigned, Name: "conv");
3242
3243 // Save the element type
3244 CurrentType =
3245 CGF.getContext().getAsArrayType(T: CurrentType)->getElementType();
3246
3247 // Compute the element size
3248 llvm::Value* ElemSize = llvm::ConstantInt::get(Ty: ResultType,
3249 V: CGF.getContext().getTypeSizeInChars(T: CurrentType).getQuantity());
3250
3251 // Multiply out to compute the result
3252 Offset = Builder.CreateMul(LHS: Idx, RHS: ElemSize);
3253 break;
3254 }
3255
3256 case OffsetOfNode::Field: {
3257 FieldDecl *MemberDecl = ON.getField();
3258 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3259 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
3260
3261 // Compute the index of the field in its parent.
3262 unsigned i = 0;
3263 // FIXME: It would be nice if we didn't have to loop here!
3264 for (RecordDecl::field_iterator Field = RD->field_begin(),
3265 FieldEnd = RD->field_end();
3266 Field != FieldEnd; ++Field, ++i) {
3267 if (*Field == MemberDecl)
3268 break;
3269 }
3270 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3271
3272 // Compute the offset to the field
3273 int64_t OffsetInt = RL.getFieldOffset(FieldNo: i) /
3274 CGF.getContext().getCharWidth();
3275 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt);
3276
3277 // Save the element type.
3278 CurrentType = MemberDecl->getType();
3279 break;
3280 }
3281
3282 case OffsetOfNode::Identifier:
3283 llvm_unreachable("dependent __builtin_offsetof");
3284
3285 case OffsetOfNode::Base: {
3286 if (ON.getBase()->isVirtual()) {
3287 CGF.ErrorUnsupported(S: E, Type: "virtual base in offsetof");
3288 continue;
3289 }
3290
3291 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3292 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
3293
3294 // Save the element type.
3295 CurrentType = ON.getBase()->getType();
3296
3297 // Compute the offset to the base.
3298 auto *BaseRT = CurrentType->castAs<RecordType>();
3299 auto *BaseRD = cast<CXXRecordDecl>(Val: BaseRT->getDecl());
3300 CharUnits OffsetInt = RL.getBaseClassOffset(Base: BaseRD);
3301 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt.getQuantity());
3302 break;
3303 }
3304 }
3305 Result = Builder.CreateAdd(LHS: Result, RHS: Offset);
3306 }
3307 return Result;
3308}
3309
3310/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type
3311/// of the argument of the sizeof expression as an integer.
3312Value *
3313ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3314 const UnaryExprOrTypeTraitExpr *E) {
3315 QualType TypeToSize = E->getTypeOfArgument();
3316 if (auto Kind = E->getKind();
3317 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
3318 if (const VariableArrayType *VAT =
3319 CGF.getContext().getAsVariableArrayType(T: TypeToSize)) {
3320 if (E->isArgumentType()) {
3321 // sizeof(type) - make sure to emit the VLA size.
3322 CGF.EmitVariablyModifiedType(Ty: TypeToSize);
3323 } else {
3324 // C99 6.5.3.4p2: If the argument is an expression of type
3325 // VLA, it is evaluated.
3326 CGF.EmitIgnoredExpr(E: E->getArgumentExpr());
3327 }
3328
3329 auto VlaSize = CGF.getVLASize(vla: VAT);
3330 llvm::Value *size = VlaSize.NumElts;
3331
3332 // Scale the number of non-VLA elements by the non-VLA element size.
3333 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: VlaSize.Type);
3334 if (!eltSize.isOne())
3335 size = CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize), RHS: size);
3336
3337 return size;
3338 }
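      // Illustrative example (assumption): for `int a[n]; sizeof(a);` the
      // code above yields size = n * sizeof(int), computed at run time with a
      // NUW multiply; the multiply is skipped when the element size is one
      // byte.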
3339 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3340 auto Alignment =
3341 CGF.getContext()
3342 .toCharUnitsFromBits(BitSize: CGF.getContext().getOpenMPDefaultSimdAlign(
3343 T: E->getTypeOfArgument()->getPointeeType()))
3344 .getQuantity();
3345 return llvm::ConstantInt::get(Ty: CGF.SizeTy, V: Alignment);
3346 } else if (E->getKind() == UETT_VectorElements) {
3347 auto *VecTy = cast<llvm::VectorType>(Val: ConvertType(T: E->getTypeOfArgument()));
3348 return Builder.CreateElementCount(DstType: CGF.SizeTy, EC: VecTy->getElementCount());
3349 }
3350
3351 // If this isn't sizeof(vla), the result must be constant; use the constant
3352 // folding logic so we don't have to duplicate it here.
3353 return Builder.getInt(AI: E->EvaluateKnownConstInt(Ctx: CGF.getContext()));
3354}
3355
3356Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3357 QualType PromotionType) {
3358 QualType promotionTy = PromotionType.isNull()
3359 ? getPromotionType(Ty: E->getSubExpr()->getType())
3360 : PromotionType;
3361 Value *result = VisitReal(E, PromotionType: promotionTy);
3362 if (result && !promotionTy.isNull())
3363 result = EmitUnPromotedValue(result, ExprType: E->getType());
3364 return result;
3365}
3366
3367Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3368 QualType PromotionType) {
3369 Expr *Op = E->getSubExpr();
3370 if (Op->getType()->isAnyComplexType()) {
3371 // If it's an l-value, load through the appropriate subobject l-value.
3372 // Note that we have to ask E because Op might be an l-value that
3373 // this won't work for, e.g. an Obj-C property.
3374 if (E->isGLValue()) {
3375 if (!PromotionType.isNull()) {
3376 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3377 E: Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3378 if (result.first)
3379 result.first = CGF.EmitPromotedValue(result, PromotionType).first;
3380 return result.first;
3381 } else {
3382 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3383 .getScalarVal();
3384 }
3385 }
3386 // Otherwise, calculate and project.
3387 return CGF.EmitComplexExpr(E: Op, IgnoreReal: false, IgnoreImag: true).first;
3388 }
3389
3390 if (!PromotionType.isNull())
3391 return CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3392 return Visit(E: Op);
3393}
3394
3395Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3396 QualType PromotionType) {
3397 QualType promotionTy = PromotionType.isNull()
3398 ? getPromotionType(Ty: E->getSubExpr()->getType())
3399 : PromotionType;
3400 Value *result = VisitImag(E, PromotionType: promotionTy);
3401 if (result && !promotionTy.isNull())
3402 result = EmitUnPromotedValue(result, ExprType: E->getType());
3403 return result;
3404}
3405
3406Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3407 QualType PromotionType) {
3408 Expr *Op = E->getSubExpr();
3409 if (Op->getType()->isAnyComplexType()) {
3410 // If it's an l-value, load through the appropriate subobject l-value.
3411 // Note that we have to ask E because Op might be an l-value that
3412 // this won't work for, e.g. an Obj-C property.
3413 if (Op->isGLValue()) {
3414 if (!PromotionType.isNull()) {
3415 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3416 E: Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3417 if (result.second)
3418 result.second = CGF.EmitPromotedValue(result, PromotionType).second;
3419 return result.second;
3420 } else {
3421 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3422 .getScalarVal();
3423 }
3424 }
3425 // Otherwise, calculate and project.
3426 return CGF.EmitComplexExpr(E: Op, IgnoreReal: true, IgnoreImag: false).second;
3427 }
3428
3429 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3430 // effects are evaluated, but not the actual value.
3431 if (Op->isGLValue())
3432 CGF.EmitLValue(E: Op);
3433 else if (!PromotionType.isNull())
3434 CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3435 else
3436 CGF.EmitScalarExpr(E: Op, IgnoreResultAssign: true);
3437 if (!PromotionType.isNull())
3438 return llvm::Constant::getNullValue(Ty: ConvertType(T: PromotionType));
3439 return llvm::Constant::getNullValue(Ty: ConvertType(T: E->getType()));
3440}
3441
3442//===----------------------------------------------------------------------===//
3443// Binary Operators
3444//===----------------------------------------------------------------------===//
3445
3446Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3447 QualType PromotionType) {
3448 return CGF.Builder.CreateFPExt(V: result, DestTy: ConvertType(T: PromotionType), Name: "ext");
3449}
3450
3451Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3452 QualType ExprType) {
3453 return CGF.Builder.CreateFPTrunc(V: result, DestTy: ConvertType(T: ExprType), Name: "unpromotion");
3454}
3455
3456Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3457 E = E->IgnoreParens();
3458 if (auto BO = dyn_cast<BinaryOperator>(Val: E)) {
3459 switch (BO->getOpcode()) {
3460#define HANDLE_BINOP(OP) \
3461 case BO_##OP: \
3462 return Emit##OP(EmitBinOps(BO, PromotionType));
3463 HANDLE_BINOP(Add)
3464 HANDLE_BINOP(Sub)
3465 HANDLE_BINOP(Mul)
3466 HANDLE_BINOP(Div)
3467#undef HANDLE_BINOP
3468 default:
3469 break;
3470 }
3471 } else if (auto UO = dyn_cast<UnaryOperator>(Val: E)) {
3472 switch (UO->getOpcode()) {
3473 case UO_Imag:
3474 return VisitImag(E: UO, PromotionType);
3475 case UO_Real:
3476 return VisitReal(E: UO, PromotionType);
3477 case UO_Minus:
3478 return VisitMinus(E: UO, PromotionType);
3479 case UO_Plus:
3480 return VisitPlus(E: UO, PromotionType);
3481 default:
3482 break;
3483 }
3484 }
3485 auto result = Visit(E: const_cast<Expr *>(E));
3486 if (result) {
3487 if (!PromotionType.isNull())
3488 return EmitPromotedValue(result, PromotionType);
3489 else
3490 return EmitUnPromotedValue(result, ExprType: E->getType());
3491 }
3492 return result;
3493}
3494
3495BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3496 QualType PromotionType) {
3497 TestAndClearIgnoreResultAssign();
3498 BinOpInfo Result;
3499 Result.LHS = CGF.EmitPromotedScalarExpr(E: E->getLHS(), PromotionType);
3500 Result.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType);
3501 if (!PromotionType.isNull())
3502 Result.Ty = PromotionType;
3503 else
3504 Result.Ty = E->getType();
3505 Result.Opcode = E->getOpcode();
3506 Result.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3507 Result.E = E;
3508 return Result;
3509}
3510
3511LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3512 const CompoundAssignOperator *E,
3513 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3514 Value *&Result) {
3515 QualType LHSTy = E->getLHS()->getType();
3516 BinOpInfo OpInfo;
3517
3518 if (E->getComputationResultType()->isAnyComplexType())
3519 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3520
3521 // Emit the RHS first. __block variables need to have the RHS evaluated
3522 // first; doing so should also improve codegen a little.
3523
3524 QualType PromotionTypeCR;
3525 PromotionTypeCR = getPromotionType(Ty: E->getComputationResultType());
3526 if (PromotionTypeCR.isNull())
3527 PromotionTypeCR = E->getComputationResultType();
3528 QualType PromotionTypeLHS = getPromotionType(Ty: E->getComputationLHSType());
3529 QualType PromotionTypeRHS = getPromotionType(Ty: E->getRHS()->getType());
3530 if (!PromotionTypeRHS.isNull())
3531 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType: PromotionTypeRHS);
3532 else
3533 OpInfo.RHS = Visit(E: E->getRHS());
3534 OpInfo.Ty = PromotionTypeCR;
3535 OpInfo.Opcode = E->getOpcode();
3536 OpInfo.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3537 OpInfo.E = E;
3538 // Load/convert the LHS.
3539 LValue LHSLV = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
3540
3541 llvm::PHINode *atomicPHI = nullptr;
3542 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3543 QualType type = atomicTy->getValueType();
3544 if (!type->isBooleanType() && type->isIntegerType() &&
3545 !(type->isUnsignedIntegerType() &&
3546 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
3547 CGF.getLangOpts().getSignedOverflowBehavior() !=
3548 LangOptions::SOB_Trapping) {
3549 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3550 llvm::Instruction::BinaryOps Op;
3551 switch (OpInfo.Opcode) {
3552 // There are no atomicrmw operations for *, %, /, <<, >>
3553 case BO_MulAssign: case BO_DivAssign:
3554 case BO_RemAssign:
3555 case BO_ShlAssign:
3556 case BO_ShrAssign:
3557 break;
3558 case BO_AddAssign:
3559 AtomicOp = llvm::AtomicRMWInst::Add;
3560 Op = llvm::Instruction::Add;
3561 break;
3562 case BO_SubAssign:
3563 AtomicOp = llvm::AtomicRMWInst::Sub;
3564 Op = llvm::Instruction::Sub;
3565 break;
3566 case BO_AndAssign:
3567 AtomicOp = llvm::AtomicRMWInst::And;
3568 Op = llvm::Instruction::And;
3569 break;
3570 case BO_XorAssign:
3571 AtomicOp = llvm::AtomicRMWInst::Xor;
3572 Op = llvm::Instruction::Xor;
3573 break;
3574 case BO_OrAssign:
3575 AtomicOp = llvm::AtomicRMWInst::Or;
3576 Op = llvm::Instruction::Or;
3577 break;
3578 default:
3579 llvm_unreachable("Invalid compound assignment type");
3580 }
3581 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3582 llvm::Value *Amt = CGF.EmitToMemory(
3583 Value: EmitScalarConversion(Src: OpInfo.RHS, SrcType: E->getRHS()->getType(), DstType: LHSTy,
3584 Loc: E->getExprLoc()),
3585 Ty: LHSTy);
3586 Value *OldVal = Builder.CreateAtomicRMW(
3587 Op: AtomicOp, Addr: LHSLV.getAddress(), Val: Amt,
3588 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3589
3590 // Since the operation is atomic, the result type is guaranteed to be the
3591 // same as the input in LLVM terms.
3592 Result = Builder.CreateBinOp(Opc: Op, LHS: OldVal, RHS: Amt);
3593 return LHSLV;
3594 }
3595 }
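      // Illustrative example (assumption): `_Atomic int x; x += 5;` maps to
      //   %old = atomicrmw add ptr %x, i32 5 seq_cst
      // with the expression result recomputed as %old + 5. *=, /=, %=, <<=
      // and >>= have no atomicrmw form and fall through to the cmpxchg loop
      // below.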
3596 // FIXME: For floating point types, we should be saving and restoring the
3597 // floating point environment in the loop.
3598 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3599 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
3600 OpInfo.LHS = EmitLoadOfLValue(LV: LHSLV, Loc: E->getExprLoc());
3601 OpInfo.LHS = CGF.EmitToMemory(Value: OpInfo.LHS, Ty: type);
3602 Builder.CreateBr(Dest: opBB);
3603 Builder.SetInsertPoint(opBB);
3604 atomicPHI = Builder.CreatePHI(Ty: OpInfo.LHS->getType(), NumReservedValues: 2);
3605 atomicPHI->addIncoming(V: OpInfo.LHS, BB: startBB);
3606 OpInfo.LHS = atomicPHI;
3607 }
3608 else
3609 OpInfo.LHS = EmitLoadOfLValue(LV: LHSLV, Loc: E->getExprLoc());
3610
3611 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3612 SourceLocation Loc = E->getExprLoc();
3613 if (!PromotionTypeLHS.isNull())
3614 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy, DstType: PromotionTypeLHS,
3615 Loc: E->getExprLoc());
3616 else
3617 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy,
3618 DstType: E->getComputationLHSType(), Loc);
3619
3620 // Expand the binary operator.
3621 Result = (this->*Func)(OpInfo);
3622
3623 // Convert the result back to the LHS type,
3624 // potentially with an implicit-conversion sanitizer check.
3625 // If LHSLV is a bitfield, use the default ScalarConversionOpts
3626 // to avoid emitting any implicit integer checks.
3627 Value *Previous = nullptr;
3628 if (LHSLV.isBitField()) {
3629 Previous = Result;
3630 Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc);
3631 } else
3632 Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc,
3633 Opts: ScalarConversionOpts(CGF.SanOpts));
3634
3635 if (atomicPHI) {
3636 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3637 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
3638 auto Pair = CGF.EmitAtomicCompareExchange(
3639 Obj: LHSLV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: Result), Loc: E->getExprLoc());
3640 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: LHSTy);
3641 llvm::Value *success = Pair.second;
3642 atomicPHI->addIncoming(V: old, BB: curBlock);
3643 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
3644 Builder.SetInsertPoint(contBB);
3645 return LHSLV;
3646 }
3647
3648 // Store the result value into the LHS lvalue. Bit-fields are handled
3649 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3650 // 'An assignment expression has the value of the left operand after the
3651 // assignment...'.
3652 if (LHSLV.isBitField()) {
3653 Value *Src = Previous ? Previous : Result;
3654 QualType SrcType = E->getRHS()->getType();
3655 QualType DstType = E->getLHS()->getType();
3656 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: Result), Dst: LHSLV, Result: &Result);
3657 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: Result, DstType,
3658 Info: LHSLV.getBitFieldInfo(), Loc: E->getExprLoc());
3659 } else
3660 CGF.EmitStoreThroughLValue(Src: RValue::get(V: Result), Dst: LHSLV);
3661
3662 if (CGF.getLangOpts().OpenMP)
3663 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3664 LHS: E->getLHS());
3665 return LHSLV;
3666}
3667
3668Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3669 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3670 bool Ignore = TestAndClearIgnoreResultAssign();
3671 Value *RHS = nullptr;
3672 LValue LHS = EmitCompoundAssignLValue(E, Func, Result&: RHS);
3673
3674 // If the result is clearly ignored, return now.
3675 if (Ignore)
3676 return nullptr;
3677
3678 // The result of an assignment in C is the assigned r-value.
3679 if (!CGF.getLangOpts().CPlusPlus)
3680 return RHS;
3681
3682 // If the lvalue is non-volatile, return the computed value of the assignment.
3683 if (!LHS.isVolatileQualified())
3684 return RHS;
3685
3686 // Otherwise, reload the value.
3687 return EmitLoadOfLValue(LV: LHS, Loc: E->getExprLoc());
3688}
3689
3690void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3691 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3692 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3693
3694 if (CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero)) {
3695 Checks.push_back(Elt: std::make_pair(x: Builder.CreateICmpNE(LHS: Ops.RHS, RHS: Zero),
3696 y: SanitizerKind::IntegerDivideByZero));
3697 }
3698
3699 const auto *BO = cast<BinaryOperator>(Val: Ops.E);
3700 if (CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow) &&
3701 Ops.Ty->hasSignedIntegerRepresentation() &&
3702 !IsWidenedIntegerOp(Ctx: CGF.getContext(), E: BO->getLHS()) &&
3703 Ops.mayHaveIntegerOverflow()) {
3704 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Val: Zero->getType());
3705
3706 llvm::Value *IntMin =
3707 Builder.getInt(AI: llvm::APInt::getSignedMinValue(numBits: Ty->getBitWidth()));
3708 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3709
3710 llvm::Value *LHSCmp = Builder.CreateICmpNE(LHS: Ops.LHS, RHS: IntMin);
3711 llvm::Value *RHSCmp = Builder.CreateICmpNE(LHS: Ops.RHS, RHS: NegOne);
3712 llvm::Value *NotOverflow = Builder.CreateOr(LHS: LHSCmp, RHS: RHSCmp, Name: "or");
3713 Checks.push_back(
3714 Elt: std::make_pair(x&: NotOverflow, y: SanitizerKind::SignedIntegerOverflow));
3715 }
3716
3717 if (Checks.size() > 0)
3718 EmitBinOpCheck(Checks, Info: Ops);
3719}
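// Illustrative check shape (assumption): for signed `a / b` the emitted
// predicates are roughly
//   %nonzero = icmp ne %b, 0                                   ; div-by-zero
//   %no.ovf  = or (icmp ne %a, INT_MIN), (icmp ne %b, -1)      ; INT_MIN / -1
// and a failing check branches to the sanitizer handler (or traps).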
3720
3721Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3722 {
3723 CodeGenFunction::SanitizerScope SanScope(&CGF);
3724 if ((CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero) ||
3725 CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) &&
3726 Ops.Ty->isIntegerType() &&
3727 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3728 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
3729 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, isDiv: true);
3730 } else if (CGF.SanOpts.has(K: SanitizerKind::FloatDivideByZero) &&
3731 Ops.Ty->isRealFloatingType() &&
3732 Ops.mayHaveFloatDivisionByZero()) {
3733 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
3734 llvm::Value *NonZero = Builder.CreateFCmpUNE(LHS: Ops.RHS, RHS: Zero);
3735 EmitBinOpCheck(Checks: std::make_pair(x&: NonZero, y: SanitizerKind::FloatDivideByZero),
3736 Info: Ops);
3737 }
3738 }
3739
3740 if (Ops.Ty->isConstantMatrixType()) {
3741 llvm::MatrixBuilder MB(Builder);
3742 // We need to check the types of the operands of the operator to get the
3743 // correct matrix dimensions.
3744 auto *BO = cast<BinaryOperator>(Val: Ops.E);
3745 (void)BO;
3746 assert(
3747 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
3748 "first operand must be a matrix");
3749 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
3750 "second operand must be an arithmetic type");
3751 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3752 return MB.CreateScalarDiv(LHS: Ops.LHS, RHS: Ops.RHS,
3753 IsUnsigned: Ops.Ty->hasUnsignedIntegerRepresentation());
3754 }
3755
3756 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3757 llvm::Value *Val;
3758 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3759 Val = Builder.CreateFDiv(L: Ops.LHS, R: Ops.RHS, Name: "div");
3760 CGF.SetDivFPAccuracy(Val);
3761 return Val;
3762 }
3763 else if (Ops.isFixedPointOp())
3764 return EmitFixedPointBinOp(Ops);
3765 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3766 return Builder.CreateUDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
3767 else
3768 return Builder.CreateSDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
3769}
3770
3771Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3772 // Rem in C can't be a floating point type: C99 6.5.5p2.
3773 if ((CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero) ||
3774 CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) &&
3775 Ops.Ty->isIntegerType() &&
3776 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3777 CodeGenFunction::SanitizerScope SanScope(&CGF);
3778 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
3779 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, isDiv: false);
3780 }
3781
3782 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3783 return Builder.CreateURem(LHS: Ops.LHS, RHS: Ops.RHS, Name: "rem");
3784 else
3785 return Builder.CreateSRem(LHS: Ops.LHS, RHS: Ops.RHS, Name: "rem");
3786}
3787
3788Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3789 unsigned IID;
3790 unsigned OpID = 0;
3791 SanitizerHandler OverflowKind;
3792
3793 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3794 switch (Ops.Opcode) {
3795 case BO_Add:
3796 case BO_AddAssign:
3797 OpID = 1;
3798 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3799 llvm::Intrinsic::uadd_with_overflow;
3800 OverflowKind = SanitizerHandler::AddOverflow;
3801 break;
3802 case BO_Sub:
3803 case BO_SubAssign:
3804 OpID = 2;
3805 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3806 llvm::Intrinsic::usub_with_overflow;
3807 OverflowKind = SanitizerHandler::SubOverflow;
3808 break;
3809 case BO_Mul:
3810 case BO_MulAssign:
3811 OpID = 3;
3812 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3813 llvm::Intrinsic::umul_with_overflow;
3814 OverflowKind = SanitizerHandler::MulOverflow;
3815 break;
3816 default:
3817 llvm_unreachable("Unsupported operation for overflow detection");
3818 }
3819 OpID <<= 1;
3820 if (isSigned)
3821 OpID |= 1;
3822
3823 CodeGenFunction::SanitizerScope SanScope(&CGF);
3824 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(T: Ops.Ty);
3825
3826 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, Tys: opTy);
3827
3828 Value *resultAndOverflow = Builder.CreateCall(Callee: intrinsic, Args: {Ops.LHS, Ops.RHS});
3829 Value *result = Builder.CreateExtractValue(Agg: resultAndOverflow, Idxs: 0);
3830 Value *overflow = Builder.CreateExtractValue(Agg: resultAndOverflow, Idxs: 1);
3831
3832 // Handle overflow with llvm.trap if no custom handler has been specified.
3833 const std::string *handlerName =
3834 &CGF.getLangOpts().OverflowHandler;
3835 if (handlerName->empty()) {
3836 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3837 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3838 if (!isSigned || CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) {
3839 llvm::Value *NotOverflow = Builder.CreateNot(V: overflow);
3840 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3841 : SanitizerKind::UnsignedIntegerOverflow;
3842 EmitBinOpCheck(Checks: std::make_pair(x&: NotOverflow, y&: Kind), Info: Ops);
3843 } else
3844 CGF.EmitTrapCheck(Checked: Builder.CreateNot(V: overflow), CheckHandlerID: OverflowKind);
3845 return result;
3846 }
3847
3848 // Branch in case of overflow.
3849 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3850 llvm::BasicBlock *continueBB =
3851 CGF.createBasicBlock(name: "nooverflow", parent: CGF.CurFn, before: initialBB->getNextNode());
3852 llvm::BasicBlock *overflowBB = CGF.createBasicBlock(name: "overflow", parent: CGF.CurFn);
3853
3854 Builder.CreateCondBr(Cond: overflow, True: overflowBB, False: continueBB);
3855
3856 // If an overflow handler is set, then we want to call it and then use its
3857 // result, if it returns.
3858 Builder.SetInsertPoint(overflowBB);
3859
3860 // Get the overflow handler.
3861 llvm::Type *Int8Ty = CGF.Int8Ty;
3862 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3863 llvm::FunctionType *handlerTy =
3864 llvm::FunctionType::get(Result: CGF.Int64Ty, Params: argTypes, isVarArg: true);
3865 llvm::FunctionCallee handler =
3866 CGF.CGM.CreateRuntimeFunction(Ty: handlerTy, Name: *handlerName);
3867
3868 // Sign extend the args to 64-bit, so that we can use the same handler for
3869 // all types of overflow.
3870 llvm::Value *lhs = Builder.CreateSExt(V: Ops.LHS, DestTy: CGF.Int64Ty);
3871 llvm::Value *rhs = Builder.CreateSExt(V: Ops.RHS, DestTy: CGF.Int64Ty);
3872
3873 // Call the handler with the two arguments, the operation, and the size of
3874 // the result.
3875 llvm::Value *handlerArgs[] = {
3876 lhs,
3877 rhs,
3878 Builder.getInt8(C: OpID),
3879 Builder.getInt8(C: cast<llvm::IntegerType>(Val: opTy)->getBitWidth())
3880 };
3881 llvm::Value *handlerResult =
3882 CGF.EmitNounwindRuntimeCall(callee: handler, args: handlerArgs);
3883
3884 // Truncate the result back to the desired size.
3885 handlerResult = Builder.CreateTrunc(V: handlerResult, DestTy: opTy);
3886 Builder.CreateBr(Dest: continueBB);
3887
3888 Builder.SetInsertPoint(continueBB);
3889 llvm::PHINode *phi = Builder.CreatePHI(Ty: opTy, NumReservedValues: 2);
3890 phi->addIncoming(V: result, BB: initialBB);
3891 phi->addIncoming(V: handlerResult, BB: overflowBB);
3892
3893 return phi;
3894}
3895
3896/// Emit pointer + index arithmetic.
3897static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3898 const BinOpInfo &op,
3899 bool isSubtraction) {
3900 // Must have binary (not unary) expr here. Unary pointer
3901 // increment/decrement doesn't use this path.
3902 const BinaryOperator *expr = cast<BinaryOperator>(Val: op.E);
3903
3904 Value *pointer = op.LHS;
3905 Expr *pointerOperand = expr->getLHS();
3906 Value *index = op.RHS;
3907 Expr *indexOperand = expr->getRHS();
3908
3909 // In a subtraction, the LHS is always the pointer.
3910 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3911 std::swap(a&: pointer, b&: index);
3912 std::swap(a&: pointerOperand, b&: indexOperand);
3913 }
3914
3915 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3916
3917 unsigned width = cast<llvm::IntegerType>(Val: index->getType())->getBitWidth();
3918 auto &DL = CGF.CGM.getDataLayout();
3919 auto PtrTy = cast<llvm::PointerType>(Val: pointer->getType());
3920
3921 // Some versions of glibc and gcc use idioms (particularly in their malloc
3922 // routines) that add a pointer-sized integer (known to be a pointer value)
3923 // to a null pointer in order to cast the value back to an integer or as
3924 // part of a pointer alignment algorithm. This is undefined behavior, but
3925 // we'd like to be able to compile programs that use it.
3926 //
3927 // Normally, we'd generate a GEP with a null-pointer base here in response
3928 // to that code, but it's also UB to dereference a pointer created that
3929 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3930 // generate a direct cast of the integer value to a pointer.
3931 //
3932 // The idiom (p = nullptr + N) is not recognized if any of the following are true:
3933 //
3934 // The operation is subtraction.
3935 // The index is not pointer-sized.
3936 // The pointer type is not byte-sized.
3937 //
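  // Illustrative example (assumption): code such as
  //   char *p = (char *)NULL + offset;
  // is therefore emitted as an inttoptr of the offset rather than as a GEP on
  // a null base.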
3938 if (BinaryOperator::isNullPointerArithmeticExtension(Ctx&: CGF.getContext(),
3939 Opc: op.Opcode,
3940 LHS: expr->getLHS(),
3941 RHS: expr->getRHS()))
3942 return CGF.Builder.CreateIntToPtr(V: index, DestTy: pointer->getType());
3943
3944 if (width != DL.getIndexTypeSizeInBits(Ty: PtrTy)) {
3945 // Zero-extend or sign-extend the index value according to
3946 // whether the index is signed or not.
3947 index = CGF.Builder.CreateIntCast(V: index, DestTy: DL.getIndexType(PtrTy), isSigned,
3948 Name: "idx.ext");
3949 }
3950
3951 // If this is subtraction, negate the index.
3952 if (isSubtraction)
3953 index = CGF.Builder.CreateNeg(V: index, Name: "idx.neg");
3954
3955 if (CGF.SanOpts.has(K: SanitizerKind::ArrayBounds))
3956 CGF.EmitBoundsCheck(E: op.E, Base: pointerOperand, Index: index, IndexType: indexOperand->getType(),
3957 /*Accessed*/ false);
3958
3959 const PointerType *pointerType
3960 = pointerOperand->getType()->getAs<PointerType>();
3961 if (!pointerType) {
3962 QualType objectType = pointerOperand->getType()
3963 ->castAs<ObjCObjectPointerType>()
3964 ->getPointeeType();
3965 llvm::Value *objectSize
3966 = CGF.CGM.getSize(numChars: CGF.getContext().getTypeSizeInChars(T: objectType));
3967
3968 index = CGF.Builder.CreateMul(LHS: index, RHS: objectSize);
3969
3970 Value *result =
3971 CGF.Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: pointer, IdxList: index, Name: "add.ptr");
3972 return CGF.Builder.CreateBitCast(V: result, DestTy: pointer->getType());
3973 }
3974
3975 QualType elementType = pointerType->getPointeeType();
3976 if (const VariableArrayType *vla
3977 = CGF.getContext().getAsVariableArrayType(T: elementType)) {
3978 // The element count here is the total number of non-VLA elements.
3979 llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3980
3981 // Effectively, the multiply by the VLA size is part of the GEP.
3982 // GEP indexes are signed, and scaling an index isn't permitted to
3983 // signed-overflow, so we use the same semantics for our explicit
3984 // multiply. We suppress this if overflow is not undefined behavior.
3985 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: vla->getElementType());
3986 if (CGF.getLangOpts().isSignedOverflowDefined()) {
3987 index = CGF.Builder.CreateMul(LHS: index, RHS: numElements, Name: "vla.index");
3988 pointer = CGF.Builder.CreateGEP(Ty: elemTy, Ptr: pointer, IdxList: index, Name: "add.ptr");
3989 } else {
3990 index = CGF.Builder.CreateNSWMul(LHS: index, RHS: numElements, Name: "vla.index");
3991 pointer = CGF.EmitCheckedInBoundsGEP(
3992 ElemTy: elemTy, Ptr: pointer, IdxList: index, SignedIndices: isSigned, IsSubtraction: isSubtraction, Loc: op.E->getExprLoc(),
3993 Name: "add.ptr");
3994 }
3995 return pointer;
3996 }
3997
3998 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3999 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
4000 // future-proof.
4001 llvm::Type *elemTy;
4002 if (elementType->isVoidType() || elementType->isFunctionType())
4003 elemTy = CGF.Int8Ty;
4004 else
4005 elemTy = CGF.ConvertTypeForMem(T: elementType);
4006
4007 if (CGF.getLangOpts().isSignedOverflowDefined())
4008 return CGF.Builder.CreateGEP(Ty: elemTy, Ptr: pointer, IdxList: index, Name: "add.ptr");
4009
4010 return CGF.EmitCheckedInBoundsGEP(
4011 ElemTy: elemTy, Ptr: pointer, IdxList: index, SignedIndices: isSigned, IsSubtraction: isSubtraction, Loc: op.E->getExprLoc(),
4012 Name: "add.ptr");
4013}
4014
4015// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4016// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4017// the add operand respectively. This allows fmuladd to represent a*b-c, or
4018// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4019// efficient operations.
4020static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
4021 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4022 bool negMul, bool negAdd) {
4023 Value *MulOp0 = MulOp->getOperand(i: 0);
4024 Value *MulOp1 = MulOp->getOperand(i: 1);
4025 if (negMul)
4026 MulOp0 = Builder.CreateFNeg(V: MulOp0, Name: "neg");
4027 if (negAdd)
4028 Addend = Builder.CreateFNeg(V: Addend, Name: "neg");
4029
4030 Value *FMulAdd = nullptr;
4031 if (Builder.getIsFPConstrained()) {
4032 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4033 "Only constrained operation should be created when Builder is in FP "
4034 "constrained mode");
4035 FMulAdd = Builder.CreateConstrainedFPCall(
4036 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::experimental_constrained_fmuladd,
4037 Tys: Addend->getType()),
4038 Args: {MulOp0, MulOp1, Addend});
4039 } else {
4040 FMulAdd = Builder.CreateCall(
4041 Callee: CGF.CGM.getIntrinsic(IID: llvm::Intrinsic::fmuladd, Tys: Addend->getType()),
4042 Args: {MulOp0, MulOp1, Addend});
4043 }
4044 MulOp->eraseFromParent();
4045
4046 return FMulAdd;
4047}
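
// For example, with FP contraction enabled (-ffp-contract=on),
//   float f(float a, float b, float c) { return a * b + c; }
// reaches EmitAdd with an otherwise-unused fmul as its LHS, and this helper
// rewrites the pair into roughly
//   %muladd = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
// The forms a*b - c and c - a*b are handled by negating the addend or the
// first mul operand, respectively, as described above.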
4048
4049// Check whether it would be legal to emit an fmuladd intrinsic call to
4050// represent op and if so, build the fmuladd.
4051//
4052// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
4053// Does NOT check the type of the operation - it's assumed that this function
4054// will be called from contexts where it's known that the type is contractable.
4055static Value* tryEmitFMulAdd(const BinOpInfo &op,
4056 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4057 bool isSub=false) {
4058
4059 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
4060 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
4061 "Only fadd/fsub can be the root of an fmuladd.");
4062
4063 // Check whether this op is marked as fusable.
4064 if (!op.FPFeatures.allowFPContractWithinStatement())
4065 return nullptr;
4066
4067 Value *LHS = op.LHS;
4068 Value *RHS = op.RHS;
4069
4070 // Peek through fneg to look for fmul. Make sure fneg has no users, and that
4071 // it is the only use of its operand.
4072 bool NegLHS = false;
4073 if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: LHS)) {
4074 if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4075 LHSUnOp->use_empty() && LHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
4076 LHS = LHSUnOp->getOperand(i_nocapture: 0);
4077 NegLHS = true;
4078 }
4079 }
4080
4081 bool NegRHS = false;
4082 if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: RHS)) {
4083 if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4084 RHSUnOp->use_empty() && RHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
4085 RHS = RHSUnOp->getOperand(i_nocapture: 0);
4086 NegRHS = true;
4087 }
4088 }
4089
4090 // We have a potentially fusable op. Look for a mul on one of the operands.
4091 // Also, make sure that the mul result isn't used directly. In that case,
4092 // there's no point creating a muladd operation.
4093 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: LHS)) {
4094 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4095 (LHSBinOp->use_empty() || NegLHS)) {
4096 // If we looked through fneg, erase it.
4097 if (NegLHS)
4098 cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
4099 return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
4100 }
4101 }
4102 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: RHS)) {
4103 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4104 (RHSBinOp->use_empty() || NegRHS)) {
4105 // If we looked through fneg, erase it.
4106 if (NegRHS)
4107 cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
4108 return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
4109 }
4110 }
4111
4112 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(Val: LHS)) {
4113 if (LHSBinOp->getIntrinsicID() ==
4114 llvm::Intrinsic::experimental_constrained_fmul &&
4115 (LHSBinOp->use_empty() || NegLHS)) {
4116 // If we looked through fneg, erase it.
4117 if (NegLHS)
4118 cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
4119 return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
4120 }
4121 }
4122 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(Val: RHS)) {
4123 if (RHSBinOp->getIntrinsicID() ==
4124 llvm::Intrinsic::experimental_constrained_fmul &&
4125 (RHSBinOp->use_empty() || NegRHS)) {
4126 // If we looked through fneg, erase it.
4127 if (NegRHS)
4128 cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
4129 return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
4130 }
4131 }
4132
4133 return nullptr;
4134}
4135
4136Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
4137 if (op.LHS->getType()->isPointerTy() ||
4138 op.RHS->getType()->isPointerTy())
4139 return emitPointerArithmetic(CGF, op, isSubtraction: CodeGenFunction::NotSubtraction);
4140
4141 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4142 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4143 case LangOptions::SOB_Defined:
4144 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4145 return Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4146 [[fallthrough]];
4147 case LangOptions::SOB_Undefined:
4148 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4149 return Builder.CreateNSWAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4150 [[fallthrough]];
4151 case LangOptions::SOB_Trapping:
4152 if (CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4153 return Builder.CreateNSWAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4154 return EmitOverflowCheckedBinOp(Ops: op);
4155 }
4156 }
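
  // For illustration, for a plain `int + int` the three cases above emit,
  // roughly:
  //   SOB_Defined   (-fwrapv):  %add = add i32 %a, %b
  //   SOB_Undefined (default):  %add = add nsw i32 %a, %b
  //   SOB_Trapping  (-ftrapv) or -fsanitize=signed-integer-overflow:
  //     a call to @llvm.sadd.with.overflow.i32 plus a branch to the trap or
  //     diagnostic handler, via EmitOverflowCheckedBinOp.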
4157
4158 // For vector and matrix adds, try to fold into a fmuladd.
4159 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4160 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4161 // Try to form an fmuladd.
4162 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
4163 return FMulAdd;
4164 }
4165
4166 if (op.Ty->isConstantMatrixType()) {
4167 llvm::MatrixBuilder MB(Builder);
4168 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4169 return MB.CreateAdd(LHS: op.LHS, RHS: op.RHS);
4170 }
4171
4172 if (op.Ty->isUnsignedIntegerType() &&
4173 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
4174 !CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4175 return EmitOverflowCheckedBinOp(Ops: op);
4176
4177 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4178 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4179 return Builder.CreateFAdd(L: op.LHS, R: op.RHS, Name: "add");
4180 }
4181
4182 if (op.isFixedPointOp())
4183 return EmitFixedPointBinOp(Ops: op);
4184
4185 return Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
4186}
4187
4188/// The resulting value must be calculated with exact precision, so the operands
4189/// might not have the same type.
4190Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4191 using llvm::APSInt;
4192 using llvm::ConstantInt;
4193
4194 // This is either a binary operation where at least one of the operands is
4195 // a fixed-point type, or a unary operation where the operand is a fixed-point
4196 // type. The result type of a binary operation is determined by
4197 // Sema::handleFixedPointConversions().
4198 QualType ResultTy = op.Ty;
4199 QualType LHSTy, RHSTy;
4200 if (const auto *BinOp = dyn_cast<BinaryOperator>(Val: op.E)) {
4201 RHSTy = BinOp->getRHS()->getType();
4202 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(Val: BinOp)) {
4203 // For compound assignment, the effective type of the LHS at this point
4204 // is the computation LHS type, not the actual LHS type, and the final
4205 // result type is not the type of the expression but rather the
4206 // computation result type.
4207 LHSTy = CAO->getComputationLHSType();
4208 ResultTy = CAO->getComputationResultType();
4209 } else
4210 LHSTy = BinOp->getLHS()->getType();
4211 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(Val: op.E)) {
4212 LHSTy = UnOp->getSubExpr()->getType();
4213 RHSTy = UnOp->getSubExpr()->getType();
4214 }
4215 ASTContext &Ctx = CGF.getContext();
4216 Value *LHS = op.LHS;
4217 Value *RHS = op.RHS;
4218
4219 auto LHSFixedSema = Ctx.getFixedPointSemantics(Ty: LHSTy);
4220 auto RHSFixedSema = Ctx.getFixedPointSemantics(Ty: RHSTy);
4221 auto ResultFixedSema = Ctx.getFixedPointSemantics(Ty: ResultTy);
4222 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(Other: RHSFixedSema);
4223
4224 // Perform the actual operation.
4225 Value *Result;
4226 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4227 switch (op.Opcode) {
4228 case BO_AddAssign:
4229 case BO_Add:
4230 Result = FPBuilder.CreateAdd(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4231 break;
4232 case BO_SubAssign:
4233 case BO_Sub:
4234 Result = FPBuilder.CreateSub(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4235 break;
4236 case BO_MulAssign:
4237 case BO_Mul:
4238 Result = FPBuilder.CreateMul(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4239 break;
4240 case BO_DivAssign:
4241 case BO_Div:
4242 Result = FPBuilder.CreateDiv(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4243 break;
4244 case BO_ShlAssign:
4245 case BO_Shl:
4246 Result = FPBuilder.CreateShl(LHS, LHSSema: LHSFixedSema, RHS);
4247 break;
4248 case BO_ShrAssign:
4249 case BO_Shr:
4250 Result = FPBuilder.CreateShr(LHS, LHSSema: LHSFixedSema, RHS);
4251 break;
4252 case BO_LT:
4253 return FPBuilder.CreateLT(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4254 case BO_GT:
4255 return FPBuilder.CreateGT(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4256 case BO_LE:
4257 return FPBuilder.CreateLE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4258 case BO_GE:
4259 return FPBuilder.CreateGE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4260 case BO_EQ:
4261    // For equality operations, we assume any padding bits on unsigned types are
4262    // zeroed out. They could be overwritten through non-saturating operations
4263    // that cause overflow, but this leads to undefined behavior.
4264 return FPBuilder.CreateEQ(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4265 case BO_NE:
4266 return FPBuilder.CreateNE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
4267 case BO_Cmp:
4268 case BO_LAnd:
4269 case BO_LOr:
4270 llvm_unreachable("Found unimplemented fixed point binary operation");
4271 case BO_PtrMemD:
4272 case BO_PtrMemI:
4273 case BO_Rem:
4274 case BO_Xor:
4275 case BO_And:
4276 case BO_Or:
4277 case BO_Assign:
4278 case BO_RemAssign:
4279 case BO_AndAssign:
4280 case BO_XorAssign:
4281 case BO_OrAssign:
4282 case BO_Comma:
4283 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4284 }
4285
4286 bool IsShift = BinaryOperator::isShiftOp(Opc: op.Opcode) ||
4287 BinaryOperator::isShiftAssignOp(Opc: op.Opcode);
4288 // Convert to the result type.
4289 return FPBuilder.CreateFixedToFixed(Src: Result, SrcSema: IsShift ? LHSFixedSema
4290 : CommonFixedSema,
4291 DstSema: ResultFixedSema);
4292}
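
// For example (assuming -ffixed-point), adding a `short _Accum` to an
// `_Accum` converts both operands to their common semantics and performs the
// add there, while `_Sat` variants use a saturating add such as
// @llvm.sadd.sat; finally CreateFixedToFixed converts the result back to the
// semantics of the result type, as done at the end of this function.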
4293
4294Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4295 // The LHS is always a pointer if either side is.
4296 if (!op.LHS->getType()->isPointerTy()) {
4297 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4298 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4299 case LangOptions::SOB_Defined:
4300 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4301 return Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4302 [[fallthrough]];
4303 case LangOptions::SOB_Undefined:
4304 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4305 return Builder.CreateNSWSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4306 [[fallthrough]];
4307 case LangOptions::SOB_Trapping:
4308 if (CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4309 return Builder.CreateNSWSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4310 return EmitOverflowCheckedBinOp(Ops: op);
4311 }
4312 }
4313
4314 // For vector and matrix subs, try to fold into a fmuladd.
4315 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4316 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4317 // Try to form an fmuladd.
4318 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, isSub: true))
4319 return FMulAdd;
4320 }
4321
4322 if (op.Ty->isConstantMatrixType()) {
4323 llvm::MatrixBuilder MB(Builder);
4324 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4325 return MB.CreateSub(LHS: op.LHS, RHS: op.RHS);
4326 }
4327
4328 if (op.Ty->isUnsignedIntegerType() &&
4329 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
4330 !CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4331 return EmitOverflowCheckedBinOp(Ops: op);
4332
4333 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4334 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4335 return Builder.CreateFSub(L: op.LHS, R: op.RHS, Name: "sub");
4336 }
4337
4338 if (op.isFixedPointOp())
4339 return EmitFixedPointBinOp(op);
4340
4341 return Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4342 }
4343
4344 // If the RHS is not a pointer, then we have normal pointer
4345 // arithmetic.
4346 if (!op.RHS->getType()->isPointerTy())
4347 return emitPointerArithmetic(CGF, op, isSubtraction: CodeGenFunction::IsSubtraction);
4348
4349 // Otherwise, this is a pointer subtraction.
4350
4351 // Do the raw subtraction part.
4352 llvm::Value *LHS
4353 = Builder.CreatePtrToInt(V: op.LHS, DestTy: CGF.PtrDiffTy, Name: "sub.ptr.lhs.cast");
4354 llvm::Value *RHS
4355 = Builder.CreatePtrToInt(V: op.RHS, DestTy: CGF.PtrDiffTy, Name: "sub.ptr.rhs.cast");
4356 Value *diffInChars = Builder.CreateSub(LHS, RHS, Name: "sub.ptr.sub");
4357
4358 // Okay, figure out the element size.
4359 const BinaryOperator *expr = cast<BinaryOperator>(Val: op.E);
4360 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4361
4362 llvm::Value *divisor = nullptr;
4363
4364 // For a variable-length array, this is going to be non-constant.
4365 if (const VariableArrayType *vla
4366 = CGF.getContext().getAsVariableArrayType(T: elementType)) {
4367 auto VlaSize = CGF.getVLASize(vla);
4368 elementType = VlaSize.Type;
4369 divisor = VlaSize.NumElts;
4370
4371 // Scale the number of non-VLA elements by the non-VLA element size.
4372 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: elementType);
4373 if (!eltSize.isOne())
4374 divisor = CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize), RHS: divisor);
4375
4376  // For everything else, we can just compute it, safe in the
4377  // assumption that Sema won't let anything through that we can't
4378  // safely compute the size of.
4379 } else {
4380 CharUnits elementSize;
4381 // Handle GCC extension for pointer arithmetic on void* and
4382 // function pointer types.
4383 if (elementType->isVoidType() || elementType->isFunctionType())
4384 elementSize = CharUnits::One();
4385 else
4386 elementSize = CGF.getContext().getTypeSizeInChars(T: elementType);
4387
4388 // Don't even emit the divide for element size of 1.
4389 if (elementSize.isOne())
4390 return diffInChars;
4391
4392 divisor = CGF.CGM.getSize(numChars: elementSize);
4393 }
4394
4395 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4396 // pointer difference in C is only defined in the case where both operands
4397 // are pointing to elements of an array.
4398 return Builder.CreateExactSDiv(LHS: diffInChars, RHS: divisor, Name: "sub.ptr.div");
4399}
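
// For example (assuming 4-byte int and 64-bit pointers), `p - q` with
// `int *p, *q` lowers through the path above to roughly:
//   %lhs  = ptrtoint ptr %p to i64
//   %rhs  = ptrtoint ptr %q to i64
//   %diff = sub i64 %lhs, %rhs
//   %res  = sdiv exact i64 %diff, 4
// The `exact` flag is justified because C only defines the subtraction when
// both pointers point into the same array object.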
4400
4401Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4402 bool RHSIsSigned) {
4403 llvm::IntegerType *Ty;
4404 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(Val: LHS->getType()))
4405 Ty = cast<llvm::IntegerType>(Val: VT->getElementType());
4406 else
4407 Ty = cast<llvm::IntegerType>(Val: LHS->getType());
4408 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4409 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4410 // this in ConstantInt::get, this results in the value getting truncated.
4411 // Constrain the return value to be max(RHS) in this case.
4412 llvm::Type *RHSTy = RHS->getType();
4413 llvm::APInt RHSMax =
4414 RHSIsSigned ? llvm::APInt::getSignedMaxValue(numBits: RHSTy->getScalarSizeInBits())
4415 : llvm::APInt::getMaxValue(numBits: RHSTy->getScalarSizeInBits());
4416 if (RHSMax.ult(RHS: Ty->getBitWidth()))
4417 return llvm::ConstantInt::get(Ty: RHSTy, V: RHSMax);
4418 return llvm::ConstantInt::get(Ty: RHSTy, V: Ty->getBitWidth() - 1);
4419}
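
// For example, for an i32 LHS shifted by an i32 amount this simply returns
// 31. The clamp above matters in cases like an i256 LHS shifted by a signed
// i8 amount: width(LHS)-1 == 255 does not fit in i8, so the function returns
// 127 (the maximum representable RHS) rather than a silently truncated
// constant.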
4420
4421Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4422 const Twine &Name) {
4423 llvm::IntegerType *Ty;
4424 if (auto *VT = dyn_cast<llvm::VectorType>(Val: LHS->getType()))
4425 Ty = cast<llvm::IntegerType>(Val: VT->getElementType());
4426 else
4427 Ty = cast<llvm::IntegerType>(Val: LHS->getType());
4428
4429 if (llvm::isPowerOf2_64(Value: Ty->getBitWidth()))
4430 return Builder.CreateAnd(LHS: RHS, RHS: GetMaximumShiftAmount(LHS, RHS, RHSIsSigned: false), Name);
4431
4432 return Builder.CreateURem(
4433 LHS: RHS, RHS: llvm::ConstantInt::get(Ty: RHS->getType(), V: Ty->getBitWidth()), Name);
4434}
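
// For example, under OpenCL/HLSL an i32 shift amount is masked with
//   %shl.mask = and i32 %rhs, 31
// while a hypothetical non-power-of-two width such as 24 bits would fall
// back to `urem %rhs, 24`, since a simple mask is wrong there.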
4435
4436Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4437 // TODO: This misses out on the sanitizer check below.
4438 if (Ops.isFixedPointOp())
4439 return EmitFixedPointBinOp(op: Ops);
4440
4441 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4442 // RHS to the same size as the LHS.
4443 Value *RHS = Ops.RHS;
4444 if (Ops.LHS->getType() != RHS->getType())
4445 RHS = Builder.CreateIntCast(V: RHS, DestTy: Ops.LHS->getType(), isSigned: false, Name: "sh_prom");
4446
4447 bool SanitizeSignedBase = CGF.SanOpts.has(K: SanitizerKind::ShiftBase) &&
4448 Ops.Ty->hasSignedIntegerRepresentation() &&
4449 !CGF.getLangOpts().isSignedOverflowDefined() &&
4450 !CGF.getLangOpts().CPlusPlus20;
4451 bool SanitizeUnsignedBase =
4452 CGF.SanOpts.has(K: SanitizerKind::UnsignedShiftBase) &&
4453 Ops.Ty->hasUnsignedIntegerRepresentation();
4454 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4455 bool SanitizeExponent = CGF.SanOpts.has(K: SanitizerKind::ShiftExponent);
4456 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4457 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4458 RHS = ConstrainShiftValue(LHS: Ops.LHS, RHS, Name: "shl.mask");
4459 else if ((SanitizeBase || SanitizeExponent) &&
4460 isa<llvm::IntegerType>(Val: Ops.LHS->getType())) {
4461 CodeGenFunction::SanitizerScope SanScope(&CGF);
4462 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
4463 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4464 llvm::Value *WidthMinusOne =
4465 GetMaximumShiftAmount(LHS: Ops.LHS, RHS: Ops.RHS, RHSIsSigned);
4466 llvm::Value *ValidExponent = Builder.CreateICmpULE(LHS: Ops.RHS, RHS: WidthMinusOne);
4467
4468 if (SanitizeExponent) {
4469 Checks.push_back(
4470 Elt: std::make_pair(x&: ValidExponent, y: SanitizerKind::ShiftExponent));
4471 }
4472
4473 if (SanitizeBase) {
4474      // Check whether we are shifting any non-zero bits off the top of the
4475      // integer. We only emit this check if the exponent is valid; otherwise
4476      // the instructions below will have undefined behavior themselves.
4477 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4478 llvm::BasicBlock *Cont = CGF.createBasicBlock(name: "cont");
4479 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock(name: "check");
4480 Builder.CreateCondBr(Cond: ValidExponent, True: CheckShiftBase, False: Cont);
4481 llvm::Value *PromotedWidthMinusOne =
4482 (RHS == Ops.RHS) ? WidthMinusOne
4483 : GetMaximumShiftAmount(LHS: Ops.LHS, RHS, RHSIsSigned);
4484 CGF.EmitBlock(BB: CheckShiftBase);
4485 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4486 LHS: Ops.LHS, RHS: Builder.CreateSub(LHS: PromotedWidthMinusOne, RHS, Name: "shl.zeros",
4487 /*NUW*/ HasNUW: true, /*NSW*/ HasNSW: true),
4488 Name: "shl.check");
4489 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4490 // In C99, we are not permitted to shift a 1 bit into the sign bit.
4491 // Under C++11's rules, shifting a 1 bit into the sign bit is
4492 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4493 // define signed left shifts, so we use the C99 and C++11 rules there).
4494 // Unsigned shifts can always shift into the top bit.
4495 llvm::Value *One = llvm::ConstantInt::get(Ty: BitsShiftedOff->getType(), V: 1);
4496 BitsShiftedOff = Builder.CreateLShr(LHS: BitsShiftedOff, RHS: One);
4497 }
4498 llvm::Value *Zero = llvm::ConstantInt::get(Ty: BitsShiftedOff->getType(), V: 0);
4499 llvm::Value *ValidBase = Builder.CreateICmpEQ(LHS: BitsShiftedOff, RHS: Zero);
4500 CGF.EmitBlock(BB: Cont);
4501 llvm::PHINode *BaseCheck = Builder.CreatePHI(Ty: ValidBase->getType(), NumReservedValues: 2);
4502 BaseCheck->addIncoming(V: Builder.getTrue(), BB: Orig);
4503 BaseCheck->addIncoming(V: ValidBase, BB: CheckShiftBase);
4504 Checks.push_back(Elt: std::make_pair(
4505 x&: BaseCheck, y: SanitizeSignedBase ? SanitizerKind::ShiftBase
4506 : SanitizerKind::UnsignedShiftBase));
4507 }
4508
4509 assert(!Checks.empty());
4510 EmitBinOpCheck(Checks, Info: Ops);
4511 }
4512
4513 return Builder.CreateShl(LHS: Ops.LHS, RHS, Name: "shl");
4514}
4515
4516Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4517 // TODO: This misses out on the sanitizer check below.
4518 if (Ops.isFixedPointOp())
4519 return EmitFixedPointBinOp(op: Ops);
4520
4521 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4522 // RHS to the same size as the LHS.
4523 Value *RHS = Ops.RHS;
4524 if (Ops.LHS->getType() != RHS->getType())
4525 RHS = Builder.CreateIntCast(V: RHS, DestTy: Ops.LHS->getType(), isSigned: false, Name: "sh_prom");
4526
4527 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4528 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4529 RHS = ConstrainShiftValue(LHS: Ops.LHS, RHS, Name: "shr.mask");
4530 else if (CGF.SanOpts.has(K: SanitizerKind::ShiftExponent) &&
4531 isa<llvm::IntegerType>(Val: Ops.LHS->getType())) {
4532 CodeGenFunction::SanitizerScope SanScope(&CGF);
4533 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4534 llvm::Value *Valid = Builder.CreateICmpULE(
4535 LHS: Ops.RHS, RHS: GetMaximumShiftAmount(LHS: Ops.LHS, RHS: Ops.RHS, RHSIsSigned));
4536 EmitBinOpCheck(Checks: std::make_pair(x&: Valid, y: SanitizerKind::ShiftExponent), Info: Ops);
4537 }
4538
4539 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4540 return Builder.CreateLShr(LHS: Ops.LHS, RHS, Name: "shr");
4541 return Builder.CreateAShr(LHS: Ops.LHS, RHS, Name: "shr");
4542}
4543
4544enum IntrinsicType { VCMPEQ, VCMPGT };
4545// Return the corresponding comparison intrinsic for the given vector type.
4546static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4547 BuiltinType::Kind ElemKind) {
4548 switch (ElemKind) {
4549 default: llvm_unreachable("unexpected element type");
4550 case BuiltinType::Char_U:
4551 case BuiltinType::UChar:
4552 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4553 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4554 case BuiltinType::Char_S:
4555 case BuiltinType::SChar:
4556 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4557 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4558 case BuiltinType::UShort:
4559 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4560 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4561 case BuiltinType::Short:
4562 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4563 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4564 case BuiltinType::UInt:
4565 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4566 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4567 case BuiltinType::Int:
4568 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4569 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4570 case BuiltinType::ULong:
4571 case BuiltinType::ULongLong:
4572 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4573 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4574 case BuiltinType::Long:
4575 case BuiltinType::LongLong:
4576 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4577 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4578 case BuiltinType::Float:
4579 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4580 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4581 case BuiltinType::Double:
4582 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4583 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4584 case BuiltinType::UInt128:
4585 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4586 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4587 case BuiltinType::Int128:
4588 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4589 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4590 }
4591}
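
// For example, EmitCompare below maps a `vector int` equality used in a
// scalar context to a call to the corresponding AltiVec predicate intrinsic,
// roughly:
//   %res = call i32 @llvm.ppc.altivec.vcmpequw.p(i32 2, <4 x i32> %a,
//                                                <4 x i32> %b)
// where the leading constant selects how the CR6 condition-register bits are
// interpreted (2 corresponds to CR6_LT in the enum used below).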
4592
4593Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
4594 llvm::CmpInst::Predicate UICmpOpc,
4595 llvm::CmpInst::Predicate SICmpOpc,
4596 llvm::CmpInst::Predicate FCmpOpc,
4597 bool IsSignaling) {
4598 TestAndClearIgnoreResultAssign();
4599 Value *Result;
4600 QualType LHSTy = E->getLHS()->getType();
4601 QualType RHSTy = E->getRHS()->getType();
4602 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4603 assert(E->getOpcode() == BO_EQ ||
4604 E->getOpcode() == BO_NE);
4605 Value *LHS = CGF.EmitScalarExpr(E: E->getLHS());
4606 Value *RHS = CGF.EmitScalarExpr(E: E->getRHS());
4607 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
4608 CGF, L: LHS, R: RHS, MPT, Inequality: E->getOpcode() == BO_NE);
4609 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4610 BinOpInfo BOInfo = EmitBinOps(E);
4611 Value *LHS = BOInfo.LHS;
4612 Value *RHS = BOInfo.RHS;
4613
4614    // If AltiVec, the comparison results in a numeric type, so we use
4615    // intrinsics that compare vectors and give 0 or 1 as a result.
4616 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4617 // constants for mapping CR6 register bits to predicate result
4618 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4619
4620 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4621
4622      // In several cases the vector argument order will be reversed.
4623 Value *FirstVecArg = LHS,
4624 *SecondVecArg = RHS;
4625
4626 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4627 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4628
4629 switch(E->getOpcode()) {
4630 default: llvm_unreachable("is not a comparison operation");
4631 case BO_EQ:
4632 CR6 = CR6_LT;
4633 ID = GetIntrinsic(IT: VCMPEQ, ElemKind: ElementKind);
4634 break;
4635 case BO_NE:
4636 CR6 = CR6_EQ;
4637 ID = GetIntrinsic(IT: VCMPEQ, ElemKind: ElementKind);
4638 break;
4639 case BO_LT:
4640 CR6 = CR6_LT;
4641 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4642 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4643 break;
4644 case BO_GT:
4645 CR6 = CR6_LT;
4646 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4647 break;
4648 case BO_LE:
4649 if (ElementKind == BuiltinType::Float) {
4650 CR6 = CR6_LT;
4651 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4652 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4653 }
4654 else {
4655 CR6 = CR6_EQ;
4656 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4657 }
4658 break;
4659 case BO_GE:
4660 if (ElementKind == BuiltinType::Float) {
4661 CR6 = CR6_LT;
4662 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4663 }
4664 else {
4665 CR6 = CR6_EQ;
4666 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4667 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4668 }
4669 break;
4670 }
4671
4672 Value *CR6Param = Builder.getInt32(C: CR6);
4673 llvm::Function *F = CGF.CGM.getIntrinsic(IID: ID);
4674 Result = Builder.CreateCall(Callee: F, Args: {CR6Param, FirstVecArg, SecondVecArg});
4675
4676      // The result type of the intrinsic may not be the same as E->getType().
4677      // If E->getType() is not BoolTy, EmitScalarConversion will do the
4678      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
4679      // do nothing; in that case, if ResultTy is not i1, it would cause a
4680      // crash later.
4681 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Val: Result->getType());
4682 if (ResultTy->getBitWidth() > 1 &&
4683 E->getType() == CGF.getContext().BoolTy)
4684 Result = Builder.CreateTrunc(V: Result, DestTy: Builder.getInt1Ty());
4685 return EmitScalarConversion(Src: Result, SrcType: CGF.getContext().BoolTy, DstType: E->getType(),
4686 Loc: E->getExprLoc());
4687 }
4688
4689 if (BOInfo.isFixedPointOp()) {
4690 Result = EmitFixedPointBinOp(op: BOInfo);
4691 } else if (LHS->getType()->isFPOrFPVectorTy()) {
4692 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4693 if (!IsSignaling)
4694 Result = Builder.CreateFCmp(P: FCmpOpc, LHS, RHS, Name: "cmp");
4695 else
4696 Result = Builder.CreateFCmpS(P: FCmpOpc, LHS, RHS, Name: "cmp");
4697 } else if (LHSTy->hasSignedIntegerRepresentation()) {
4698 Result = Builder.CreateICmp(P: SICmpOpc, LHS, RHS, Name: "cmp");
4699 } else {
4700 // Unsigned integers and pointers.
4701
4702 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
4703 !isa<llvm::ConstantPointerNull>(Val: LHS) &&
4704 !isa<llvm::ConstantPointerNull>(Val: RHS)) {
4705
4706        // Dynamic information must be stripped for comparisons because it
4707        // could otherwise be leaked. Based on comparisons of pointers to
4708        // dynamic objects, the optimizer could replace one pointer with
4709        // another, which might be incorrect in the presence of invariant
4710        // groups. Comparison with null is safe because null does not carry
4711        // any dynamic information.
4712 if (LHSTy.mayBeDynamicClass())
4713 LHS = Builder.CreateStripInvariantGroup(Ptr: LHS);
4714 if (RHSTy.mayBeDynamicClass())
4715 RHS = Builder.CreateStripInvariantGroup(Ptr: RHS);
4716 }
4717
4718 Result = Builder.CreateICmp(P: UICmpOpc, LHS, RHS, Name: "cmp");
4719 }
4720
4721 // If this is a vector comparison, sign extend the result to the appropriate
4722 // vector integer type and return it (don't convert to bool).
4723 if (LHSTy->isVectorType())
4724 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
4725
4726 } else {
4727 // Complex Comparison: can only be an equality comparison.
4728 CodeGenFunction::ComplexPairTy LHS, RHS;
4729 QualType CETy;
4730 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
4731 LHS = CGF.EmitComplexExpr(E: E->getLHS());
4732 CETy = CTy->getElementType();
4733 } else {
4734 LHS.first = Visit(E: E->getLHS());
4735 LHS.second = llvm::Constant::getNullValue(Ty: LHS.first->getType());
4736 CETy = LHSTy;
4737 }
4738 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
4739 RHS = CGF.EmitComplexExpr(E: E->getRHS());
4740 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
4741 CTy->getElementType()) &&
4742 "The element types must always match.");
4743 (void)CTy;
4744 } else {
4745 RHS.first = Visit(E: E->getRHS());
4746 RHS.second = llvm::Constant::getNullValue(Ty: RHS.first->getType());
4747 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
4748 "The element types must always match.");
4749 }
4750
4751 Value *ResultR, *ResultI;
4752 if (CETy->isRealFloatingType()) {
4753 // As complex comparisons can only be equality comparisons, they
4754 // are never signaling comparisons.
4755 ResultR = Builder.CreateFCmp(P: FCmpOpc, LHS: LHS.first, RHS: RHS.first, Name: "cmp.r");
4756 ResultI = Builder.CreateFCmp(P: FCmpOpc, LHS: LHS.second, RHS: RHS.second, Name: "cmp.i");
4757 } else {
4758 // Complex comparisons can only be equality comparisons. As such, signed
4759 // and unsigned opcodes are the same.
4760 ResultR = Builder.CreateICmp(P: UICmpOpc, LHS: LHS.first, RHS: RHS.first, Name: "cmp.r");
4761 ResultI = Builder.CreateICmp(P: UICmpOpc, LHS: LHS.second, RHS: RHS.second, Name: "cmp.i");
4762 }
4763
4764 if (E->getOpcode() == BO_EQ) {
4765 Result = Builder.CreateAnd(LHS: ResultR, RHS: ResultI, Name: "and.ri");
4766 } else {
4767 assert(E->getOpcode() == BO_NE &&
4768 "Complex comparison other than == or != ?");
4769 Result = Builder.CreateOr(LHS: ResultR, RHS: ResultI, Name: "or.ri");
4770 }
4771 }
4772
4773 return EmitScalarConversion(Src: Result, SrcType: CGF.getContext().BoolTy, DstType: E->getType(),
4774 Loc: E->getExprLoc());
4775}
4776
4777llvm::Value *CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(
4778 const BinaryOperator *E, Value **Previous, QualType *SrcType) {
4779 // In case we have the integer or bitfield sanitizer checks enabled
4780 // we want to get the expression before scalar conversion.
4781 if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: E->getRHS())) {
4782 CastKind Kind = ICE->getCastKind();
4783 if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
4784 *SrcType = ICE->getSubExpr()->getType();
4785 *Previous = EmitScalarExpr(E: ICE->getSubExpr());
4786      // Pass default ScalarConversionOpts to avoid emitting integer sanitizer
4787      // checks, as E refers to a bitfield.
4788 return EmitScalarConversion(Src: *Previous, SrcTy: *SrcType, DstTy: ICE->getType(),
4789 Loc: ICE->getExprLoc());
4790 }
4791 }
4792 return EmitScalarExpr(E: E->getRHS());
4793}
4794
4795Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
4796 bool Ignore = TestAndClearIgnoreResultAssign();
4797
4798 Value *RHS;
4799 LValue LHS;
4800
4801 switch (E->getLHS()->getType().getObjCLifetime()) {
4802 case Qualifiers::OCL_Strong:
4803 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreStrong(e: E, ignored: Ignore);
4804 break;
4805
4806 case Qualifiers::OCL_Autoreleasing:
4807 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreAutoreleasing(e: E);
4808 break;
4809
4810 case Qualifiers::OCL_ExplicitNone:
4811 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreUnsafeUnretained(e: E, ignored: Ignore);
4812 break;
4813
4814 case Qualifiers::OCL_Weak:
4815 RHS = Visit(E: E->getRHS());
4816 LHS = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
4817 RHS = CGF.EmitARCStoreWeak(addr: LHS.getAddress(), value: RHS, ignored: Ignore);
4818 break;
4819
4820 case Qualifiers::OCL_None:
4821 // __block variables need to have the rhs evaluated first, plus
4822 // this should improve codegen just a little.
4823 Value *Previous = nullptr;
4824 QualType SrcType = E->getRHS()->getType();
4825 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
4826 // we want to extract that value and potentially (if the bitfield sanitizer
4827 // is enabled) use it to check for an implicit conversion.
4828 if (E->getLHS()->refersToBitField())
4829 RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, Previous: &Previous, SrcType: &SrcType);
4830 else
4831 RHS = Visit(E: E->getRHS());
4832
4833 LHS = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
4834
4835 // Store the value into the LHS. Bit-fields are handled specially
4836 // because the result is altered by the store, i.e., [C99 6.5.16p1]
4837 // 'An assignment expression has the value of the left operand after
4838 // the assignment...'.
4839 if (LHS.isBitField()) {
4840 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: RHS), Dst: LHS, Result: &RHS);
4841 // If the expression contained an implicit conversion, make sure
4842 // to use the value before the scalar conversion.
4843 Value *Src = Previous ? Previous : RHS;
4844 QualType DstType = E->getLHS()->getType();
4845 CGF.EmitBitfieldConversionCheck(Src, SrcType, Dst: RHS, DstType,
4846 Info: LHS.getBitFieldInfo(), Loc: E->getExprLoc());
4847 } else {
4848 CGF.EmitNullabilityCheck(LHS, RHS, Loc: E->getExprLoc());
4849 CGF.EmitStoreThroughLValue(Src: RValue::get(V: RHS), Dst: LHS);
4850 }
4851 }
4852
4853 // If the result is clearly ignored, return now.
4854 if (Ignore)
4855 return nullptr;
4856
4857 // The result of an assignment in C is the assigned r-value.
4858 if (!CGF.getLangOpts().CPlusPlus)
4859 return RHS;
4860
4861 // If the lvalue is non-volatile, return the computed value of the assignment.
4862 if (!LHS.isVolatileQualified())
4863 return RHS;
4864
4865 // Otherwise, reload the value.
4866 return EmitLoadOfLValue(LV: LHS, Loc: E->getExprLoc());
4867}
4868
4869Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
4870 // Perform vector logical and on comparisons with zero vectors.
4871 if (E->getType()->isVectorType()) {
4872 CGF.incrementProfileCounter(S: E);
4873
4874 Value *LHS = Visit(E: E->getLHS());
4875 Value *RHS = Visit(E: E->getRHS());
4876 Value *Zero = llvm::ConstantAggregateZero::get(Ty: LHS->getType());
4877 if (LHS->getType()->isFPOrFPVectorTy()) {
4878 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4879 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
4880 LHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS, RHS: Zero, Name: "cmp");
4881 RHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS: RHS, RHS: Zero, Name: "cmp");
4882 } else {
4883 LHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS, RHS: Zero, Name: "cmp");
4884 RHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS: RHS, RHS: Zero, Name: "cmp");
4885 }
4886 Value *And = Builder.CreateAnd(LHS, RHS);
4887 return Builder.CreateSExt(V: And, DestTy: ConvertType(T: E->getType()), Name: "sext");
4888 }
4889
4890 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4891 llvm::Type *ResTy = ConvertType(T: E->getType());
4892
4893 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
4894 // If we have 1 && X, just emit X without inserting the control flow.
4895 bool LHSCondVal;
4896 if (CGF.ConstantFoldsToSimpleInteger(Cond: E->getLHS(), Result&: LHSCondVal)) {
4897 if (LHSCondVal) { // If we have 1 && X, just emit X.
4898 CGF.incrementProfileCounter(S: E);
4899
4900 // If the top of the logical operator nest, reset the MCDC temp to 0.
4901 if (CGF.MCDCLogOpStack.empty())
4902 CGF.maybeResetMCDCCondBitmap(E);
4903
4904 CGF.MCDCLogOpStack.push_back(Elt: E);
4905
4906 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
4907
4908 // If we're generating for profiling or coverage, generate a branch to a
4909 // block that increments the RHS counter needed to track branch condition
4910 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4911 // "FalseBlock" after the increment is done.
4912 if (InstrumentRegions &&
4913 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
4914 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
4915 llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "land.end");
4916 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "land.rhscnt");
4917 Builder.CreateCondBr(Cond: RHSCond, True: RHSBlockCnt, False: FBlock);
4918 CGF.EmitBlock(BB: RHSBlockCnt);
4919 CGF.incrementProfileCounter(S: E->getRHS());
4920 CGF.EmitBranch(Block: FBlock);
4921 CGF.EmitBlock(BB: FBlock);
4922 }
4923
4924 CGF.MCDCLogOpStack.pop_back();
4925 // If the top of the logical operator nest, update the MCDC bitmap.
4926 if (CGF.MCDCLogOpStack.empty())
4927 CGF.maybeUpdateMCDCTestVectorBitmap(E);
4928
4929 // ZExt result to int or bool.
4930 return Builder.CreateZExtOrBitCast(V: RHSCond, DestTy: ResTy, Name: "land.ext");
4931 }
4932
4933 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4934 if (!CGF.ContainsLabel(S: E->getRHS()))
4935 return llvm::Constant::getNullValue(Ty: ResTy);
4936 }
4937
4938 // If the top of the logical operator nest, reset the MCDC temp to 0.
4939 if (CGF.MCDCLogOpStack.empty())
4940 CGF.maybeResetMCDCCondBitmap(E);
4941
4942 CGF.MCDCLogOpStack.push_back(Elt: E);
4943
4944 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "land.end");
4945 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "land.rhs");
4946
4947 CodeGenFunction::ConditionalEvaluation eval(CGF);
4948
4949 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4950 CGF.EmitBranchOnBoolExpr(Cond: E->getLHS(), TrueBlock: RHSBlock, FalseBlock: ContBlock,
4951 TrueCount: CGF.getProfileCount(S: E->getRHS()));
4952
4953  // Any edges into the ContBlock are now from an (indeterminate number of)
4954  // blocks reached from this first condition. All of these incoming values
4955  // will be false. Start setting up the PHI node in the Cont Block for this.
4956 llvm::PHINode *PN = llvm::PHINode::Create(Ty: llvm::Type::getInt1Ty(C&: VMContext), NumReservedValues: 2,
4957 NameStr: "", InsertBefore: ContBlock);
4958 for (llvm::pred_iterator PI = pred_begin(BB: ContBlock), PE = pred_end(BB: ContBlock);
4959 PI != PE; ++PI)
4960 PN->addIncoming(V: llvm::ConstantInt::getFalse(Context&: VMContext), BB: *PI);
4961
4962 eval.begin(CGF);
4963 CGF.EmitBlock(BB: RHSBlock);
4964 CGF.incrementProfileCounter(S: E);
4965 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
4966 eval.end(CGF);
4967
4968  // Reacquire the RHS block, as there may be subblocks inserted.
4969 RHSBlock = Builder.GetInsertBlock();
4970
4971 // If we're generating for profiling or coverage, generate a branch on the
4972 // RHS to a block that increments the RHS true counter needed to track branch
4973 // condition coverage.
4974 if (InstrumentRegions &&
4975 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
4976 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
4977 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "land.rhscnt");
4978 Builder.CreateCondBr(Cond: RHSCond, True: RHSBlockCnt, False: ContBlock);
4979 CGF.EmitBlock(BB: RHSBlockCnt);
4980 CGF.incrementProfileCounter(S: E->getRHS());
4981 CGF.EmitBranch(Block: ContBlock);
4982 PN->addIncoming(V: RHSCond, BB: RHSBlockCnt);
4983 }
4984
4985 // Emit an unconditional branch from this block to ContBlock.
4986 {
4987 // There is no need to emit line number for unconditional branch.
4988 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4989 CGF.EmitBlock(BB: ContBlock);
4990 }
4991 // Insert an entry into the phi node for the edge with the value of RHSCond.
4992 PN->addIncoming(V: RHSCond, BB: RHSBlock);
4993
4994 CGF.MCDCLogOpStack.pop_back();
4995 // If the top of the logical operator nest, update the MCDC bitmap.
4996 if (CGF.MCDCLogOpStack.empty())
4997 CGF.maybeUpdateMCDCTestVectorBitmap(E);
4998
4999 // Artificial location to preserve the scope information
5000 {
5001 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
5002 PN->setDebugLoc(Builder.getCurrentDebugLocation());
5003 }
5004
5005 // ZExt result to int.
5006 return Builder.CreateZExtOrBitCast(V: PN, DestTy: ResTy, Name: "land.ext");
5007}
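
// For example, a scalar `int r = a && b;` that does not constant-fold takes
// the control-flow path above and produces roughly:
//   entry:     br i1 %a.bool, label %land.rhs, label %land.end
//   land.rhs:  %b.bool = ...        ; RHS evaluated only when the LHS is true
//              br label %land.end
//   land.end:  %phi = phi i1 [ false, %entry ], [ %b.bool, %land.rhs ]
//              %r = zext i1 %phi to i32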
5008
5009Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
5010 // Perform vector logical or on comparisons with zero vectors.
5011 if (E->getType()->isVectorType()) {
5012 CGF.incrementProfileCounter(S: E);
5013
5014 Value *LHS = Visit(E: E->getLHS());
5015 Value *RHS = Visit(E: E->getRHS());
5016 Value *Zero = llvm::ConstantAggregateZero::get(Ty: LHS->getType());
5017 if (LHS->getType()->isFPOrFPVectorTy()) {
5018 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
5019 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
5020 LHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS, RHS: Zero, Name: "cmp");
5021 RHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS: RHS, RHS: Zero, Name: "cmp");
5022 } else {
5023 LHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS, RHS: Zero, Name: "cmp");
5024 RHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS: RHS, RHS: Zero, Name: "cmp");
5025 }
5026 Value *Or = Builder.CreateOr(LHS, RHS);
5027 return Builder.CreateSExt(V: Or, DestTy: ConvertType(T: E->getType()), Name: "sext");
5028 }
5029
5030 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
5031 llvm::Type *ResTy = ConvertType(T: E->getType());
5032
5033 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
5034 // If we have 0 || X, just emit X without inserting the control flow.
5035 bool LHSCondVal;
5036 if (CGF.ConstantFoldsToSimpleInteger(Cond: E->getLHS(), Result&: LHSCondVal)) {
5037 if (!LHSCondVal) { // If we have 0 || X, just emit X.
5038 CGF.incrementProfileCounter(S: E);
5039
5040 // If the top of the logical operator nest, reset the MCDC temp to 0.
5041 if (CGF.MCDCLogOpStack.empty())
5042 CGF.maybeResetMCDCCondBitmap(E);
5043
5044 CGF.MCDCLogOpStack.push_back(Elt: E);
5045
5046 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
5047
5048 // If we're generating for profiling or coverage, generate a branch to a
5049      // block that increments the RHS counter needed to track branch condition
5050 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5051 // "FalseBlock" after the increment is done.
5052 if (InstrumentRegions &&
5053 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
5054 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
5055 llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "lor.end");
5056 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
5057 Builder.CreateCondBr(Cond: RHSCond, True: FBlock, False: RHSBlockCnt);
5058 CGF.EmitBlock(BB: RHSBlockCnt);
5059 CGF.incrementProfileCounter(S: E->getRHS());
5060 CGF.EmitBranch(Block: FBlock);
5061 CGF.EmitBlock(BB: FBlock);
5062 }
5063
5064 CGF.MCDCLogOpStack.pop_back();
5065 // If the top of the logical operator nest, update the MCDC bitmap.
5066 if (CGF.MCDCLogOpStack.empty())
5067 CGF.maybeUpdateMCDCTestVectorBitmap(E);
5068
5069 // ZExt result to int or bool.
5070 return Builder.CreateZExtOrBitCast(V: RHSCond, DestTy: ResTy, Name: "lor.ext");
5071 }
5072
5073 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
5074 if (!CGF.ContainsLabel(S: E->getRHS()))
5075 return llvm::ConstantInt::get(Ty: ResTy, V: 1);
5076 }
5077
5078 // If the top of the logical operator nest, reset the MCDC temp to 0.
5079 if (CGF.MCDCLogOpStack.empty())
5080 CGF.maybeResetMCDCCondBitmap(E);
5081
5082 CGF.MCDCLogOpStack.push_back(Elt: E);
5083
5084 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "lor.end");
5085 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "lor.rhs");
5086
5087 CodeGenFunction::ConditionalEvaluation eval(CGF);
5088
5089 // Branch on the LHS first. If it is true, go to the success (cont) block.
5090 CGF.EmitBranchOnBoolExpr(Cond: E->getLHS(), TrueBlock: ContBlock, FalseBlock: RHSBlock,
5091 TrueCount: CGF.getCurrentProfileCount() -
5092 CGF.getProfileCount(S: E->getRHS()));
5093
5094  // Any edges into the ContBlock are now from an (indeterminate number of)
5095  // blocks reached from this first condition. All of these incoming values
5096  // will be true. Start setting up the PHI node in the Cont Block for this.
5097 llvm::PHINode *PN = llvm::PHINode::Create(Ty: llvm::Type::getInt1Ty(C&: VMContext), NumReservedValues: 2,
5098 NameStr: "", InsertBefore: ContBlock);
5099 for (llvm::pred_iterator PI = pred_begin(BB: ContBlock), PE = pred_end(BB: ContBlock);
5100 PI != PE; ++PI)
5101 PN->addIncoming(V: llvm::ConstantInt::getTrue(Context&: VMContext), BB: *PI);
5102
5103 eval.begin(CGF);
5104
5105 // Emit the RHS condition as a bool value.
5106 CGF.EmitBlock(BB: RHSBlock);
5107 CGF.incrementProfileCounter(S: E);
5108 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
5109
5110 eval.end(CGF);
5111
5112  // Reacquire the RHS block, as there may be subblocks inserted.
5113 RHSBlock = Builder.GetInsertBlock();
5114
5115 // If we're generating for profiling or coverage, generate a branch on the
5116 // RHS to a block that increments the RHS true counter needed to track branch
5117 // condition coverage.
5118 if (InstrumentRegions &&
5119 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
5120 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
5121 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
5122 Builder.CreateCondBr(Cond: RHSCond, True: ContBlock, False: RHSBlockCnt);
5123 CGF.EmitBlock(BB: RHSBlockCnt);
5124 CGF.incrementProfileCounter(S: E->getRHS());
5125 CGF.EmitBranch(Block: ContBlock);
5126 PN->addIncoming(V: RHSCond, BB: RHSBlockCnt);
5127 }
5128
5129 // Emit an unconditional branch from this block to ContBlock. Insert an entry
5130 // into the phi node for the edge with the value of RHSCond.
5131 CGF.EmitBlock(BB: ContBlock);
5132 PN->addIncoming(V: RHSCond, BB: RHSBlock);
5133
5134 CGF.MCDCLogOpStack.pop_back();
5135 // If the top of the logical operator nest, update the MCDC bitmap.
5136 if (CGF.MCDCLogOpStack.empty())
5137 CGF.maybeUpdateMCDCTestVectorBitmap(E);
5138
5139 // ZExt result to int.
5140 return Builder.CreateZExtOrBitCast(V: PN, DestTy: ResTy, Name: "lor.ext");
5141}
5142
5143Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5144 CGF.EmitIgnoredExpr(E: E->getLHS());
5145 CGF.EnsureInsertPoint();
5146 return Visit(E: E->getRHS());
5147}
5148
5149//===----------------------------------------------------------------------===//
5150// Other Operators
5151//===----------------------------------------------------------------------===//
5152
5153/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
5154/// expression is cheap enough and side-effect-free enough to evaluate
5155/// unconditionally instead of conditionally. This is used to convert control
5156/// flow into selects in some cases.
5157static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
5158 CodeGenFunction &CGF) {
5159 // Anything that is an integer or floating point constant is fine.
5160 return E->IgnoreParens()->isEvaluatable(Ctx: CGF.getContext());
5161
5162 // Even non-volatile automatic variables can't be evaluated unconditionally.
5163 // Referencing a thread_local may cause non-trivial initialization work to
5164 // occur. If we're inside a lambda and one of the variables is from the scope
5165 // outside the lambda, that function may have returned already. Reading its
5166  // locals is a bad idea. Also, these reads may introduce races that didn't
5167  // exist in the source-level program.
5168}
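
// For example, this predicate is what allows the conditional operator below
// to emit `x ? 4 : 5` as a branchless
//   %cond = select i1 %tobool, i32 4, i32 5
// rather than a cond.true/cond.false/cond.end diamond, because both arms are
// constants that are safe to evaluate unconditionally.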
5169
5170
5171Value *ScalarExprEmitter::
5172VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5173 TestAndClearIgnoreResultAssign();
5174
5175 // Bind the common expression if necessary.
5176 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5177
5178 Expr *condExpr = E->getCond();
5179 Expr *lhsExpr = E->getTrueExpr();
5180 Expr *rhsExpr = E->getFalseExpr();
5181
5182 // If the condition constant folds and can be elided, try to avoid emitting
5183 // the condition and the dead arm.
5184 bool CondExprBool;
5185 if (CGF.ConstantFoldsToSimpleInteger(Cond: condExpr, Result&: CondExprBool)) {
5186 Expr *live = lhsExpr, *dead = rhsExpr;
5187 if (!CondExprBool) std::swap(a&: live, b&: dead);
5188
5189 // If the dead side doesn't have labels we need, just emit the Live part.
5190 if (!CGF.ContainsLabel(S: dead)) {
5191 if (CondExprBool) {
5192 if (llvm::EnableSingleByteCoverage) {
5193 CGF.incrementProfileCounter(S: lhsExpr);
5194 CGF.incrementProfileCounter(S: rhsExpr);
5195 }
5196 CGF.incrementProfileCounter(S: E);
5197 }
5198 Value *Result = Visit(E: live);
5199
5200 // If the live part is a throw expression, it acts like it has a void
5201 // type, so evaluating it returns a null Value*. However, a conditional
5202 // with non-void type must return a non-null Value*.
5203 if (!Result && !E->getType()->isVoidType())
5204 Result = llvm::UndefValue::get(T: CGF.ConvertType(T: E->getType()));
5205
5206 return Result;
5207 }
5208 }
5209
5210 // OpenCL: If the condition is a vector, we can treat this condition like
5211 // the select function.
5212 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
5213 condExpr->getType()->isExtVectorType()) {
5214 CGF.incrementProfileCounter(S: E);
5215
5216 llvm::Value *CondV = CGF.EmitScalarExpr(E: condExpr);
5217 llvm::Value *LHS = Visit(E: lhsExpr);
5218 llvm::Value *RHS = Visit(E: rhsExpr);
5219
5220 llvm::Type *condType = ConvertType(T: condExpr->getType());
5221 auto *vecTy = cast<llvm::FixedVectorType>(Val: condType);
5222
5223 unsigned numElem = vecTy->getNumElements();
5224 llvm::Type *elemType = vecTy->getElementType();
5225
5226 llvm::Value *zeroVec = llvm::Constant::getNullValue(Ty: vecTy);
5227 llvm::Value *TestMSB = Builder.CreateICmpSLT(LHS: CondV, RHS: zeroVec);
5228 llvm::Value *tmp = Builder.CreateSExt(
5229 V: TestMSB, DestTy: llvm::FixedVectorType::get(ElementType: elemType, NumElts: numElem), Name: "sext");
5230 llvm::Value *tmp2 = Builder.CreateNot(V: tmp);
5231
5232 // Cast float to int to perform ANDs if necessary.
5233 llvm::Value *RHSTmp = RHS;
5234 llvm::Value *LHSTmp = LHS;
5235 bool wasCast = false;
5236 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(Val: RHS->getType());
5237 if (rhsVTy->getElementType()->isFloatingPointTy()) {
5238 RHSTmp = Builder.CreateBitCast(V: RHS, DestTy: tmp2->getType());
5239 LHSTmp = Builder.CreateBitCast(V: LHS, DestTy: tmp->getType());
5240 wasCast = true;
5241 }
5242
5243 llvm::Value *tmp3 = Builder.CreateAnd(LHS: RHSTmp, RHS: tmp2);
5244 llvm::Value *tmp4 = Builder.CreateAnd(LHS: LHSTmp, RHS: tmp);
5245 llvm::Value *tmp5 = Builder.CreateOr(LHS: tmp3, RHS: tmp4, Name: "cond");
5246 if (wasCast)
5247 tmp5 = Builder.CreateBitCast(V: tmp5, DestTy: RHS->getType());
5248
5249 return tmp5;
5250 }
5251
5252 if (condExpr->getType()->isVectorType() ||
5253 condExpr->getType()->isSveVLSBuiltinType()) {
5254 CGF.incrementProfileCounter(S: E);
5255
5256 llvm::Value *CondV = CGF.EmitScalarExpr(E: condExpr);
5257 llvm::Value *LHS = Visit(E: lhsExpr);
5258 llvm::Value *RHS = Visit(E: rhsExpr);
5259
5260 llvm::Type *CondType = ConvertType(T: condExpr->getType());
5261 auto *VecTy = cast<llvm::VectorType>(Val: CondType);
5262 llvm::Value *ZeroVec = llvm::Constant::getNullValue(Ty: VecTy);
5263
5264 CondV = Builder.CreateICmpNE(LHS: CondV, RHS: ZeroVec, Name: "vector_cond");
5265 return Builder.CreateSelect(C: CondV, True: LHS, False: RHS, Name: "vector_select");
5266 }
5267
5268 // If this is a really simple expression (like x ? 4 : 5), emit this as a
5269 // select instead of as control flow. We can only do this if it is cheap and
5270 // safe to evaluate the LHS and RHS unconditionally.
5271 if (isCheapEnoughToEvaluateUnconditionally(E: lhsExpr, CGF) &&
5272 isCheapEnoughToEvaluateUnconditionally(E: rhsExpr, CGF)) {
5273 llvm::Value *CondV = CGF.EvaluateExprAsBool(E: condExpr);
5274 llvm::Value *StepV = Builder.CreateZExtOrBitCast(V: CondV, DestTy: CGF.Int64Ty);
5275
5276 if (llvm::EnableSingleByteCoverage) {
5277 CGF.incrementProfileCounter(S: lhsExpr);
5278 CGF.incrementProfileCounter(S: rhsExpr);
5279 CGF.incrementProfileCounter(S: E);
5280 } else
5281 CGF.incrementProfileCounter(S: E, StepV);
5282
5283 llvm::Value *LHS = Visit(E: lhsExpr);
5284 llvm::Value *RHS = Visit(E: rhsExpr);
5285 if (!LHS) {
5286 // If the conditional has void type, make sure we return a null Value*.
5287 assert(!RHS && "LHS and RHS types must match");
5288 return nullptr;
5289 }
5290 return Builder.CreateSelect(C: CondV, True: LHS, False: RHS, Name: "cond");
5291 }
5292
5293 // If the top of the logical operator nest, reset the MCDC temp to 0.
5294 if (CGF.MCDCLogOpStack.empty())
5295 CGF.maybeResetMCDCCondBitmap(E: condExpr);
5296
5297 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock(name: "cond.true");
5298 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "cond.false");
5299 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "cond.end");
5300
5301 CodeGenFunction::ConditionalEvaluation eval(CGF);
5302 CGF.EmitBranchOnBoolExpr(Cond: condExpr, TrueBlock: LHSBlock, FalseBlock: RHSBlock,
5303 TrueCount: CGF.getProfileCount(S: lhsExpr));
5304
5305 CGF.EmitBlock(BB: LHSBlock);
5306
5307 // If the top of the logical operator nest, update the MCDC bitmap for the
5308 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5309 // may also contain a boolean expression.
5310 if (CGF.MCDCLogOpStack.empty())
5311 CGF.maybeUpdateMCDCTestVectorBitmap(E: condExpr);
5312
5313 if (llvm::EnableSingleByteCoverage)
5314 CGF.incrementProfileCounter(S: lhsExpr);
5315 else
5316 CGF.incrementProfileCounter(S: E);
5317
5318 eval.begin(CGF);
5319 Value *LHS = Visit(E: lhsExpr);
5320 eval.end(CGF);
5321
5322 LHSBlock = Builder.GetInsertBlock();
5323 Builder.CreateBr(Dest: ContBlock);
5324
5325 CGF.EmitBlock(BB: RHSBlock);
5326
5327 // If the top of the logical operator nest, update the MCDC bitmap for the
5328 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5329 // may also contain a boolean expression.
5330 if (CGF.MCDCLogOpStack.empty())
5331 CGF.maybeUpdateMCDCTestVectorBitmap(E: condExpr);
5332
5333 if (llvm::EnableSingleByteCoverage)
5334 CGF.incrementProfileCounter(S: rhsExpr);
5335
5336 eval.begin(CGF);
5337 Value *RHS = Visit(E: rhsExpr);
5338 eval.end(CGF);
5339
5340 RHSBlock = Builder.GetInsertBlock();
5341 CGF.EmitBlock(BB: ContBlock);
5342
5343 // If the LHS or RHS is a throw expression, it will be legitimately null.
5344 if (!LHS)
5345 return RHS;
5346 if (!RHS)
5347 return LHS;
5348
5349 // Create a PHI node for the real part.
5350 llvm::PHINode *PN = Builder.CreatePHI(Ty: LHS->getType(), NumReservedValues: 2, Name: "cond");
5351 PN->addIncoming(V: LHS, BB: LHSBlock);
5352 PN->addIncoming(V: RHS, BB: RHSBlock);
5353
5354  // When single-byte coverage mode is enabled, add a counter to the
5355  // continuation block.
5356 if (llvm::EnableSingleByteCoverage)
5357 CGF.incrementProfileCounter(S: E);
5358
5359 return PN;
5360}

Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}
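// Note (illustrative): the chosen operand of __builtin_choose_expr is fixed at
// compile time, so only that operand is emitted here; for
// `__builtin_choose_expr(1, a, b)` no code is generated for `b`.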

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();

  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  Address ArgValue = Address::invalid();
  RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  return ArgPtr.getScalarVal();
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// Convert a vec3 to vec4, or vice versa.
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src,
                                     llvm::ArrayRef(Mask, NumElementsDst));
}
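// For illustration: with NumElementsDst == 4, the mask <0, 1, 2, -1> widens a
// vec3 to a vec4 whose fourth lane is undef/poison; with NumElementsDst == 3,
// only the first three mask entries are used, truncating a vec4 to a vec3.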

// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be a
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}
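// For illustration, case 4b above: converting a <2 x i32> value to a pointer
// on a 64-bit target first bitcasts the vector to i64 (the intptr_t type for
// that address space) and then applies inttoptr to the destination pointer
// type.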

Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Use bit vector expansion for ext_vector_type boolean vectors.
  if (E->getType()->isExtVectorBoolType())
    return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       DstTy);

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    auto *Vec4Ty = llvm::FixedVectorType::get(
        cast<llvm::VectorType>(DstTy)->getElementType(), 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       Vec4Ty);

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}

Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type and return
/// the result as an LLVM value. If IgnoreResultAssign is set, the value of an
/// assignment expression may be ignored by the caller.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}


Value *
CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
                                        QualType PromotionType) {
  if (!PromotionType.isNull())
    return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
  else
    return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
}


llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isPRValue()) {
    llvm::Type *BaseTy =
        ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
    Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress();
  }

  // Cast the address to Class*.
  Addr = Addr.withElementType(ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}


LValue CodeGenFunction::EmitCompoundAssignmentLValue(
                                            const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                            Result)
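  // Illustrative expansion: the use `COMPOUND_OP(Add);` below expands to
  //   case BO_AddAssign:
  //     return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::EmitAdd,
  //                                            Result);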
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}

struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns the offset in bytes and a boolean flag indicating whether an
/// overflow happened during evaluation.
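/// For illustration: on a 64-bit target, `getelementptr inbounds i32, ptr %p,
/// i64 %i` contributes a byte offset of `4 * %i`, while a struct field index
/// contributes that field's constant byte offset from the struct layout.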
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize =
          llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}

Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();
  Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded, and only do so in
  // the default address space.
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  SanitizerScope SanScope(this);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, and we are using C++ semantics,
  // where nullptr+0 is defined, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (PerformNullCheck) {
    // In C++, if the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have a non-null base pointer, we cannot get a null
    // pointer as a result, so the offset cannot be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    //
    // C, however, is stricter in this regard, and gives more
    // optimization opportunities: in C, additionally, nullptr+0 is undefined.
    // So both the input to the 'gep inbounds' AND the output must not be null.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid =
        CGM.getLangOpts().CPlusPlus
            ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
            : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    //    pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If offset was positive, then the computed pointer can not be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If offset was negative, then the computed pointer can not be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer can not be [unsigned] less than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer can not be [unsigned] greater than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}
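// For illustration only: for `p + i` with -fsanitize=pointer-overflow on a
// typical 64-bit target, the function above emits roughly
//   %base   = ptrtoint ptr %p to i64
//   %result = add i64 %base, %offset      ; wrapping add of the byte offset
// plus an icmp-based validity predicate that, when false, branches to a call
// of the __ubsan_handle_pointer_overflow runtime handler; the exact IR depends
// on language mode, address space, and whether the offset is constant.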

Address CodeGenFunction::EmitCheckedInBoundsGEP(
    Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
    bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
    const Twine &Name) {
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return Builder.CreateInBoundsGEP(Addr, IdxList, elementType, Align, Name);

  return RawAddress(
      EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
                             IdxList, SignedIndices, IsSubtraction, Loc, Name),
      elementType, Align);
}