1//===- InstCombineAndOrXor.cpp --------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visitAnd, visitOr, and visitXor functions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/ADT/SmallBitVector.h"
15#include "llvm/Analysis/CmpInstAnalysis.h"
16#include "llvm/Analysis/FloatingPointPredicateUtils.h"
17#include "llvm/Analysis/InstructionSimplify.h"
18#include "llvm/IR/ConstantRange.h"
19#include "llvm/IR/DerivedTypes.h"
20#include "llvm/IR/Instructions.h"
21#include "llvm/IR/Intrinsics.h"
22#include "llvm/IR/PatternMatch.h"
23#include "llvm/IR/ProfDataUtils.h"
24#include "llvm/Transforms/InstCombine/InstCombiner.h"
25#include "llvm/Transforms/Utils/Local.h"
26
27using namespace llvm;
28using namespace PatternMatch;
29
30#define DEBUG_TYPE "instcombine"
31
32namespace llvm {
33extern cl::opt<bool> ProfcheckDisableMetadataFixes;
34}
35
36/// This is the complement of getICmpCode, which turns an opcode and two
37/// operands into either a constant true or false, or a brand new ICmp
38/// instruction. The sign is passed in to determine which kind of predicate to
39/// use in the new icmp instruction.
40static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS,
41 InstCombiner::BuilderTy &Builder) {
42 ICmpInst::Predicate NewPred;
43 if (Constant *TorF = getPredForICmpCode(Code, Sign, OpTy: LHS->getType(), Pred&: NewPred))
44 return TorF;
45 return Builder.CreateICmp(P: NewPred, LHS, RHS);
46}
47
48/// This is the complement of getFCmpCode, which turns an opcode and two
49/// operands into either a FCmp instruction, or a true/false constant.
50static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
51 InstCombiner::BuilderTy &Builder, FMFSource FMF) {
52 FCmpInst::Predicate NewPred;
53 if (Constant *TorF = getPredForFCmpCode(Code, OpTy: LHS->getType(), Pred&: NewPred))
54 return TorF;
55 return Builder.CreateFCmpFMF(P: NewPred, LHS, RHS, FMFSource: FMF);
56}
57
/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo < Hi. IsSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.
Value *InstCombinerImpl::insertRangeTest(Value *V, const APInt &Lo,
                                         const APInt &Hi, bool isSigned,
                                         bool Inside) {
  assert((isSigned ? Lo.slt(Hi) : Lo.ult(Hi)) &&
         "Lo is not < Hi in range emission code!");

  Type *Ty = V->getType();

  // V >= Min && V < Hi --> V < Hi
  // V < Min || V >= Hi --> V >= Hi
  // When Lo is the minimum value of the (signed or unsigned) domain, the
  // lower-bound half of the test is vacuous, so a single compare against Hi
  // suffices.
  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
    Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
    return Builder.CreateICmp(P: Pred, LHS: V, RHS: ConstantInt::get(Ty, V: Hi));
  }

  // V >= Lo && V < Hi --> V - Lo u< Hi - Lo
  // V < Lo || V >= Hi --> V - Lo u>= Hi - Lo
  // General case: shift the range down by Lo so the two-sided range test
  // becomes a single unsigned compare on the offset value.
  Value *VMinusLo =
      Builder.CreateSub(LHS: V, RHS: ConstantInt::get(Ty, V: Lo), Name: V->getName() + ".off");
  Constant *HiMinusLo = ConstantInt::get(Ty, V: Hi - Lo);
  return Builder.CreateICmp(P: Pred, LHS: VMinusLo, RHS: HiMinusLo);
}
84
/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
/// "AllOnes" declares that the comparison is true only if (A & B) == A or all
/// bits of A are set in B.
/// Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
///
/// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
/// bits of A are cleared in B.
/// Example: (icmp eq (A & 3), 0) -> Mask_AllZeroes
///
/// "Mixed" declares that (A & B) == C and C might or might not contain any
/// number of one bits and zero bits.
/// Example: (icmp eq (A & 3), 1) -> AMask_Mixed
///
/// "Not" means that in above descriptions "==" should be replaced by "!=".
/// Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
/// (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
/// (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
// NOTE: each "Not" flag is exactly one bit above its positive counterpart;
// conjugateICmpMask() relies on this layout to swap them with shifts.
enum MaskedICmpType {
  AMask_AllOnes = 1,       // (icmp eq (A & B), A); A is the mask
  AMask_NotAllOnes = 2,    // (icmp ne (A & B), A)
  BMask_AllOnes = 4,       // (icmp eq (A & B), B); B is the mask
  BMask_NotAllOnes = 8,    // (icmp ne (A & B), B)
  Mask_AllZeros = 16,      // (icmp eq (A & B), 0); either operand is the mask
  Mask_NotAllZeros = 32,   // (icmp ne (A & B), 0)
  AMask_Mixed = 64,        // (icmp eq (A & B), C) with (A & C) == C
  AMask_NotMixed = 128,    // (icmp ne (A & B), C) with (A & C) == C
  BMask_Mixed = 256,       // (icmp eq (A & B), C) with (B & C) == C
  BMask_NotMixed = 512     // (icmp ne (A & B), C) with (B & C) == C
};
124
/// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
/// satisfies.
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {
  const APInt *ConstA = nullptr, *ConstB = nullptr, *ConstC = nullptr;
  // Capture whichever operands are constant; the matches are allowed to fail,
  // leaving the corresponding pointer null.
  match(V: A, P: m_APInt(Res&: ConstA));
  match(V: B, P: m_APInt(Res&: ConstB));
  match(V: C, P: m_APInt(Res&: ConstC));
  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
  bool IsAPow2 = ConstA && ConstA->isPowerOf2();
  bool IsBPow2 = ConstB && ConstB->isPowerOf2();
  unsigned MaskVal = 0;
  if (ConstC && ConstC->isZero()) {
    // if C is zero, then both A and B qualify as mask
    MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
                     : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
    // For a single-bit mask, (A & B) == 0 additionally implies the mask's bit
    // is not set in B (NotAllOnes); the negated predicate implies it is set.
    if (IsAPow2)
      MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
                       : (AMask_AllOnes | AMask_Mixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
                       : (BMask_AllOnes | BMask_Mixed));
    return MaskVal;
  }

  if (A == C) {
    // (icmp eq (A & B), A): every bit of the mask A is set in B.
    MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
                     : (AMask_NotAllOnes | AMask_NotMixed));
    // For a single-bit mask, "all of A set" coincides with "(A & B) != 0".
    if (IsAPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
                       : (Mask_AllZeros | AMask_Mixed));
  } else if (ConstA && ConstC && ConstC->isSubsetOf(RHS: *ConstA)) {
    // C's bits all lie inside the mask A, so this is a valid "mixed" test.
    MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
  }

  if (B == C) {
    // Symmetric to the A == C case, with B playing the role of the mask.
    MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
                     : (BMask_NotAllOnes | BMask_NotMixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
                       : (Mask_AllZeros | BMask_Mixed));
  } else if (ConstB && ConstC && ConstC->isSubsetOf(RHS: *ConstB)) {
    MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
  }

  return MaskVal;
}
172
173/// Convert an analysis of a masked ICmp into its equivalent if all boolean
174/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
175/// is adjacent to the corresponding normal flag (recording ==), this just
176/// involves swapping those bits over.
177static unsigned conjugateICmpMask(unsigned Mask) {
178 unsigned NewMask;
179 NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
180 AMask_Mixed | BMask_Mixed))
181 << 1;
182
183 NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
184 AMask_NotMixed | BMask_NotMixed))
185 >> 1;
186
187 return NewMask;
188}
189
// Adapts the external decomposeBitTest for local use.
// On success, Cond is viewed as (icmp Pred (and X, Y), Z), where Y is the
// mask and Z the compared constant; both are materialized as ConstantInts of
// X's type so callers can treat them uniformly with IR values.
static bool decomposeBitTest(Value *Cond, CmpInst::Predicate &Pred, Value *&X,
                             Value *&Y, Value *&Z) {
  auto Res =
      llvm::decomposeBitTest(Cond, /*LookThroughTrunc=*/true,
                             /*AllowNonZeroC=*/true, /*DecomposeAnd=*/true);
  if (!Res)
    return false;

  Pred = Res->Pred;
  X = Res->X;
  Y = ConstantInt::get(Ty: X->getType(), V: Res->Mask);
  Z = ConstantInt::get(Ty: X->getType(), V: Res->C);
  return true;
}
205
/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the pattern classes (from MaskedICmpType) for the left hand side and
/// the right hand side as a pair.
/// LHS and RHS are the left hand side and the right hand side ICmps and PredL
/// and PredR are their predicates, respectively.
/// On success, A is the value common to both sides, B/C describe the LHS
/// (mask and compared constant) and D/E describe the RHS. Returns
/// std::nullopt if no common value can be found or either side is not an
/// equality test.
static std::optional<std::pair<unsigned, unsigned>>
getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C, Value *&D, Value *&E,
                         Value *LHS, Value *RHS, ICmpInst::Predicate &PredL,
                         ICmpInst::Predicate &PredR) {

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // and L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R**, that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // above.

  // Check whether the icmp can be decomposed into a bit test.
  Value *L1, *L11, *L12, *L2, *L21, *L22;
  if (decomposeBitTest(Cond: LHS, Pred&: PredL, X&: L11, Y&: L12, Z&: L2)) {
    // The decomposition fixed the and-operands (L11, L12) and constant (L2);
    // there is no second and-expression to consider on this side.
    L21 = L22 = L1 = nullptr;
  } else {
    auto *LHSCMP = dyn_cast<ICmpInst>(Val: LHS);
    if (!LHSCMP)
      return std::nullopt;

    // Don't allow pointers. Splat vectors are fine.
    if (!LHSCMP->getOperand(i_nocapture: 0)->getType()->isIntOrIntVectorTy())
      return std::nullopt;

    PredL = LHSCMP->getPredicate();
    L1 = LHSCMP->getOperand(i_nocapture: 0);
    L2 = LHSCMP->getOperand(i_nocapture: 1);
    // Look for ANDs in the LHS icmp.
    if (!match(V: L1, P: m_And(L: m_Value(V&: L11), R: m_Value(V&: L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(Ty: L1->getType());
    }

    if (!match(V: L2, P: m_And(L: m_Value(V&: L21), R: m_Value(V&: L22)))) {
      L21 = L2;
      L22 = Constant::getAllOnesValue(Ty: L2->getType());
    }
  }

  // Bail if LHS was a icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(P: PredL))
    return std::nullopt;

  Value *R11, *R12, *R2;
  if (decomposeBitTest(Cond: RHS, Pred&: PredR, X&: R11, Y&: R12, Z&: R2)) {
    // Pick as A whichever decomposed operand also appears on the LHS; the
    // other operand becomes the RHS mask D.
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
    } else {
      return std::nullopt;
    }
    E = R2;
  } else {
    auto *RHSCMP = dyn_cast<ICmpInst>(Val: RHS);
    if (!RHSCMP)
      return std::nullopt;
    // Don't allow pointers. Splat vectors are fine.
    if (!RHSCMP->getOperand(i_nocapture: 0)->getType()->isIntOrIntVectorTy())
      return std::nullopt;

    PredR = RHSCMP->getPredicate();

    Value *R1 = RHSCMP->getOperand(i_nocapture: 0);
    R2 = RHSCMP->getOperand(i_nocapture: 1);
    bool Ok = false;
    if (!match(V: R1, P: m_And(L: m_Value(V&: R11), R: m_Value(V&: R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(Ty: R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R2;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R2;
      Ok = true;
    }

    // Avoid matching against the -1 value we created for unmasked operand.
    if (Ok && match(V: A, P: m_AllOnes()))
      Ok = false;

    // Look for ANDs on the right side of the RHS icmp.
    if (!Ok) {
      if (!match(V: R2, P: m_And(L: m_Value(V&: R11), R: m_Value(V&: R12)))) {
        R11 = R2;
        R12 = Constant::getAllOnesValue(Ty: R2->getType());
      }

      if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
        A = R11;
        D = R12;
        E = R1;
      } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
        A = R12;
        D = R11;
        E = R1;
      } else {
        return std::nullopt;
      }
    }
  }

  // Bail if RHS was a icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(P: PredR))
    return std::nullopt;

  // Now that A is known, recover the LHS mask B and constant C from whichever
  // side of the LHS compare contained A.
  if (L11 == A) {
    B = L12;
    C = L2;
  } else if (L12 == A) {
    B = L11;
    C = L2;
  } else if (L21 == A) {
    B = L22;
    C = L1;
  } else if (L22 == A) {
    B = L21;
    C = L1;
  }

  unsigned LeftType = getMaskedICmpType(A, B, C, Pred: PredL);
  unsigned RightType = getMaskedICmpType(A, B: D, C: E, Pred: PredR);
  return std::optional<std::pair<unsigned, unsigned>>(
      std::make_pair(x&: LeftType, y&: RightType));
}
349
/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
/// and the right hand side is of type BMask_Mixed. For example,
/// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
/// Also used for logical and/or, must be poison safe.
/// Returns the replacement value, or nullptr if no fold applies.
static Value *foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
    Value *LHS, Value *RHS, bool IsAnd, Value *A, Value *B, Value *D, Value *E,
    ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    InstCombiner::BuilderTy &Builder) {
  // We are given the canonical form:
  //   (icmp ne (A & B), 0) & (icmp eq (A & D), E).
  // where D & E == E.
  //
  // If IsAnd is false, we get it in negated form:
  //   (icmp eq (A & B), 0) | (icmp ne (A & D), E) ->
  //     !((icmp ne (A & B), 0) & (icmp eq (A & D), E)).
  //
  // We currently handle the case of B, C, D, E are constant.
  //
  const APInt *BCst, *DCst, *OrigECst;
  if (!match(V: B, P: m_APInt(Res&: BCst)) || !match(V: D, P: m_APInt(Res&: DCst)) ||
      !match(V: E, P: m_APInt(Res&: OrigECst)))
    return nullptr;

  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;

  // Update E to the canonical form when D is a power of two and RHS is
  // canonicalized as,
  // (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or
  // (icmp ne (A & D), D) -> (icmp eq (A & D), 0).
  APInt ECst = *OrigECst;
  if (PredR != NewCC)
    ECst ^= *DCst;

  // If B or D is zero, skip because if LHS or RHS can be trivially folded by
  // other folding rules and this pattern won't apply any more.
  if (*BCst == 0 || *DCst == 0)
    return nullptr;

  // If B and D don't intersect, ie. (B & D) == 0, try to fold isNaN idiom:
  // (icmp ne (A & FractionBits), 0) & (icmp eq (A & ExpBits), ExpBits)
  //     -> isNaN(A)
  // Otherwise, we cannot deduce anything from it.
  if (!BCst->intersects(RHS: *DCst)) {
    Value *Src;
    // Only attempt the isNaN rewrite when A is a bitcast of a float and the
    // function is not strictfp (the fcmp we'd create ignores FP env).
    if (*DCst == ECst && match(V: A, P: m_ElementWiseBitCast(Op: m_Value(V&: Src))) &&
        !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
            Kind: Attribute::StrictFP)) {
      Type *Ty = Src->getType()->getScalarType();
      if (!Ty->isIEEELikeFPTy())
        return nullptr;

      // D/E must be exactly the exponent field and B exactly the fraction
      // field (without the sign bit) for the NaN classification to hold.
      APInt ExpBits = APFloat::getInf(Sem: Ty->getFltSemantics()).bitcastToAPInt();
      if (ECst != ExpBits)
        return nullptr;
      APInt FractionBits = ~ExpBits;
      FractionBits.clearSignBit();
      if (*BCst != FractionBits)
        return nullptr;

      // NaN compares unordered with everything, including zero.
      return Builder.CreateFCmp(P: IsAnd ? FCmpInst::FCMP_UNO : FCmpInst::FCMP_ORD,
                                LHS: Src, RHS: ConstantFP::getZero(Ty: Src->getType()));
    }
    return nullptr;
  }

  // If the following two conditions are met:
  //
  // 1. mask B covers only a single bit that's not covered by mask D, that is,
  // (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection of
  // B and D has only one bit set) and,
  //
  // 2. RHS (and E) indicates that the rest of B's bits are zero (in other
  // words, the intersection of B and D is zero), that is, ((B & D) & E) == 0
  //
  // then that single bit in B must be one and thus the whole expression can be
  // folded to
  //   (A & (B | D)) == (B & (B ^ D)) | E.
  //
  // For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9)
  // (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8)
  if ((((*BCst & *DCst) & ECst) == 0) &&
      (*BCst & (*BCst ^ *DCst)).isPowerOf2()) {
    APInt BorD = *BCst | *DCst;
    APInt BandBxorDorE = (*BCst & (*BCst ^ *DCst)) | ECst;
    Value *NewMask = ConstantInt::get(Ty: A->getType(), V: BorD);
    Value *NewMaskedValue = ConstantInt::get(Ty: A->getType(), V: BandBxorDorE);
    Value *NewAnd = Builder.CreateAnd(LHS: A, RHS: NewMask);
    return Builder.CreateICmp(P: NewCC, LHS: NewAnd, RHS: NewMaskedValue);
  }

  auto IsSubSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C1;
  };
  auto IsSuperSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C2;
  };

  // In the following, we consider only the cases where B is a superset of D, B
  // is a subset of D, or B == D because otherwise there's at least one bit
  // covered by B but not D, in which case we can't deduce much from it, so
  // no folding (aside from the single must-be-one bit case right above.)
  // For example,
  // (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding.
  if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
    return nullptr;

  // At this point, either B is a superset of D, B is a subset of D or B == D.

  // If E is zero, if B is a subset of (or equal to) D, LHS and RHS contradict
  // and the whole expression becomes false (or true if negated), otherwise, no
  // folding.
  // For example,
  // (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false.
  // (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding.
  if (ECst.isZero()) {
    if (IsSubSetOrEqual(BCst, DCst))
      return ConstantInt::get(Ty: LHS->getType(), V: !IsAnd);
    return nullptr;
  }

  // At this point, B, D, E aren't zero and (B & D) == B, (B & D) == D or B ==
  // D. If B is a superset of (or equal to) D, since E is not zero, LHS is
  // subsumed by RHS (RHS implies LHS.) So the whole expression becomes
  // RHS. For example,
  // (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  // (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  if (IsSuperSetOrEqual(BCst, DCst)) {
    // We can't guarantee that samesign hold after this fold.
    if (auto *ICmp = dyn_cast<ICmpInst>(Val: RHS))
      ICmp->setSameSign(false);
    return RHS;
  }
  // Otherwise, B is a subset of D. If B and E have a common bit set,
  // ie. (B & E) != 0, then LHS is subsumed by RHS. For example.
  // (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
  if ((*BCst & ECst) != 0) {
    // We can't guarantee that samesign hold after this fold.
    if (auto *ICmp = dyn_cast<ICmpInst>(Val: RHS))
      ICmp->setSameSign(false);
    return RHS;
  }
  // Otherwise, LHS and RHS contradict and the whole expression becomes false
  // (or true if negated.) For example,
  // (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false.
  // (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false.
  return ConstantInt::get(Ty: LHS->getType(), V: !IsAnd);
}
500
/// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side and the right hand side
/// aren't of the common mask pattern type.
/// Also used for logical and/or, must be poison safe.
/// Returns the replacement value, or nullptr if no fold applies.
static Value *foldLogOpOfMaskedICmpsAsymmetric(
    Value *LHS, Value *RHS, bool IsAnd, Value *A, Value *B, Value *C, Value *D,
    Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  // Handle Mask_NotAllZeros-BMask_Mixed cases.
  // (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or
  // (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E)
  // which gets swapped to
  //   (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C).
  if (!IsAnd) {
    // For the OR form, reason about the conjugated (negated-predicate) masks
    // so the helper below can always assume the AND form.
    LHSMask = conjugateICmpMask(Mask: LHSMask);
    RHSMask = conjugateICmpMask(Mask: RHSMask);
  }
  if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            LHS, RHS, IsAnd, A, B, D, E, PredL, PredR, Builder)) {
      return V;
    }
  } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) {
    // Same pattern with the operands swapped: pass RHS as the NotAllZeros
    // side and LHS as the Mixed side.
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            LHS: RHS, RHS: LHS, IsAnd, A, B: D, D: B, E: C, PredL: PredR, PredR: PredL, Builder)) {
      return V;
    }
  }
  return nullptr;
}
533
/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
/// \p IsLogical is true for select-based (poison-blocking) and/or; those
/// folds must not let poison from the RHS leak into the result.
/// Returns the replacement value, or nullptr if no fold applies.
static Value *foldLogOpOfMaskedICmps(Value *LHS, Value *RHS, bool IsAnd,
                                     bool IsLogical,
                                     InstCombiner::BuilderTy &Builder,
                                     const SimplifyQuery &Q) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate PredL, PredR;
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
  if (!MaskPair)
    return nullptr;
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  unsigned LHSMask = MaskPair->first;
  unsigned RHSMask = MaskPair->second;
  unsigned Mask = LHSMask & RHSMask;
  if (Mask == 0) {
    // Even if the two sides don't share a common pattern, check if folding can
    // still happen.
    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
            Builder))
      return V;
    return nullptr;
  }

  // In full generality:
  //     (icmp (A & B) Op C) | (icmp (A & D) Op E)
  // ==  ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    Mask = conjugateICmpMask(Mask);
  }

  if (Mask & Mask_AllZeros) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(V: D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(LHS: B, RHS: D);
    Value *NewAnd = Builder.CreateAnd(LHS: A, RHS: NewOr);
    // We can't use C as zero because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D, having a single bit set.
    Value *Zero = Constant::getNullValue(Ty: A->getType());
    return Builder.CreateICmp(P: NewCC, LHS: NewAnd, RHS: Zero);
  }
  if (Mask & BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(V: D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(LHS: B, RHS: D);
    Value *NewAnd = Builder.CreateAnd(LHS: A, RHS: NewOr);
    return Builder.CreateICmp(P: NewCC, LHS: NewAnd, RHS: NewOr);
  }
  if (Mask & AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(V: D))
      return nullptr; // TODO: Use freeze?
    Value *NewAnd1 = Builder.CreateAnd(LHS: B, RHS: D);
    Value *NewAnd2 = Builder.CreateAnd(LHS: A, RHS: NewAnd1);
    return Builder.CreateICmp(P: NewCC, LHS: NewAnd2, RHS: A);
  }

  // The remaining folds require both masks to be constants.
  const APInt *ConstB, *ConstD;
  if (match(V: B, P: m_APInt(Res&: ConstB)) && match(V: D, P: m_APInt(Res&: ConstD))) {
    if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
      // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
      // (icmp ne (A & B), B) & (icmp ne (A & D), D)
      // -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
      // Only valid if one of the masks is a superset of the other (check "B&D"
      // is the same as either B or D).
      APInt NewMask = *ConstB & *ConstD;
      if (NewMask == *ConstB)
        return LHS;
      if (NewMask == *ConstD) {
        // Keeping only the RHS of a logical op can expose poison-generating
        // flags that were previously guarded by the LHS; drop them.
        if (IsLogical) {
          if (auto *RHSI = dyn_cast<Instruction>(Val: RHS))
            RHSI->dropPoisonGeneratingFlags();
        }
        return RHS;
      }
    }

    if (Mask & AMask_NotAllOnes) {
      // (icmp ne (A & B), B) & (icmp ne (A & D), D)
      // -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
      // Only valid if one of the masks is a superset of the other (check "B|D"
      // is the same as either B or D).
      APInt NewMask = *ConstB | *ConstD;
      if (NewMask == *ConstB)
        return LHS;
      if (NewMask == *ConstD)
        return RHS;
    }

    if (Mask & (BMask_Mixed | BMask_NotMixed)) {
      // Mixed:
      // (icmp eq (A & B), C) & (icmp eq (A & D), E)
      // We already know that B & C == C && D & E == E.
      // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
      // C and E, which are shared by both the mask B and the mask D, don't
      // contradict, then we can transform to
      // -> (icmp eq (A & (B|D)), (C|E))
      // Currently, we only handle the case of B, C, D, and E being constant.
      // We can't simply use C and E because we might actually handle
      //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
      // with B and D, having a single bit set.

      // NotMixed:
      // (icmp ne (A & B), C) & (icmp ne (A & D), E)
      // -> (icmp ne (A & (B & D)), (C & E))
      // Check the intersection (B & D) for inequality.
      // Assume that (B & D) == B || (B & D) == D, i.e B/D is a subset of D/B
      // and (B & D) & (C ^ E) == 0, bits of C and E, which are shared by both
      // the B and the D, don't contradict. Note that we can assume (~B & C) ==
      // 0 && (~D & E) == 0, previous operation should delete these icmps if it
      // hadn't been met.

      const APInt *OldConstC, *OldConstE;
      if (!match(V: C, P: m_APInt(Res&: OldConstC)) || !match(V: E, P: m_APInt(Res&: OldConstE)))
        return nullptr;

      auto FoldBMixed = [&](ICmpInst::Predicate CC, bool IsNot) -> Value * {
        CC = IsNot ? CmpInst::getInversePredicate(pred: CC) : CC;
        // Re-canonicalize C/E if the side's predicate disagrees with the
        // target predicate (xor with the mask flips eq-0 <-> eq-mask forms).
        const APInt ConstC = PredL != CC ? *ConstB ^ *OldConstC : *OldConstC;
        const APInt ConstE = PredR != CC ? *ConstD ^ *OldConstE : *OldConstE;

        // Shared mask bits with contradicting expected values: the AND form
        // is constant-false (so the expression folds to a constant).
        if (((*ConstB & *ConstD) & (ConstC ^ ConstE)).getBoolValue())
          return IsNot ? nullptr : ConstantInt::get(Ty: LHS->getType(), V: !IsAnd);

        // The NotMixed fold additionally requires one mask to contain the
        // other.
        if (IsNot && !ConstB->isSubsetOf(RHS: *ConstD) &&
            !ConstD->isSubsetOf(RHS: *ConstB))
          return nullptr;

        APInt BD, CE;
        if (IsNot) {
          BD = *ConstB & *ConstD;
          CE = ConstC & ConstE;
        } else {
          BD = *ConstB | *ConstD;
          CE = ConstC | ConstE;
        }
        Value *NewAnd = Builder.CreateAnd(LHS: A, RHS: BD);
        Value *CEVal = ConstantInt::get(Ty: A->getType(), V: CE);
        return Builder.CreateICmp(P: CC, LHS: NewAnd, RHS: CEVal);
      };

      if (Mask & BMask_Mixed)
        return FoldBMixed(NewCC, false);
      if (Mask & BMask_NotMixed) // can be else also
        return FoldBMixed(NewCC, true);
    }
  }

  // (icmp eq (A & B), 0) | (icmp eq (A & D), 0)
  // -> (icmp ne (A & (B|D)), (B|D))
  // (icmp ne (A & B), 0) & (icmp ne (A & D), 0)
  // -> (icmp eq (A & (B|D)), (B|D))
  // iff B and D is known to be a power of two
  if (Mask & Mask_NotAllZeros &&
      isKnownToBeAPowerOfTwo(V: B, /*OrZero=*/false, Q) &&
      isKnownToBeAPowerOfTwo(V: D, /*OrZero=*/false, Q)) {
    // If this is a logical and/or, then we must prevent propagation of a
    // poison value from the RHS by inserting freeze.
    if (IsLogical)
      D = Builder.CreateFreeze(V: D);
    Value *Mask = Builder.CreateOr(LHS: B, RHS: D);
    Value *Masked = Builder.CreateAnd(LHS: A, RHS: Mask);
    return Builder.CreateICmp(P: NewCC, LHS: Masked, RHS: Mask);
  }
  return nullptr;
}
721
/// Try to fold a signed range checked with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
/// Returns the replacement compare, or nullptr if the pattern doesn't match.
Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                            bool Inverted) {
  // Check the lower range comparison, e.g. x >= 0
  // InstCombine already ensured that if there is a constant it's on the RHS.
  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Val: Cmp0->getOperand(i_nocapture: 1));
  if (!RangeStart)
    return nullptr;

  ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
                               Cmp0->getPredicate());

  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
        (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
    return nullptr;

  ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
                               Cmp1->getPredicate());

  // Find which operand of Cmp1 is (a sign-extension of) the tested value;
  // the other operand is the upper bound. Swap the predicate if the value
  // is on the RHS.
  Value *Input = Cmp0->getOperand(i_nocapture: 0);
  Value *Cmp1Op0 = Cmp1->getOperand(i_nocapture: 0);
  Value *Cmp1Op1 = Cmp1->getOperand(i_nocapture: 1);
  Value *RangeEnd;
  if (match(V: Cmp1Op0, P: m_SExtOrSelf(Op: m_Specific(V: Input)))) {
    // For the upper range compare we have: icmp x, n
    Input = Cmp1Op0;
    RangeEnd = Cmp1Op1;
  } else if (match(V: Cmp1Op1, P: m_SExtOrSelf(Op: m_Specific(V: Input)))) {
    // For the upper range compare we have: icmp n, x
    Input = Cmp1Op1;
    RangeEnd = Cmp1Op0;
    Pred1 = ICmpInst::getSwappedPredicate(pred: Pred1);
  } else {
    return nullptr;
  }

  // Check the upper range comparison, e.g. x < n
  ICmpInst::Predicate NewPred;
  switch (Pred1) {
    case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
    case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
    default: return nullptr;
  }

  // This simplification is only valid if the upper range is not negative.
  KnownBits Known = computeKnownBits(V: RangeEnd, CxtI: Cmp1);
  if (!Known.isNonNegative())
    return nullptr;

  if (Inverted)
    NewPred = ICmpInst::getInversePredicate(pred: NewPred);

  return Builder.CreateICmp(P: NewPred, LHS: Input, RHS: RangeEnd);
}
780
// (or (icmp eq X, 0), (icmp eq X, Pow2OrZero))
//    -> (icmp eq (and X, Pow2OrZero), X)
// (and (icmp ne X, 0), (icmp ne X, Pow2OrZero))
//    -> (icmp ne (and X, Pow2OrZero), X)
// Returns the replacement compare, or nullptr if the pattern doesn't match.
static Value *
foldAndOrOfICmpsWithPow2AndWithZero(InstCombiner::BuilderTy &Builder,
                                    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                    const SimplifyQuery &Q) {
  CmpPredicate Pred = IsAnd ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  // Make sure we have right compares for our op.
  if (LHS->getPredicate() != Pred || RHS->getPredicate() != Pred)
    return nullptr;

  // Make it so we can match LHS against the (icmp eq/ne X, 0) just for
  // simplicity.
  if (match(V: RHS->getOperand(i_nocapture: 1), P: m_Zero()))
    std::swap(a&: LHS, b&: RHS);

  Value *Pow2, *Op;
  // Match the desired pattern:
  // LHS: (icmp eq/ne X, 0)
  // RHS: (icmp eq/ne X, Pow2OrZero)
  // Skip if Pow2OrZero is 1. Either way it gets folded to (icmp ugt X, 1) but
  // this form ends up slightly less canonical.
  // We could potentially be more sophisticated than requiring LHS/RHS
  // be one-use. We don't create additional instructions if only one
  // of them is one-use. So cases where one is one-use and the other
  // is two-use might be profitable.
  if (!match(V: LHS, P: m_OneUse(SubPattern: m_ICmp(Pred, L: m_Value(V&: Op), R: m_Zero()))) ||
      !match(V: RHS, P: m_OneUse(SubPattern: m_c_ICmp(Pred, L: m_Specific(V: Op), R: m_Value(V&: Pow2)))) ||
      match(V: Pow2, P: m_One()) ||
      !isKnownToBeAPowerOfTwo(V: Pow2, DL: Q.DL, /*OrZero=*/true, AC: Q.AC, CxtI: Q.CxtI, DT: Q.DT))
    return nullptr;

  Value *And = Builder.CreateAnd(LHS: Op, RHS: Pow2);
  return Builder.CreateICmp(P: Pred, LHS: And, RHS: Op);
}
818
819/// General pattern:
820/// X & Y
821///
822/// Where Y is checking that all the high bits (covered by a mask 4294967168)
823/// are uniform, i.e. %arg & 4294967168 can be either 4294967168 or 0
824/// Pattern can be one of:
825/// %t = add i32 %arg, 128
826/// %r = icmp ult i32 %t, 256
827/// Or
828/// %t0 = shl i32 %arg, 24
829/// %t1 = ashr i32 %t0, 24
830/// %r = icmp eq i32 %t1, %arg
831/// Or
832/// %t0 = trunc i32 %arg to i8
833/// %t1 = sext i8 %t0 to i32
834/// %r = icmp eq i32 %t1, %arg
835/// This pattern is a signed truncation check.
836///
837/// And X is checking that some bit in that same mask is zero.
838/// I.e. can be one of:
839/// %r = icmp sgt i32 %arg, -1
840/// Or
841/// %t = and i32 %arg, 2147483648
842/// %r = icmp eq i32 %t, 0
843///
844/// Since we are checking that all the bits in that mask are the same,
845/// and a particular bit is zero, what we are really checking is that all the
846/// masked bits are zero.
847/// So this should be transformed to:
848/// %r = icmp ult i32 %arg, 128
static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
                                        Instruction &CxtI,
                                        InstCombiner::BuilderTy &Builder) {
  // This fold is only performed for the 'and' of the two checks.
  assert(CxtI.getOpcode() == Instruction::And);

  // Match icmp ult (add %arg, C01), C1 (C1 == C01 << 1; powers of two)
  auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X,
                                            APInt &SignBitMask) -> bool {
    const APInt *I01, *I1; // powers of two; I1 == I01 << 1
    if (!(match(V: ICmp, P: m_SpecificICmp(MatchPred: ICmpInst::ICMP_ULT,
                                 L: m_Add(L: m_Value(V&: X), R: m_Power2(V&: I01)),
                                 R: m_Power2(V&: I1))) &&
          I1->ugt(RHS: *I01) && I01->shl(shiftAmt: 1) == *I1))
      return false;
    // Which bit is the new sign bit as per the 'signed truncation' pattern?
    SignBitMask = *I01;
    return true;
  };

  // One icmp needs to be 'signed truncation check'.
  // We need to match this first, else we will mismatch commutative cases.
  Value *X1;
  APInt HighestBit;
  ICmpInst *OtherICmp;
  if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
    OtherICmp = ICmp0;
  else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
    OtherICmp = ICmp1;
  else
    return nullptr;

  assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)");

  // Try to match/decompose into: icmp eq (X & Mask), 0
  auto tryToDecompose = [](ICmpInst *ICmp, Value *&X,
                           APInt &UnsetBitsMask) -> bool {
    CmpPredicate Pred = ICmp->getPredicate();
    // Can it be decomposed into icmp eq (X & Mask), 0 ?
    auto Res = llvm::decomposeBitTestICmp(
        LHS: ICmp->getOperand(i_nocapture: 0), RHS: ICmp->getOperand(i_nocapture: 1), Pred,
        /*LookThroughTrunc=*/false, /*AllowNonZeroC=*/false,
        /*DecomposeAnd=*/true);
    if (Res && Res->Pred == ICmpInst::ICMP_EQ) {
      X = Res->X;
      UnsetBitsMask = Res->Mask;
      return true;
    }

    return false;
  };

  // And the other icmp needs to be decomposable into a bit test.
  Value *X0;
  APInt UnsetBitsMask;
  if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
    return nullptr;

  assert(!UnsetBitsMask.isZero() && "empty mask makes no sense.");

  // Are they working on the same value?
  Value *X;
  if (X1 == X0) {
    // Ok as is.
    X = X1;
  } else if (match(V: X0, P: m_Trunc(Op: m_Specific(V: X1)))) {
    // The bit test looked through a trunc of the common value; widen its
    // mask so both checks are expressed in the wide type.
    UnsetBitsMask = UnsetBitsMask.zext(width: X1->getType()->getScalarSizeInBits());
    X = X1;
  } else
    return nullptr;

  // So which bits should be uniform as per the 'signed truncation check'?
  // (all the bits starting with (i.e. including) HighestBit)
  APInt SignBitsMask = ~(HighestBit - 1U);

  // UnsetBitsMask must have some common bits with SignBitsMask,
  if (!UnsetBitsMask.intersects(RHS: SignBitsMask))
    return nullptr;

  // Does UnsetBitsMask contain any bits outside of SignBitsMask?
  if (!UnsetBitsMask.isSubsetOf(RHS: SignBitsMask)) {
    // The extra low bits must themselves form a contiguous high mask, i.e.
    // ~UnsetBitsMask + 1 must be a power of two; then the tighter of the two
    // bounds applies.
    APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
    if (!OtherHighestBit.isPowerOf2())
      return nullptr;
    HighestBit = APIntOps::umin(A: HighestBit, B: OtherHighestBit);
  }
  // Else, if it does not, then all is ok as-is.

  // %r = icmp ult %X, SignBit
  return Builder.CreateICmpULT(LHS: X, RHS: ConstantInt::get(Ty: X->getType(), V: HighestBit),
                               Name: CxtI.getName() + ".simplified");
}
940
941/// Fold (icmp eq ctpop(X) 1) | (icmp eq X 0) into (icmp ult ctpop(X) 2) and
942/// fold (icmp ne ctpop(X) 1) & (icmp ne X 0) into (icmp ugt ctpop(X) 1).
943/// Also used for logical and/or, must be poison safe if range attributes are
944/// dropped.
945static Value *foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd,
946 InstCombiner::BuilderTy &Builder,
947 InstCombinerImpl &IC) {
948 CmpPredicate Pred0, Pred1;
949 Value *X;
950 if (!match(V: Cmp0, P: m_ICmp(Pred&: Pred0, L: m_Intrinsic<Intrinsic::ctpop>(Op0: m_Value(V&: X)),
951 R: m_SpecificInt(V: 1))) ||
952 !match(V: Cmp1, P: m_ICmp(Pred&: Pred1, L: m_Specific(V: X), R: m_ZeroInt())))
953 return nullptr;
954
955 auto *CtPop = cast<Instruction>(Val: Cmp0->getOperand(i_nocapture: 0));
956 if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_NE) {
957 // Drop range attributes and re-infer them in the next iteration.
958 CtPop->dropPoisonGeneratingAnnotations();
959 IC.addToWorklist(I: CtPop);
960 return Builder.CreateICmpUGT(LHS: CtPop, RHS: ConstantInt::get(Ty: CtPop->getType(), V: 1));
961 }
962 if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_EQ) {
963 // Drop range attributes and re-infer them in the next iteration.
964 CtPop->dropPoisonGeneratingAnnotations();
965 IC.addToWorklist(I: CtPop);
966 return Builder.CreateICmpULT(LHS: CtPop, RHS: ConstantInt::get(Ty: CtPop->getType(), V: 2));
967 }
968
969 return nullptr;
970}
971
972/// Reduce a pair of compares that check if a value has exactly 1 bit set.
973/// Also used for logical and/or, must be poison safe if range attributes are
974/// dropped.
static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd,
                             InstCombiner::BuilderTy &Builder,
                             InstCombinerImpl &IC) {
  // Handle 'and' / 'or' commutation: make the equality check the first operand.
  if (JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_NE)
    std::swap(a&: Cmp0, b&: Cmp1);
  else if (!JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_EQ)
    std::swap(a&: Cmp0, b&: Cmp1);

  // (X != 0) && (ctpop(X) u< 2) --> ctpop(X) == 1
  Value *X;
  if (JoinedByAnd &&
      match(V: Cmp0, P: m_SpecificICmp(MatchPred: ICmpInst::ICMP_NE, L: m_Value(V&: X), R: m_ZeroInt())) &&
      match(V: Cmp1, P: m_SpecificICmp(MatchPred: ICmpInst::ICMP_ULT,
                             L: m_Intrinsic<Intrinsic::ctpop>(Op0: m_Specific(V: X)),
                             R: m_SpecificInt(V: 2)))) {
    auto *CtPop = cast<Instruction>(Val: Cmp1->getOperand(i_nocapture: 0));
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(I: CtPop);
    return Builder.CreateICmpEQ(LHS: CtPop, RHS: ConstantInt::get(Ty: CtPop->getType(), V: 1));
  }
  // (X == 0) || (ctpop(X) u> 1) --> ctpop(X) != 1
  if (!JoinedByAnd &&
      match(V: Cmp0, P: m_SpecificICmp(MatchPred: ICmpInst::ICMP_EQ, L: m_Value(V&: X), R: m_ZeroInt())) &&
      match(V: Cmp1, P: m_SpecificICmp(MatchPred: ICmpInst::ICMP_UGT,
                             L: m_Intrinsic<Intrinsic::ctpop>(Op0: m_Specific(V: X)),
                             R: m_SpecificInt(V: 1)))) {
    auto *CtPop = cast<Instruction>(Val: Cmp1->getOperand(i_nocapture: 0));
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(I: CtPop);
    return Builder.CreateICmpNE(LHS: CtPop, RHS: ConstantInt::get(Ty: CtPop->getType(), V: 1));
  }
  return nullptr;
}
1011
1012/// Try to fold (icmp(A & B) == 0) & (icmp(A & D) != E) into (icmp A u< D) iff
1013/// B is a contiguous set of ones starting from the most significant bit
1014/// (negative power of 2), D and E are equal, and D is a contiguous set of ones
1015/// starting at the most significant zero bit in B. Parameter B supports masking
1016/// using undef/poison in either scalar or vector values.
static Value *foldNegativePower2AndShiftedMask(
    Value *A, Value *B, Value *D, Value *E, ICmpInst::Predicate PredL,
    ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  // Only the exact (A & B) == 0 combined with (A & D) != E shape is handled.
  if (PredL != ICmpInst::ICMP_EQ || PredR != ICmpInst::ICMP_NE)
    return nullptr;

  if (!match(V: B, P: m_NegatedPower2()) || !match(V: D, P: m_ShiftedMask()) ||
      !match(V: E, P: m_ShiftedMask()))
    return nullptr;

  // Test scalar arguments for conversion. B has been validated earlier to be a
  // negative power of two and thus is guaranteed to have one or more contiguous
  // ones starting from the MSB followed by zero or more contiguous zeros. D has
  // been validated earlier to be a shifted set of one or more contiguous ones.
  // In order to match, B leading ones and D leading zeros should be equal. The
  // predicate that B be a negative power of 2 prevents the condition of there
  // ever being zero leading ones. Thus 0 == 0 cannot occur. The predicate that
  // D always be a shifted mask prevents the condition of D equaling 0. This
  // prevents matching the condition where B contains the maximum number of
  // leading one bits (-1) and D contains the maximum number of leading zero
  // bits (0).
  auto isReducible = [](const Value *B, const Value *D, const Value *E) {
    const APInt *BCst, *DCst, *ECst;
    return match(V: B, P: m_APIntAllowPoison(Res&: BCst)) && match(V: D, P: m_APInt(Res&: DCst)) &&
           match(V: E, P: m_APInt(Res&: ECst)) && *DCst == *ECst &&
           (isa<PoisonValue>(Val: B) ||
            (BCst->countLeadingOnes() == DCst->countLeadingZeros()));
  };

  // Test vector type arguments for conversion.
  if (const auto *BVTy = dyn_cast<VectorType>(Val: B->getType())) {
    // Only fixed-width vectors with all-constant operands can be checked
    // element by element.
    const auto *BFVTy = dyn_cast<FixedVectorType>(Val: BVTy);
    const auto *BConst = dyn_cast<Constant>(Val: B);
    const auto *DConst = dyn_cast<Constant>(Val: D);
    const auto *EConst = dyn_cast<Constant>(Val: E);

    if (!BFVTy || !BConst || !DConst || !EConst)
      return nullptr;

    // Every lane must independently satisfy the scalar condition.
    for (unsigned I = 0; I != BFVTy->getNumElements(); ++I) {
      const auto *BElt = BConst->getAggregateElement(Elt: I);
      const auto *DElt = DConst->getAggregateElement(Elt: I);
      const auto *EElt = EConst->getAggregateElement(Elt: I);

      if (!BElt || !DElt || !EElt)
        return nullptr;
      if (!isReducible(BElt, DElt, EElt))
        return nullptr;
    }
  } else {
    // Test scalar type arguments for conversion.
    if (!isReducible(B, D, E))
      return nullptr;
  }
  return Builder.CreateICmp(P: ICmpInst::ICMP_ULT, LHS: A, RHS: D);
}
1075
1076/// Try to fold ((icmp X u< P) & (icmp(X & M) != M)) or ((icmp X s> -1) &
1077/// (icmp(X & M) != M)) into (icmp X u< M). Where P is a power of 2, M < P, and
1078/// M is a contiguous shifted mask starting at the right most significant zero
1079/// bit in P. SGT is supported as when P is the largest representable power of
1080/// 2, an earlier optimization converts the expression into (icmp X s> -1).
1081/// Parameter P supports masking using undef/poison in either scalar or vector
1082/// values.
static Value *foldPowerOf2AndShiftedMask(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                         bool JoinedByAnd,
                                         InstCombiner::BuilderTy &Builder) {
  // Only the 'and' form of the fold is valid.
  if (!JoinedByAnd)
    return nullptr;
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate CmpPred0, CmpPred1;
  // Assuming P is a 2^n, getMaskedTypeForICmpPair will normalize (icmp X u<
  // 2^n) into (icmp (X & ~(2^n-1)) == 0) and (icmp X s> -1) into (icmp (X &
  // SignMask) == 0).
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS: Cmp0, RHS: Cmp1, PredL&: CmpPred0, PredR&: CmpPred1);
  if (!MaskPair)
    return nullptr;

  // One compare must be the all-zeros mask test, the other the "B-mask"
  // (not-mixed, not-all-ones) test; try both assignments.
  const auto compareBMask = BMask_NotMixed | BMask_NotAllOnes;
  unsigned CmpMask0 = MaskPair->first;
  unsigned CmpMask1 = MaskPair->second;
  if ((CmpMask0 & Mask_AllZeros) && (CmpMask1 == compareBMask)) {
    if (Value *V = foldNegativePower2AndShiftedMask(A, B, D, E, PredL: CmpPred0,
                                                    PredR: CmpPred1, Builder))
      return V;
  } else if ((CmpMask0 == compareBMask) && (CmpMask1 & Mask_AllZeros)) {
    // Swapped operand roles: D/C belong to the all-zeros side here.
    if (Value *V = foldNegativePower2AndShiftedMask(A, B: D, D: B, E: C, PredL: CmpPred1,
                                                    PredR: CmpPred0, Builder))
      return V;
  }
  return nullptr;
}
1112
1113/// Commuted variants are assumed to be handled by calling this function again
1114/// with the parameters swapped.
static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q,
                                         InstCombiner::BuilderTy &Builder) {
  // ZeroICmp must be an equality compare of some value against zero.
  Value *ZeroCmpOp;
  CmpPredicate EqPred;
  if (!match(V: ZeroICmp, P: m_ICmp(Pred&: EqPred, L: m_Value(V&: ZeroCmpOp), R: m_Zero())) ||
      !ICmpInst::isEquality(P: EqPred))
    return nullptr;

  CmpPredicate UnsignedPred;

  Value *A, *B;
  // The unsigned compare must relate ZeroCmpOp (an add) to one of the add's
  // own operands; at least one of the two compares must be dead afterwards.
  if (match(V: UnsignedICmp,
            P: m_c_ICmp(Pred&: UnsignedPred, L: m_Specific(V: ZeroCmpOp), R: m_Value(V&: A))) &&
      match(V: ZeroCmpOp, P: m_c_Add(L: m_Specific(V: A), R: m_Value(V&: B))) &&
      (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
    // Arrange (NonZero, Other) so that NonZero is provably non-zero, trying
    // the swapped assignment if the first one is not known non-zero.
    auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
      if (!isKnownNonZero(V: NonZero, Q))
        std::swap(a&: NonZero, b&: Other);
      return isKnownNonZero(V: NonZero, Q);
    };

    // Given ZeroCmpOp = (A + B)
    // ZeroCmpOp < A && ZeroCmpOp != 0 --> (0-X) < Y iff
    // ZeroCmpOp >= A || ZeroCmpOp == 0 --> (0-X) >= Y iff
    // with X being the value (A/B) that is known to be non-zero,
    // and Y being remaining value.
    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
        IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpULT(LHS: Builder.CreateNeg(V: B), RHS: A);
    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
        !IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpUGE(LHS: Builder.CreateNeg(V: B), RHS: A);
  }

  return nullptr;
}
1153
/// A contiguous run of bits extracted from an integer value: bits
/// [StartBit, StartBit + NumBits) of From.
struct IntPart {
  Value *From;       // Wide integer the bits are taken from.
  unsigned StartBit; // Index of the lowest extracted bit.
  unsigned NumBits;  // Number of contiguous bits extracted.
};
1159
1160/// Match an extraction of bits from an integer.
/// Match an extraction of bits from an integer.
/// Recognizes trunc(X) as the low bits of X, and trunc(lshr(Y, Shift)) as
/// bits [Shift, Shift + width) of Y. Returns std::nullopt if V is not a
/// one-use trunc.
static std::optional<IntPart> matchIntPart(Value *V) {
  Value *X;
  if (!match(V, P: m_OneUse(SubPattern: m_Trunc(Op: m_Value(V&: X)))))
    return std::nullopt;

  unsigned NumOriginalBits = X->getType()->getScalarSizeInBits();
  unsigned NumExtractedBits = V->getType()->getScalarSizeInBits();
  Value *Y;
  const APInt *Shift;
  // For a trunc(lshr Y, Shift) pattern, make sure we're only extracting bits
  // from Y, not any shifted-in zeroes.
  if (match(V: X, P: m_OneUse(SubPattern: m_LShr(L: m_Value(V&: Y), R: m_APInt(Res&: Shift)))) &&
      Shift->ule(RHS: NumOriginalBits - NumExtractedBits))
    return {{.From: Y, .StartBit: (unsigned)Shift->getZExtValue(), .NumBits: NumExtractedBits}};
  return {{.From: X, .StartBit: 0, .NumBits: NumExtractedBits}};
}
1177
1178/// Materialize an extraction of bits from an integer in IR.
1179static Value *extractIntPart(const IntPart &P, IRBuilderBase &Builder) {
1180 Value *V = P.From;
1181 if (P.StartBit)
1182 V = Builder.CreateLShr(LHS: V, RHS: P.StartBit);
1183 Type *TruncTy = V->getType()->getWithNewBitWidth(NewBitWidth: P.NumBits);
1184 if (TruncTy != V->getType())
1185 V = Builder.CreateTrunc(V, DestTy: TruncTy);
1186 return V;
1187}
1188
1189/// (icmp eq X0, Y0) & (icmp eq X1, Y1) -> icmp eq X01, Y01
1190/// (icmp ne X0, Y0) | (icmp ne X1, Y1) -> icmp ne X01, Y01
1191/// where X0, X1 and Y0, Y1 are adjacent parts extracted from an integer.
Value *InstCombinerImpl::foldEqOfParts(Value *Cmp0, Value *Cmp1, bool IsAnd) {
  // Both compares must die for the combined compare to be a win.
  if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
  // Extract the IntPart compared by operand OpNo (0 = LHS, 1 = RHS) of CmpV,
  // looking through the various canonical forms the compare may have taken.
  auto GetMatchPart = [&](Value *CmpV,
                          unsigned OpNo) -> std::optional<IntPart> {
    assert(CmpV->getType()->isIntOrIntVectorTy(1) && "Must be bool");

    Value *X, *Y;
    // icmp ne (and x, 1), (and y, 1) <=> trunc (xor x, y) to i1
    // icmp eq (and x, 1), (and y, 1) <=> not (trunc (xor x, y) to i1)
    if (Pred == CmpInst::ICMP_NE
            ? match(V: CmpV, P: m_Trunc(Op: m_Xor(L: m_Value(V&: X), R: m_Value(V&: Y))))
            : match(V: CmpV, P: m_Not(V: m_Trunc(Op: m_Xor(L: m_Value(V&: X), R: m_Value(V&: Y))))))
      return {{.From: OpNo == 0 ? X : Y, .StartBit: 0, .NumBits: 1}};

    auto *Cmp = dyn_cast<ICmpInst>(Val: CmpV);
    if (!Cmp)
      return std::nullopt;

    if (Pred == Cmp->getPredicate())
      return matchIntPart(V: Cmp->getOperand(i_nocapture: OpNo));

    const APInt *C;
    // (icmp eq (lshr x, C), (lshr y, C)) gets optimized to:
    // (icmp ult (xor x, y), 1 << C) so also look for that.
    if (Pred == CmpInst::ICMP_EQ && Cmp->getPredicate() == CmpInst::ICMP_ULT) {
      if (!match(V: Cmp->getOperand(i_nocapture: 1), P: m_Power2(V&: C)) ||
          !match(V: Cmp->getOperand(i_nocapture: 0), P: m_Xor(L: m_Value(), R: m_Value())))
        return std::nullopt;
    }

    // (icmp ne (lshr x, C), (lshr y, C)) gets optimized to:
    // (icmp ugt (xor x, y), (1 << C) - 1) so also look for that.
    else if (Pred == CmpInst::ICMP_NE &&
             Cmp->getPredicate() == CmpInst::ICMP_UGT) {
      if (!match(V: Cmp->getOperand(i_nocapture: 1), P: m_LowBitMask(V&: C)) ||
          !match(V: Cmp->getOperand(i_nocapture: 0), P: m_Xor(L: m_Value(), R: m_Value())))
        return std::nullopt;
    } else {
      return std::nullopt;
    }

    // Recover the start bit of the compared part from the constant: for the
    // ugt form C is a low-bit mask (popcount gives the shift); for the ult
    // form C is a power of two (countr_zero gives the shift).
    unsigned From = Pred == CmpInst::ICMP_NE ? C->popcount() : C->countr_zero();
    Instruction *I = cast<Instruction>(Val: Cmp->getOperand(i_nocapture: 0));
    return {{.From: I->getOperand(i: OpNo), .StartBit: From, .NumBits: C->getBitWidth() - From}};
  };

  std::optional<IntPart> L0 = GetMatchPart(Cmp0, 0);
  std::optional<IntPart> R0 = GetMatchPart(Cmp0, 1);
  std::optional<IntPart> L1 = GetMatchPart(Cmp1, 0);
  std::optional<IntPart> R1 = GetMatchPart(Cmp1, 1);
  if (!L0 || !R0 || !L1 || !R1)
    return nullptr;

  // Make sure the LHS/RHS compare a part of the same value, possibly after
  // an operand swap.
  if (L0->From != L1->From || R0->From != R1->From) {
    if (L0->From != R1->From || R0->From != L1->From)
      return nullptr;
    std::swap(lhs&: L1, rhs&: R1);
  }

  // Make sure the extracted parts are adjacent, canonicalizing to L0/R0 being
  // the low part and L1/R1 being the high part.
  if (L0->StartBit + L0->NumBits != L1->StartBit ||
      R0->StartBit + R0->NumBits != R1->StartBit) {
    if (L1->StartBit + L1->NumBits != L0->StartBit ||
        R1->StartBit + R1->NumBits != R0->StartBit)
      return nullptr;
    std::swap(lhs&: L0, rhs&: L1);
    std::swap(lhs&: R0, rhs&: R1);
  }

  // We can simplify to a comparison of these larger parts of the integers.
  IntPart L = {.From: L0->From, .StartBit: L0->StartBit, .NumBits: L0->NumBits + L1->NumBits};
  IntPart R = {.From: R0->From, .StartBit: R0->StartBit, .NumBits: R0->NumBits + R1->NumBits};
  Value *LValue = extractIntPart(P: L, Builder);
  Value *RValue = extractIntPart(P: R, Builder);
  return Builder.CreateICmp(P: Pred, LHS: LValue, RHS: RValue);
}
1274
1275/// Reduce logic-of-compares with equality to a constant by substituting a
1276/// common operand with the constant. Callers are expected to call this with
1277/// Cmp0/Cmp1 switched to handle logic op commutativity.
static Value *foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                          bool IsAnd, bool IsLogical,
                                          InstCombiner::BuilderTy &Builder,
                                          const SimplifyQuery &Q,
                                          Instruction &I) {
  // Match an equality compare with a non-poison constant as Cmp0.
  // Also, give up if the compare can be constant-folded to avoid looping.
  CmpPredicate Pred0;
  Value *X;
  Constant *C;
  if (!match(V: Cmp0, P: m_ICmp(Pred&: Pred0, L: m_Value(V&: X), R: m_Constant(C))) ||
      !isGuaranteedNotToBeUndefOrPoison(V: C) || isa<Constant>(Val: X))
    return nullptr;
  // 'and' requires X == C (true branch implies equality); 'or' requires
  // X != C (false branch implies equality).
  if ((IsAnd && Pred0 != ICmpInst::ICMP_EQ) ||
      (!IsAnd && Pred0 != ICmpInst::ICMP_NE))
    return nullptr;

  // The other compare must include a common operand (X). Canonicalize the
  // common operand as operand 1 (Pred1 is swapped if the common operand was
  // operand 0).
  Value *Y;
  CmpPredicate Pred1;
  if (!match(V: Cmp1, P: m_c_ICmp(Pred&: Pred1, L: m_Value(V&: Y), R: m_Specific(V: X))))
    return nullptr;

  // Replace variable with constant value equivalence to remove a variable use:
  // (X == C) && (Y Pred1 X) --> (X == C) && (Y Pred1 C)
  // (X != C) || (Y Pred1 X) --> (X != C) || (Y Pred1 C)
  // Can think of the 'or' substitution with the 'and' bool equivalent:
  // A || B --> A || (!A && B)
  Value *SubstituteCmp = simplifyICmpInst(Pred: Pred1, LHS: Y, RHS: C, Q);
  if (!SubstituteCmp) {
    // If we need to create a new instruction, require that the old compare can
    // be removed.
    if (!Cmp1->hasOneUse())
      return nullptr;
    SubstituteCmp = Builder.CreateICmp(P: Pred1, LHS: Y, RHS: C);
  }
  if (IsLogical) {
    // Preserve branch-weight metadata from the original select where allowed.
    Instruction *MDFrom =
        ProfcheckDisableMetadataFixes && isa<SelectInst>(Val: I) ? nullptr : &I;
    return IsAnd ? Builder.CreateLogicalAnd(Cond1: Cmp0, Cond2: SubstituteCmp, Name: "", MDFrom)
                 : Builder.CreateLogicalOr(Cond1: Cmp0, Cond2: SubstituteCmp, Name: "", MDFrom);
  }
  return Builder.CreateBinOp(Opc: IsAnd ? Instruction::And : Instruction::Or, LHS: Cmp0,
                             RHS: SubstituteCmp);
}
1325
1326/// Fold (icmp Pred1 V1, C1) & (icmp Pred2 V2, C2)
1327/// or (icmp Pred1 V1, C1) | (icmp Pred2 V2, C2)
1328/// into a single comparison using range-based reasoning.
1329/// NOTE: This is also used for logical and/or, must be poison-safe!
Value *InstCombinerImpl::foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1,
                                                     ICmpInst *ICmp2,
                                                     bool IsAnd) {
  // Return (V, CR) for a range check idiom V in CR.
  auto MatchExactRangeCheck =
      [](ICmpInst *ICmp) -> std::optional<std::pair<Value *, ConstantRange>> {
    const APInt *C;
    if (!match(V: ICmp->getOperand(i_nocapture: 1), P: m_APInt(Res&: C)))
      return std::nullopt;
    Value *LHS = ICmp->getOperand(i_nocapture: 0);
    CmpPredicate Pred = ICmp->getPredicate();
    Value *X;
    // Match (x & NegPow2) ==/!= C
    const APInt *Mask;
    if (ICmpInst::isEquality(P: Pred) &&
        match(V: LHS, P: m_OneUse(SubPattern: m_And(L: m_Value(V&: X), R: m_NegatedPower2(V&: Mask)))) &&
        C->countr_zero() >= Mask->countr_zero()) {
      // (x & NegPow2) == C means x is in [C, C - Mask) since -Mask is the
      // block size of the mask.
      ConstantRange CR(*C, *C - *Mask);
      if (Pred == ICmpInst::ICMP_NE)
        CR = CR.inverse();
      return std::make_pair(x&: X, y&: CR);
    }
    ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, Other: *C);
    // Match (add X, C1) pred C
    // TODO: investigate whether we should apply the one-use check on m_AddLike.
    const APInt *C1;
    if (match(V: LHS, P: m_AddLike(L: m_Value(V&: X), R: m_APInt(Res&: C1))))
      return std::make_pair(x&: X, y: CR.subtract(CI: *C1));
    return std::make_pair(x&: LHS, y&: CR);
  };

  auto RC1 = MatchExactRangeCheck(ICmp1);
  if (!RC1)
    return nullptr;

  auto RC2 = MatchExactRangeCheck(ICmp2);
  if (!RC2)
    return nullptr;

  auto &[V1, CR1] = *RC1;
  auto &[V2, CR2] = *RC2;
  // Both checks must constrain the same value.
  if (V1 != V2)
    return nullptr;

  // For 'and', we use the De Morgan's Laws to simplify the implementation.
  if (IsAnd) {
    CR1 = CR1.inverse();
    CR2 = CR2.inverse();
  }

  Type *Ty = V1->getType();
  Value *NewV = V1;
  std::optional<ConstantRange> CR = CR1.exactUnionWith(CR: CR2);
  if (!CR) {
    // No exact union exists; fall back to the masked trick below, which
    // creates extra instructions and so requires both compares to be dead.
    if (!(ICmp1->hasOneUse() && ICmp2->hasOneUse()) || CR1.isWrappedSet() ||
        CR2.isWrappedSet())
      return nullptr;

    // Check whether we have equal-size ranges that only differ by one bit.
    // In that case we can apply a mask to map one range onto the other.
    APInt LowerDiff = CR1.getLower() ^ CR2.getLower();
    APInt UpperDiff = (CR1.getUpper() - 1) ^ (CR2.getUpper() - 1);
    APInt CR1Size = CR1.getUpper() - CR1.getLower();
    if (!LowerDiff.isPowerOf2() || LowerDiff != UpperDiff ||
        CR1Size != CR2.getUpper() - CR2.getLower())
      return nullptr;

    CR = CR1.getLower().ult(RHS: CR2.getLower()) ? CR1 : CR2;
    NewV = Builder.CreateAnd(LHS: NewV, RHS: ConstantInt::get(Ty, V: ~LowerDiff));
  }

  // Undo the De Morgan inversion for the 'and' case.
  if (IsAnd)
    CR = CR->inverse();

  CmpInst::Predicate NewPred;
  APInt NewC, Offset;
  CR->getEquivalentICmp(Pred&: NewPred, RHS&: NewC, Offset);

  if (Offset != 0)
    NewV = Builder.CreateAdd(LHS: NewV, RHS: ConstantInt::get(Ty, V: Offset));
  return Builder.CreateICmp(P: NewPred, LHS: NewV, RHS: ConstantInt::get(Ty, V: NewC));
}
1412
1413/// Matches canonical form of isnan, fcmp ord x, 0
1414static bool matchIsNotNaN(FCmpInst::Predicate P, Value *LHS, Value *RHS) {
1415 return P == FCmpInst::FCMP_ORD && match(V: RHS, P: m_AnyZeroFP());
1416}
1417
1418/// Matches fcmp u__ x, +/-inf
1419static bool matchUnorderedInfCompare(FCmpInst::Predicate P, Value *LHS,
1420 Value *RHS) {
1421 return FCmpInst::isUnordered(predicate: P) && match(V: RHS, P: m_Inf());
1422}
1423
1424/// and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
1425///
1426/// Clang emits this pattern for doing an isfinite check in __builtin_isnormal.
1427static Value *matchIsFiniteTest(InstCombiner::BuilderTy &Builder, FCmpInst *LHS,
1428 FCmpInst *RHS) {
1429 Value *LHS0 = LHS->getOperand(i_nocapture: 0), *LHS1 = LHS->getOperand(i_nocapture: 1);
1430 Value *RHS0 = RHS->getOperand(i_nocapture: 0), *RHS1 = RHS->getOperand(i_nocapture: 1);
1431 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1432
1433 if (!matchIsNotNaN(P: PredL, LHS: LHS0, RHS: LHS1) ||
1434 !matchUnorderedInfCompare(P: PredR, LHS: RHS0, RHS: RHS1))
1435 return nullptr;
1436
1437 return Builder.CreateFCmpFMF(P: FCmpInst::getOrderedPredicate(Pred: PredR), LHS: RHS0, RHS: RHS1,
1438 FMFSource: FMFSource::intersect(A: LHS, B: RHS));
1439}
1440
Value *InstCombinerImpl::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
                                          bool IsAnd, bool IsLogicalSelect) {
  Value *LHS0 = LHS->getOperand(i_nocapture: 0), *LHS1 = LHS->getOperand(i_nocapture: 1);
  Value *RHS0 = RHS->getOperand(i_nocapture: 0), *RHS1 = RHS->getOperand(i_nocapture: 1);
  FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  if (LHS0 == RHS1 && RHS0 == LHS1) {
    // Swap RHS operands to match LHS.
    PredR = FCmpInst::getSwappedPredicate(pred: PredR);
    std::swap(a&: RHS0, b&: RHS1);
  }

  // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
  // Suppose the relation between x and y is R, where R is one of
  // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
  // testing the desired relations.
  //
  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
  //  bool(R & CC0) && bool(R & CC1)
  //    = bool((R & CC0) & (R & CC1))
  //    = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
  //
  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
  //  bool(R & CC0) || bool(R & CC1)
  //    = bool((R & CC0) | (R & CC1))
  //    = bool(R & (CC0 | CC1)) <= by reversed distribution (contribution? ;)
  if (LHS0 == RHS0 && LHS1 == RHS1) {
    unsigned FCmpCodeL = getFCmpCode(CC: PredL);
    unsigned FCmpCodeR = getFCmpCode(CC: PredR);
    unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;

    // Intersect the fast math flags.
    // TODO: We can union the fast math flags unless this is a logical select.
    return getFCmpValue(Code: NewPred, LHS: LHS0, RHS: LHS1, Builder,
                        FMF: FMFSource::intersect(A: LHS, B: RHS));
  }

  // This transform is not valid for a logical select.
  if (!IsLogicalSelect &&
      ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
       (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO &&
        !IsAnd))) {
    if (LHS0->getType() != RHS0->getType())
      return nullptr;

    // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
    // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0).
    if (match(V: LHS1, P: m_PosZeroFP()) && match(V: RHS1, P: m_PosZeroFP())) {
      // Ignore the constants because they are obviously not NANs:
      // (fcmp ord x, 0.0) & (fcmp ord y, 0.0)  -> (fcmp ord x, y)
      // (fcmp uno x, 0.0) | (fcmp uno y, 0.0)  -> (fcmp uno x, y)
      return Builder.CreateFCmpFMF(P: PredL, LHS: LHS0, RHS: RHS0,
                                   FMFSource: FMFSource::intersect(A: LHS, B: RHS));
    }
  }

  // This transform is not valid for a logical select.
  if (!IsLogicalSelect && IsAnd &&
      stripSignOnlyFPOps(Val: LHS0) == stripSignOnlyFPOps(Val: RHS0)) {
    // and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
    // and (fcmp ord x, 0), (fcmp u* fabs(x), inf) -> fcmp o* x, inf
    // Try both operand orders since the not-NaN check may be on either side.
    if (Value *Left = matchIsFiniteTest(Builder, LHS, RHS))
      return Left;
    if (Value *Right = matchIsFiniteTest(Builder, LHS: RHS, RHS: LHS))
      return Right;
  }

  // Turn at least two fcmps with constants into llvm.is.fpclass.
  //
  // If we can represent a combined value test with one class call, we can
  // potentially eliminate 4-6 instructions. If we can represent a test with a
  // single fcmp with fneg and fabs, that's likely a better canonical form.
  if (LHS->hasOneUse() && RHS->hasOneUse()) {
    auto [ClassValRHS, ClassMaskRHS] =
        fcmpToClassTest(Pred: PredR, F: *RHS->getFunction(), LHS: RHS0, RHS: RHS1);
    if (ClassValRHS) {
      auto [ClassValLHS, ClassMaskLHS] =
          fcmpToClassTest(Pred: PredL, F: *LHS->getFunction(), LHS: LHS0, RHS: LHS1);
      if (ClassValLHS == ClassValRHS) {
        // 'and' intersects the class masks; 'or' unions them.
        unsigned CombinedMask = IsAnd ? (ClassMaskLHS & ClassMaskRHS)
                                      : (ClassMaskLHS | ClassMaskRHS);
        return Builder.CreateIntrinsic(
            ID: Intrinsic::is_fpclass, Types: {ClassValLHS->getType()},
            Args: {ClassValLHS, Builder.getInt32(C: CombinedMask)});
      }
    }
  }

  // Canonicalize the range check idiom:
  // and (fcmp olt/ole/ult/ule x, C), (fcmp ogt/oge/ugt/uge x, -C)
  // --> fabs(x) olt/ole/ult/ule C
  // or  (fcmp ogt/oge/ugt/uge x, C), (fcmp olt/ole/ult/ule x, -C)
  // --> fabs(x) ogt/oge/ugt/uge C
  // TODO: Generalize to handle a negated variable operand?
  const APFloat *LHSC, *RHSC;
  if (LHS0 == RHS0 && LHS->hasOneUse() && RHS->hasOneUse() &&
      FCmpInst::getSwappedPredicate(pred: PredL) == PredR &&
      match(V: LHS1, P: m_APFloatAllowPoison(Res&: LHSC)) &&
      match(V: RHS1, P: m_APFloatAllowPoison(Res&: RHSC)) &&
      LHSC->bitwiseIsEqual(RHS: neg(X: *RHSC))) {
    auto IsLessThanOrLessEqual = [](FCmpInst::Predicate Pred) {
      switch (Pred) {
      case FCmpInst::FCMP_OLT:
      case FCmpInst::FCMP_OLE:
      case FCmpInst::FCMP_ULT:
      case FCmpInst::FCMP_ULE:
        return true;
      default:
        return false;
      }
    };
    // Canonicalize so that the less-than style compare (the one that survives
    // the fold) ends up in PredL/LHSC.
    if (IsLessThanOrLessEqual(IsAnd ? PredR : PredL)) {
      std::swap(a&: LHSC, b&: RHSC);
      std::swap(a&: PredL, b&: PredR);
    }
    if (IsLessThanOrLessEqual(IsAnd ? PredL : PredR)) {
      // For a plain and/or the FMF of both compares may be combined; a
      // logical select must not leak RHS flags past the short-circuit.
      FastMathFlags NewFlag = LHS->getFastMathFlags();
      if (!IsLogicalSelect)
        NewFlag |= RHS->getFastMathFlags();

      Value *FAbs =
          Builder.CreateUnaryIntrinsic(ID: Intrinsic::fabs, V: LHS0, FMFSource: NewFlag);
      return Builder.CreateFCmpFMF(
          P: PredL, LHS: FAbs, RHS: ConstantFP::get(Ty: LHS0->getType(), V: *LHSC), FMFSource: NewFlag);
    }
  }

  return nullptr;
}
1570
1571/// Match an fcmp against a special value that performs a test possible by
1572/// llvm.is.fpclass.
1573static bool matchIsFPClassLikeFCmp(Value *Op, Value *&ClassVal,
1574 uint64_t &ClassMask) {
1575 auto *FCmp = dyn_cast<FCmpInst>(Val: Op);
1576 if (!FCmp || !FCmp->hasOneUse())
1577 return false;
1578
1579 std::tie(args&: ClassVal, args&: ClassMask) =
1580 fcmpToClassTest(Pred: FCmp->getPredicate(), F: *FCmp->getParent()->getParent(),
1581 LHS: FCmp->getOperand(i_nocapture: 0), RHS: FCmp->getOperand(i_nocapture: 1));
1582 return ClassVal != nullptr;
1583}
1584
1585/// or (is_fpclass x, mask0), (is_fpclass x, mask1)
1586/// -> is_fpclass x, (mask0 | mask1)
1587/// and (is_fpclass x, mask0), (is_fpclass x, mask1)
1588/// -> is_fpclass x, (mask0 & mask1)
1589/// xor (is_fpclass x, mask0), (is_fpclass x, mask1)
1590/// -> is_fpclass x, (mask0 ^ mask1)
Instruction *InstCombinerImpl::foldLogicOfIsFPClass(BinaryOperator &BO,
                                                    Value *Op0, Value *Op1) {
  Value *ClassVal0 = nullptr;
  Value *ClassVal1 = nullptr;
  uint64_t ClassMask0, ClassMask1;

  // Restrict to folding one fcmp into one is.fpclass for now, don't introduce a
  // new class.
  //
  // TODO: Support forming is.fpclass out of 2 separate fcmps when codegen is
  // better.

  // Each side may be an existing one-use is.fpclass call, or (via
  // matchIsFPClassLikeFCmp below) a one-use fcmp that is expressible as a
  // class test on the same value.
  bool IsLHSClass =
      match(V: Op0, P: m_OneUse(SubPattern: m_Intrinsic<Intrinsic::is_fpclass>(
                       Op0: m_Value(V&: ClassVal0), Op1: m_ConstantInt(V&: ClassMask0))));
  bool IsRHSClass =
      match(V: Op1, P: m_OneUse(SubPattern: m_Intrinsic<Intrinsic::is_fpclass>(
                       Op0: m_Value(V&: ClassVal1), Op1: m_ConstantInt(V&: ClassMask1))));
  // Both sides must test the same value for their masks to be combinable.
  if ((((IsLHSClass || matchIsFPClassLikeFCmp(Op: Op0, ClassVal&: ClassVal0, ClassMask&: ClassMask0)) &&
        (IsRHSClass || matchIsFPClassLikeFCmp(Op: Op1, ClassVal&: ClassVal1, ClassMask&: ClassMask1)))) &&
      ClassVal0 == ClassVal1) {
    // Combine the two class masks according to the logic opcode.
    unsigned NewClassMask;
    switch (BO.getOpcode()) {
    case Instruction::And:
      NewClassMask = ClassMask0 & ClassMask1;
      break;
    case Instruction::Or:
      NewClassMask = ClassMask0 | ClassMask1;
      break;
    case Instruction::Xor:
      NewClassMask = ClassMask0 ^ ClassMask1;
      break;
    default:
      llvm_unreachable("not a binary logic operator");
    }

    // Prefer mutating an existing one-use is.fpclass call in place over
    // creating a fresh call; this keeps the fold instruction-count neutral.
    if (IsLHSClass) {
      auto *II = cast<IntrinsicInst>(Val: Op0);
      II->setArgOperand(
          i: 1, v: ConstantInt::get(Ty: II->getArgOperand(i: 1)->getType(), V: NewClassMask));
      return replaceInstUsesWith(I&: BO, V: II);
    }

    if (IsRHSClass) {
      auto *II = cast<IntrinsicInst>(Val: Op1);
      II->setArgOperand(
          i: 1, v: ConstantInt::get(Ty: II->getArgOperand(i: 1)->getType(), V: NewClassMask));
      return replaceInstUsesWith(I&: BO, V: II);
    }

    // Neither side was already an is.fpclass call (both were fcmps), so
    // materialize a new call with the combined mask.
    CallInst *NewClass =
        Builder.CreateIntrinsic(ID: Intrinsic::is_fpclass, Types: {ClassVal0->getType()},
                                Args: {ClassVal0, Builder.getInt32(C: NewClassMask)});
    return replaceInstUsesWith(I&: BO, V: NewClass);
  }

  return nullptr;
}
1649
1650/// Look for the pattern that conditionally negates a value via math operations:
1651/// cond.splat = sext i1 cond
1652/// sub = add cond.splat, x
1653/// xor = xor sub, cond.splat
1654/// and rewrite it to do the same, but via logical operations:
1655/// value.neg = sub 0, value
1656/// cond = select i1 neg, value.neg, value
Instruction *InstCombinerImpl::canonicalizeConditionalNegationViaMathToSelect(
    BinaryOperator &I) {
  assert(I.getOpcode() == BinaryOperator::Xor && "Only for xor!");
  Value *Cond, *X;
  // As per complexity ordering, `xor` is not commutative here.
  // Match: xor (add (sext Cond), X), (sext Cond), where Cond is i1 (or a
  // vector of i1). When Cond is true the sext splat is all-ones, so the
  // add/xor pair computes -X; when false it is a no-op yielding X.
  if (!match(V: &I, P: m_c_BinOp(L: m_OneUse(SubPattern: m_Value()), R: m_Value())) ||
      !match(V: I.getOperand(i_nocapture: 1), P: m_SExt(Op: m_Value(V&: Cond))) ||
      !Cond->getType()->isIntOrIntVectorTy(BitWidth: 1) ||
      !match(V: I.getOperand(i_nocapture: 0), P: m_c_Add(L: m_SExt(Op: m_Specific(V: Cond)), R: m_Value(V&: X))))
    return nullptr;
  // Rewrite as: select Cond, -X, X.
  return createSelectInstWithUnknownProfile(
      C: Cond, S1: Builder.CreateNeg(V: X, Name: X->getName() + ".neg"), S2: X);
}
1670
1671/// This a limited reassociation for a special case (see above) where we are
1672/// checking if two values are either both NAN (unordered) or not-NAN (ordered).
1673/// This could be handled more generally in '-reassociation', but it seems like
1674/// an unlikely pattern for a large number of logic ops and fcmps.
static Instruction *reassociateFCmps(BinaryOperator &BO,
                                     InstCombiner::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = BO.getOpcode();
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Expecting and/or op for fcmp transform");

  // There are 4 commuted variants of the pattern. Canonicalize operands of this
  // logic op so an fcmp is operand 0 and a matching logic op is operand 1.
  Value *Op0 = BO.getOperand(i_nocapture: 0), *Op1 = BO.getOperand(i_nocapture: 1), *X;
  if (match(V: Op1, P: m_FCmp(L: m_Value(), R: m_AnyZeroFP())))
    std::swap(a&: Op0, b&: Op1);

  // Match inner binop and the predicate for combining 2 NAN checks into 1.
  // 'and' pairs with 'ord' (both not-NAN); 'or' pairs with 'uno' (either NAN).
  Value *BO10, *BO11;
  FCmpInst::Predicate NanPred = Opcode == Instruction::And ? FCmpInst::FCMP_ORD
                                                           : FCmpInst::FCMP_UNO;
  if (!match(V: Op0, P: m_SpecificFCmp(MatchPred: NanPred, L: m_Value(V&: X), R: m_AnyZeroFP())) ||
      !match(V: Op1, P: m_BinOp(Opcode, L: m_Value(V&: BO10), R: m_Value(V&: BO11))))
    return nullptr;

  // The inner logic op must have a matching fcmp operand.
  // Try BO10 first; if it doesn't match (or operand types differ), swap the
  // inner operands and retry once.
  Value *Y;
  if (!match(V: BO10, P: m_SpecificFCmp(MatchPred: NanPred, L: m_Value(V&: Y), R: m_AnyZeroFP())) ||
      X->getType() != Y->getType())
    std::swap(a&: BO10, b&: BO11);

  if (!match(V: BO10, P: m_SpecificFCmp(MatchPred: NanPred, L: m_Value(V&: Y), R: m_AnyZeroFP())) ||
      X->getType() != Y->getType())
    return nullptr;

  // and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z
  // or (fcmp uno X, 0), (or (fcmp uno Y, 0), Z) --> or (fcmp uno X, Y), Z
  // Intersect FMF from the 2 source fcmps.
  Value *NewFCmp =
      Builder.CreateFCmpFMF(P: NanPred, LHS: X, RHS: Y, FMFSource: FMFSource::intersect(A: Op0, B: BO10));
  return BinaryOperator::Create(Op: Opcode, S1: NewFCmp, S2: BO11);
}
1712
1713/// Match variations of De Morgan's Laws:
1714/// (~A & ~B) == (~(A | B))
1715/// (~A | ~B) == (~(A & B))
static Instruction *matchDeMorgansLaws(BinaryOperator &I,
                                       InstCombiner &IC) {
  const Instruction::BinaryOps Opcode = I.getOpcode();
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Trying to match De Morgan's Laws with something other than and/or");

  // Flip the logic operation.
  const Instruction::BinaryOps FlippedOpcode =
      (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;

  Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);
  Value *A, *B;
  // (~A op ~B) --> ~(A flipped-op B), but only when neither A nor B is freely
  // invertible; otherwise other folds that sink the 'not' are preferred and
  // this transform would fight them.
  if (match(V: Op0, P: m_OneUse(SubPattern: m_Not(V: m_Value(V&: A)))) &&
      match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_Value(V&: B)))) &&
      !IC.isFreeToInvert(V: A, WillInvertAllUses: A->hasOneUse()) &&
      !IC.isFreeToInvert(V: B, WillInvertAllUses: B->hasOneUse())) {
    Value *AndOr =
        IC.Builder.CreateBinOp(Opc: FlippedOpcode, LHS: A, RHS: B, Name: I.getName() + ".demorgan");
    return BinaryOperator::CreateNot(Op: AndOr);
  }

  // The 'not' ops may require reassociation.
  // (A & ~B) & ~C --> A & ~(B | C)
  // (~B & A) & ~C --> A & ~(B | C)
  // (A | ~B) | ~C --> A | ~(B & C)
  // (~B | A) | ~C --> A | ~(B & C)
  Value *C;
  if (match(V: Op0, P: m_OneUse(SubPattern: m_c_BinOp(Opcode, L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B))))) &&
      match(V: Op1, P: m_Not(V: m_Value(V&: C)))) {
    Value *FlippedBO = IC.Builder.CreateBinOp(Opc: FlippedOpcode, LHS: B, RHS: C);
    return BinaryOperator::Create(Op: Opcode, S1: A, S2: IC.Builder.CreateNot(V: FlippedBO));
  }

  return nullptr;
}
1751
1752bool InstCombinerImpl::shouldOptimizeCast(CastInst *CI) {
1753 Value *CastSrc = CI->getOperand(i_nocapture: 0);
1754
1755 // Noop casts and casts of constants should be eliminated trivially.
1756 if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(Val: CastSrc))
1757 return false;
1758
1759 // If this cast is paired with another cast that can be eliminated, we prefer
1760 // to have it eliminated.
1761 if (const auto *PrecedingCI = dyn_cast<CastInst>(Val: CastSrc))
1762 if (isEliminableCastPair(CI1: PrecedingCI, CI2: CI))
1763 return false;
1764
1765 return true;
1766}
1767
1768/// Fold {and,or,xor} (cast X), C.
static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
                                          InstCombinerImpl &IC) {
  // Only handle a constant RHS; otherwise this is not the "cast op constant"
  // shape this helper targets.
  Constant *C = dyn_cast<Constant>(Val: Logic.getOperand(i_nocapture: 1));
  if (!C)
    return nullptr;

  auto LogicOpc = Logic.getOpcode();
  Type *DestTy = Logic.getType();
  Type *SrcTy = Cast->getSrcTy();

  // Move the logic operation ahead of a zext or sext if the constant is
  // unchanged in the smaller source type. Performing the logic in a smaller
  // type may provide more information to later folds, and the smaller logic
  // instruction may be cheaper (particularly in the case of vectors).
  Value *X;
  auto &DL = IC.getDataLayout();
  if (match(V: Cast, P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X))))) {
    PreservedCastFlags Flags;
    if (Constant *TruncC = getLosslessUnsignedTrunc(C, DestTy: SrcTy, DL, Flags: &Flags)) {
      // LogicOpc (zext X), C --> zext (LogicOpc X, C)
      Value *NewOp = IC.Builder.CreateBinOp(Opc: LogicOpc, LHS: X, RHS: TruncC);
      auto *ZExt = new ZExtInst(NewOp, DestTy);
      // Carry over the nneg flag when the truncation preserved it, then
      // intersect remaining IR flags with the original cast.
      ZExt->setNonNeg(Flags.NNeg);
      ZExt->andIRFlags(V: Cast);
      return ZExt;
    }
  }

  if (match(V: Cast, P: m_OneUse(SubPattern: m_SExtLike(Op: m_Value(V&: X))))) {
    if (Constant *TruncC = getLosslessSignedTrunc(C, DestTy: SrcTy, DL)) {
      // LogicOpc (sext X), C --> sext (LogicOpc X, C)
      Value *NewOp = IC.Builder.CreateBinOp(Opc: LogicOpc, LHS: X, RHS: TruncC);
      return new SExtInst(NewOp, DestTy);
    }
  }

  return nullptr;
}
1807
1808/// Fold {and,or,xor} (cast X), Y.
Instruction *InstCombinerImpl::foldCastedBitwiseLogic(BinaryOperator &I) {
  auto LogicOpc = I.getOpcode();
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");

  Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);

  // fold bitwise(A >> BW - 1, zext(icmp)) (BW is the scalar bits of the
  // type of A)
  // -> bitwise(zext(A < 0), zext(icmp))
  // -> zext(bitwise(A < 0, icmp))
  // The arithmetic-style sign extraction (lshr by BW-1) is rewritten as an
  // explicit sign test so both operands become zext'd i1 values and the logic
  // can be done in i1.
  auto FoldBitwiseICmpZeroWithICmp = [&](Value *Op0,
                                         Value *Op1) -> Instruction * {
    Value *A;
    bool IsMatched =
        match(V: Op0,
              P: m_OneUse(SubPattern: m_LShr(
                  L: m_Value(V&: A),
                  R: m_SpecificInt(V: Op0->getType()->getScalarSizeInBits() - 1)))) &&
        match(V: Op1, P: m_OneUse(SubPattern: m_ZExt(Op: m_ICmp(L: m_Value(), R: m_Value()))));

    if (!IsMatched)
      return nullptr;

    auto *ICmpL =
        Builder.CreateICmpSLT(LHS: A, RHS: Constant::getNullValue(Ty: A->getType()));
    auto *ICmpR = cast<ZExtInst>(Val: Op1)->getOperand(i_nocapture: 0);
    auto *BitwiseOp = Builder.CreateBinOp(Opc: LogicOpc, LHS: ICmpL, RHS: ICmpR);

    return new ZExtInst(BitwiseOp, Op0->getType());
  };

  // Try both operand orders since the lshr/zext may appear on either side.
  if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op0, Op1))
    return Ret;

  if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op1, Op0))
    return Ret;

  CastInst *Cast0 = dyn_cast<CastInst>(Val: Op0);
  if (!Cast0)
    return nullptr;

  // This must be a cast from an integer or integer vector source type to allow
  // transformation of the logic operation to the source type.
  Type *DestTy = I.getType();
  Type *SrcTy = Cast0->getSrcTy();
  if (!SrcTy->isIntOrIntVectorTy())
    return nullptr;

  if (Instruction *Ret = foldLogicCastConstant(Logic&: I, Cast: Cast0, IC&: *this))
    return Ret;

  CastInst *Cast1 = dyn_cast<CastInst>(Val: Op1);
  if (!Cast1)
    return nullptr;

  // Both operands of the logic operation are casts. The casts must be the
  // same kind for reduction.
  Instruction::CastOps CastOpcode = Cast0->getOpcode();
  if (CastOpcode != Cast1->getOpcode())
    return nullptr;

  // Can't fold it profitably if no one of casts has one use.
  if (!Cast0->hasOneUse() && !Cast1->hasOneUse())
    return nullptr;

  Value *X, *Y;
  if (match(V: Cast0, P: m_ZExtOrSExt(Op: m_Value(V&: X))) &&
      match(V: Cast1, P: m_ZExtOrSExt(Op: m_Value(V&: Y)))) {
    // Cast the narrower source to the wider source type.
    unsigned XNumBits = X->getType()->getScalarSizeInBits();
    unsigned YNumBits = Y->getType()->getScalarSizeInBits();
    if (XNumBits != YNumBits) {
      // Cast the narrower source to the wider source type only if both of casts
      // have one use to avoid creating an extra instruction.
      if (!Cast0->hasOneUse() || !Cast1->hasOneUse())
        return nullptr;

      // If the source types do not match, but the casts are matching extends,
      // we can still narrow the logic op.
      if (XNumBits < YNumBits) {
        X = Builder.CreateCast(Op: CastOpcode, V: X, DestTy: Y->getType());
      } else if (YNumBits < XNumBits) {
        Y = Builder.CreateCast(Op: CastOpcode, V: Y, DestTy: X->getType());
      }
    }

    // Do the logic op in the intermediate width, then widen more.
    Value *NarrowLogic = Builder.CreateBinOp(Opc: LogicOpc, LHS: X, RHS: Y, Name: I.getName());
    // Preserve the 'disjoint' flag on an 'or disjoint' if the narrow op is
    // also a possibly-disjoint instruction (it may have been constant-folded).
    auto *Disjoint = dyn_cast<PossiblyDisjointInst>(Val: &I);
    auto *NewDisjoint = dyn_cast<PossiblyDisjointInst>(Val: NarrowLogic);
    if (Disjoint && NewDisjoint)
      NewDisjoint->setIsDisjoint(Disjoint->isDisjoint());
    return CastInst::Create(CastOpcode, S: NarrowLogic, Ty: DestTy);
  }

  // If the src type of casts are different, give up for other cast opcodes.
  if (SrcTy != Cast1->getSrcTy())
    return nullptr;

  Value *Cast0Src = Cast0->getOperand(i_nocapture: 0);
  Value *Cast1Src = Cast1->getOperand(i_nocapture: 0);

  // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
  if (shouldOptimizeCast(CI: Cast0) && shouldOptimizeCast(CI: Cast1)) {
    Value *NewOp = Builder.CreateBinOp(Opc: LogicOpc, LHS: Cast0Src, RHS: Cast1Src,
                                       Name: I.getName());
    return CastInst::Create(CastOpcode, S: NewOp, Ty: DestTy);
  }

  return nullptr;
}
1920
static Instruction *foldAndToXor(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::And);
  Value *Op0 = I.getOperand(i_nocapture: 0);
  Value *Op1 = I.getOperand(i_nocapture: 1);
  Value *A, *B;

  // Operand complexity canonicalization guarantees that the 'or' is Op0.
  // (A | B) & ~(A & B) --> A ^ B
  // (A | B) & ~(B & A) --> A ^ B
  if (match(V: &I, P: m_BinOp(L: m_Or(L: m_Value(V&: A), R: m_Value(V&: B)),
                    R: m_Not(V: m_c_And(L: m_Deferred(V: A), R: m_Deferred(V: B))))))
    return BinaryOperator::CreateXor(V1: A, V2: B);

  // (A | ~B) & (~A | B) --> ~(A ^ B)
  // (A | ~B) & (B | ~A) --> ~(A ^ B)
  // (~B | A) & (~A | B) --> ~(A ^ B)
  // (~B | A) & (B | ~A) --> ~(A ^ B)
  // Require at least one one-use operand so replacing two 'or's with a
  // xor+not does not increase the instruction count.
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(V: &I, P: m_BinOp(L: m_c_Or(L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B))),
                      R: m_c_Or(L: m_Not(V: m_Deferred(V: A)), R: m_Deferred(V: B)))))
      return BinaryOperator::CreateNot(Op: Builder.CreateXor(LHS: A, RHS: B));

  return nullptr;
}
1946
static Instruction *foldOrToXor(BinaryOperator &I,
                                InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::Or);
  Value *Op0 = I.getOperand(i_nocapture: 0);
  Value *Op1 = I.getOperand(i_nocapture: 1);
  Value *A, *B;

  // Operand complexity canonicalization guarantees that the 'and' is Op0.
  // (A & B) | ~(A | B) --> ~(A ^ B)
  // (A & B) | ~(B | A) --> ~(A ^ B)
  // One-use check keeps the replacement instruction-count neutral.
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) &&
        match(V: Op1, P: m_Not(V: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B)))))
      return BinaryOperator::CreateNot(Op: Builder.CreateXor(LHS: A, RHS: B));

  // Operand complexity canonicalization guarantees that the 'xor' is Op0.
  // (A ^ B) | ~(A | B) --> ~(A & B)
  // (A ^ B) | ~(B | A) --> ~(A & B)
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(V: Op0, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))) &&
        match(V: Op1, P: m_Not(V: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B)))))
      return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: A, RHS: B));

  // (A & ~B) | (~A & B) --> A ^ B
  // (A & ~B) | (B & ~A) --> A ^ B
  // (~B & A) | (~A & B) --> A ^ B
  // (~B & A) | (B & ~A) --> A ^ B
  if (match(V: Op0, P: m_c_And(L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B)))) &&
      match(V: Op1, P: m_c_And(L: m_Not(V: m_Specific(V: A)), R: m_Specific(V: B))))
    return BinaryOperator::CreateXor(V1: A, V2: B);

  return nullptr;
}
1980
1981/// Return true if a constant shift amount is always less than the specified
1982/// bit-width. If not, the shift could create poison in the narrower type.
1983static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) {
1984 APInt Threshold(C->getType()->getScalarSizeInBits(), BitWidth);
1985 return match(V: C, P: m_SpecificInt_ICMP(Predicate: ICmpInst::ICMP_ULT, Threshold));
1986}
1987
1988/// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and
1989/// a common zext operand: and (binop (zext X), C), (zext X).
Instruction *InstCombinerImpl::narrowMaskedBinOp(BinaryOperator &And) {
  // This transform could also apply to {or, and, xor}, but there are better
  // folds for those cases, so we don't expect those patterns here. AShr is not
  // handled because it should always be transformed to LShr in this sequence.
  // The subtract transform is different because it has a constant on the left.
  // Add/mul commute the constant to RHS; sub with constant RHS becomes add.
  Value *Op0 = And.getOperand(i_nocapture: 0), *Op1 = And.getOperand(i_nocapture: 1);
  Constant *C;
  if (!match(V: Op0, P: m_OneUse(SubPattern: m_Add(L: m_Specific(V: Op1), R: m_Constant(C)))) &&
      !match(V: Op0, P: m_OneUse(SubPattern: m_Mul(L: m_Specific(V: Op1), R: m_Constant(C)))) &&
      !match(V: Op0, P: m_OneUse(SubPattern: m_LShr(L: m_Specific(V: Op1), R: m_Constant(C)))) &&
      !match(V: Op0, P: m_OneUse(SubPattern: m_Shl(L: m_Specific(V: Op1), R: m_Constant(C)))) &&
      !match(V: Op0, P: m_OneUse(SubPattern: m_Sub(L: m_Constant(C), R: m_Specific(V: Op1)))))
    return nullptr;

  // The common operand must be a zext. Allow at most 2 uses (the binop above
  // plus this 'and') so the transform does not leave the zext alive alongside
  // the narrowed sequence.
  Value *X;
  if (!match(V: Op1, P: m_ZExt(Op: m_Value(V&: X))) || Op1->hasNUsesOrMore(N: 3))
    return nullptr;

  // For scalars, only narrow when the target considers the narrower type
  // desirable; vectors are narrowed unconditionally.
  Type *Ty = And.getType();
  if (!isa<VectorType>(Val: Ty) && !shouldChangeType(From: Ty, To: X->getType()))
    return nullptr;

  // If we're narrowing a shift, the shift amount must be safe (less than the
  // width) in the narrower type. If the shift amount is greater, instsimplify
  // usually handles that case, but we can't guarantee/assert it.
  Instruction::BinaryOps Opc = cast<BinaryOperator>(Val: Op0)->getOpcode();
  if (Opc == Instruction::LShr || Opc == Instruction::Shl)
    if (!canNarrowShiftAmt(C, BitWidth: X->getType()->getScalarSizeInBits()))
      return nullptr;

  // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X)
  // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X)
  Value *NewC = ConstantExpr::getTrunc(C, Ty: X->getType());
  Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, LHS: NewC, RHS: X)
                                         : Builder.CreateBinOp(Opc, LHS: X, RHS: NewC);
  return new ZExtInst(Builder.CreateAnd(LHS: NewBO, RHS: X), Ty);
}
2028
2029/// Try folding relatively complex patterns for both And and Or operations
2030/// with all And and Or swapped.
static Instruction *foldComplexAndOrPatterns(BinaryOperator &I,
                                             InstCombiner::BuilderTy &Builder) {
  const Instruction::BinaryOps Opcode = I.getOpcode();
  assert(Opcode == Instruction::And || Opcode == Instruction::Or);

  // Flip the logic operation.
  const Instruction::BinaryOps FlippedOpcode =
      (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;

  Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);
  Value *A, *B, *C, *X, *Y, *Dummy;

  // Match following expressions:
  // (~(A | B) & C)
  // (~(A & B) | C)
  // Captures X = ~(A | B) or ~(A & B)
  // When CountUses is set, both the whole expression and the captured 'not'
  // must be single-use so the rewrite does not duplicate work.
  const auto matchNotOrAnd =
      [Opcode, FlippedOpcode](Value *Op, auto m_A, auto m_B, auto m_C,
                              Value *&X, bool CountUses = false) -> bool {
    if (CountUses && !Op->hasOneUse())
      return false;

    if (match(Op,
              m_c_BinOp(FlippedOpcode,
                        m_Value(X, m_Not(m_c_BinOp(Opcode, m_A, m_B))), m_C)))
      return !CountUses || X->hasOneUse();

    return false;
  };

  // (~(A | B) & C) | ... --> ...
  // (~(A & B) | C) & ... --> ...
  // TODO: One use checks are conservative. We just need to check that a total
  //       number of multiple used values does not exceed reduction
  //       in operations.
  if (matchNotOrAnd(Op0, m_Value(V&: A), m_Value(V&: B), m_Value(V&: C), X)) {
    // (~(A | B) & C) | (~(A | C) & B) --> (B ^ C) & ~A
    // (~(A & B) | C) & (~(A & C) | B) --> ~((B ^ C) & A)
    if (matchNotOrAnd(Op1, m_Specific(V: A), m_Specific(V: C), m_Specific(V: B), Dummy,
                      true)) {
      Value *Xor = Builder.CreateXor(LHS: B, RHS: C);
      return (Opcode == Instruction::Or)
                 ? BinaryOperator::CreateAnd(V1: Xor, V2: Builder.CreateNot(V: A))
                 : BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: Xor, RHS: A));
    }

    // (~(A | B) & C) | (~(B | C) & A) --> (A ^ C) & ~B
    // (~(A & B) | C) & (~(B & C) | A) --> ~((A ^ C) & B)
    if (matchNotOrAnd(Op1, m_Specific(V: B), m_Specific(V: C), m_Specific(V: A), Dummy,
                      true)) {
      Value *Xor = Builder.CreateXor(LHS: A, RHS: C);
      return (Opcode == Instruction::Or)
                 ? BinaryOperator::CreateAnd(V1: Xor, V2: Builder.CreateNot(V: B))
                 : BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: Xor, RHS: B));
    }

    // (~(A | B) & C) | ~(A | C) --> ~((B & C) | A)
    // (~(A & B) | C) & ~(A & C) --> ~((B | C) & A)
    if (match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_OneUse(
                        SubPattern: m_c_BinOp(Opcode, L: m_Specific(V: A), R: m_Specific(V: C)))))))
      return BinaryOperator::CreateNot(Op: Builder.CreateBinOp(
          Opc: Opcode, LHS: Builder.CreateBinOp(Opc: FlippedOpcode, LHS: B, RHS: C), RHS: A));

    // (~(A | B) & C) | ~(B | C) --> ~((A & C) | B)
    // (~(A & B) | C) & ~(B & C) --> ~((A | C) & B)
    if (match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_OneUse(
                        SubPattern: m_c_BinOp(Opcode, L: m_Specific(V: B), R: m_Specific(V: C)))))))
      return BinaryOperator::CreateNot(Op: Builder.CreateBinOp(
          Opc: Opcode, LHS: Builder.CreateBinOp(Opc: FlippedOpcode, LHS: A, RHS: C), RHS: B));

    // (~(A | B) & C) | ~(C | (A ^ B)) --> ~((A | B) & (C | (A ^ B)))
    // Note, the pattern with swapped and/or is not handled because the
    // result is more undefined than a source:
    // (~(A & B) | C) & ~(C & (A ^ B)) --> (A ^ B ^ C) | ~(A | C) is invalid.
    if (Opcode == Instruction::Or && Op0->hasOneUse() &&
        match(V: Op1,
              P: m_OneUse(SubPattern: m_Not(V: m_Value(
                  V&: Y, Match: m_c_BinOp(Opcode, L: m_Specific(V: C),
                                  R: m_c_Xor(L: m_Specific(V: A), R: m_Specific(V: B)))))))) {
      // X = ~(A | B)
      // Y = (C | (A ^ B)
      // Reuse the existing (A | B) from inside X rather than rebuilding it.
      Value *Or = cast<BinaryOperator>(Val: X)->getOperand(i_nocapture: 0);
      return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: Or, RHS: Y));
    }
  }

  // (~A & B & C) | ... --> ...
  // (~A | B | C) | ... --> ...
  // TODO: One use checks are conservative. We just need to check that a total
  //       number of multiple used values does not exceed reduction
  //       in operations.
  if (match(V: Op0,
            P: m_OneUse(SubPattern: m_c_BinOp(Opcode: FlippedOpcode,
                                L: m_BinOp(Opcode: FlippedOpcode, L: m_Value(V&: B), R: m_Value(V&: C)),
                                R: m_Value(V&: X, Match: m_Not(V: m_Value(V&: A)))))) ||
      match(V: Op0, P: m_OneUse(SubPattern: m_c_BinOp(Opcode: FlippedOpcode,
                                      L: m_c_BinOp(Opcode: FlippedOpcode, L: m_Value(V&: C),
                                                 R: m_Value(V&: X, Match: m_Not(V: m_Value(V&: A)))),
                                      R: m_Value(V&: B))))) {
    // X = ~A
    // (~A & B & C) | ~(A | B | C) --> ~(A | (B ^ C))
    // (~A | B | C) & ~(A & B & C) --> (~A | (B ^ C))
    // The inner (A op B op C) may associate in any of 3 ways; try all.
    if (match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_c_BinOp(
                        Opcode, L: m_c_BinOp(Opcode, L: m_Specific(V: A), R: m_Specific(V: B)),
                        R: m_Specific(V: C))))) ||
        match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_c_BinOp(
                        Opcode, L: m_c_BinOp(Opcode, L: m_Specific(V: B), R: m_Specific(V: C)),
                        R: m_Specific(V: A))))) ||
        match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_c_BinOp(
                        Opcode, L: m_c_BinOp(Opcode, L: m_Specific(V: A), R: m_Specific(V: C)),
                        R: m_Specific(V: B)))))) {
      Value *Xor = Builder.CreateXor(LHS: B, RHS: C);
      return (Opcode == Instruction::Or)
                 ? BinaryOperator::CreateNot(Op: Builder.CreateOr(LHS: Xor, RHS: A))
                 : BinaryOperator::CreateOr(V1: Xor, V2: X);
    }

    // (~A & B & C) | ~(A | B) --> (C | ~B) & ~A
    // (~A | B | C) & ~(A & B) --> (C & ~B) | ~A
    if (match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_OneUse(
                        SubPattern: m_c_BinOp(Opcode, L: m_Specific(V: A), R: m_Specific(V: B)))))))
      return BinaryOperator::Create(
          Op: FlippedOpcode, S1: Builder.CreateBinOp(Opc: Opcode, LHS: C, RHS: Builder.CreateNot(V: B)),
          S2: X);

    // (~A & B & C) | ~(A | C) --> (B | ~C) & ~A
    // (~A | B | C) & ~(A & C) --> (B & ~C) | ~A
    if (match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_OneUse(
                        SubPattern: m_c_BinOp(Opcode, L: m_Specific(V: A), R: m_Specific(V: C)))))))
      return BinaryOperator::Create(
          Op: FlippedOpcode, S1: Builder.CreateBinOp(Opc: Opcode, LHS: B, RHS: Builder.CreateNot(V: C)),
          S2: X);
  }

  return nullptr;
}
2167
2168/// Try to reassociate a pair of binops so that values with one use only are
2169/// part of the same instruction. This may enable folds that are limited with
2170/// multi-use restrictions and makes it more likely to match other patterns that
2171/// are looking for a common operand.
2172static Instruction *reassociateForUses(BinaryOperator &BO,
2173 InstCombinerImpl::BuilderTy &Builder) {
2174 Instruction::BinaryOps Opcode = BO.getOpcode();
2175 Value *X, *Y, *Z;
2176 if (match(V: &BO,
2177 P: m_c_BinOp(Opcode, L: m_OneUse(SubPattern: m_BinOp(Opcode, L: m_Value(V&: X), R: m_Value(V&: Y))),
2178 R: m_OneUse(SubPattern: m_Value(V&: Z))))) {
2179 if (!isa<Constant>(Val: X) && !isa<Constant>(Val: Y) && !isa<Constant>(Val: Z)) {
2180 // (X op Y) op Z --> (Y op Z) op X
2181 if (!X->hasOneUse()) {
2182 Value *YZ = Builder.CreateBinOp(Opc: Opcode, LHS: Y, RHS: Z);
2183 return BinaryOperator::Create(Op: Opcode, S1: YZ, S2: X);
2184 }
2185 // (X op Y) op Z --> (X op Z) op Y
2186 if (!Y->hasOneUse()) {
2187 Value *XZ = Builder.CreateBinOp(Opc: Opcode, LHS: X, RHS: Z);
2188 return BinaryOperator::Create(Op: Opcode, S1: XZ, S2: Y);
2189 }
2190 }
2191 }
2192
2193 return nullptr;
2194}
2195
2196// Match
2197// (X + C2) | C
2198// (X + C2) ^ C
2199// (X + C2) & C
2200// and convert to do the bitwise logic first:
2201// (X | C) + C2
2202// (X ^ C) + C2
2203// (X & C) + C2
2204// iff bits affected by logic op are lower than last bit affected by math op
static Instruction *canonicalizeLogicFirst(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {
  Type *Ty = I.getType();
  Instruction::BinaryOps OpC = I.getOpcode();
  Value *Op0 = I.getOperand(i_nocapture: 0);
  Value *Op1 = I.getOperand(i_nocapture: 1);
  Value *X;
  const APInt *C, *C2;

  if (!(match(V: Op0, P: m_OneUse(SubPattern: m_Add(L: m_Value(V&: X), R: m_APInt(Res&: C2)))) &&
        match(V: Op1, P: m_APInt(Res&: C))))
    return nullptr;

  // The add can only change bits at or above the lowest set bit of C2 (carries
  // propagate upward). LastOneMath counts the high bits the add may affect.
  unsigned Width = Ty->getScalarSizeInBits();
  unsigned LastOneMath = Width - C2->countr_zero();

  // The logic constant must be the identity on every bit the add may change:
  // all-ones there for 'and', all-zeros there for 'or'/'xor'. Then the logic
  // op commutes with the add and can be done first.
  switch (OpC) {
  case Instruction::And:
    if (C->countl_one() < LastOneMath)
      return nullptr;
    break;
  case Instruction::Xor:
  case Instruction::Or:
    if (C->countl_zero() < LastOneMath)
      return nullptr;
    break;
  default:
    llvm_unreachable("Unexpected BinaryOp!");
  }

  Value *NewBinOp = Builder.CreateBinOp(Opc: OpC, LHS: X, RHS: ConstantInt::get(Ty, V: *C));
  return BinaryOperator::CreateWithCopiedFlags(Opc: Instruction::Add, V1: NewBinOp,
                                               V2: ConstantInt::get(Ty, V: *C2), CopyO: Op0);
}
2239
2240// binop(shift(ShiftedC1, ShAmt), shift(ShiftedC2, add(ShAmt, AddC))) ->
2241// shift(binop(ShiftedC1, shift(ShiftedC2, AddC)), ShAmt)
2242// where both shifts are the same and AddC is a valid shift amount.
Instruction *InstCombinerImpl::foldBinOpOfDisplacedShifts(BinaryOperator &I) {
  assert((I.isBitwiseLogicOp() || I.getOpcode() == Instruction::Add) &&
         "Unexpected opcode");

  Value *ShAmt;
  Constant *ShiftedC1, *ShiftedC2, *AddC;
  Type *Ty = I.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  // Match binop(shift(C1, ShAmt), shift(C2, ShAmt + AddC)) in either operand
  // order; the second shift amount may be an 'add' or a disjoint 'or'.
  if (!match(V: &I, P: m_c_BinOp(L: m_Shift(L: m_ImmConstant(C&: ShiftedC1), R: m_Value(V&: ShAmt)),
                            R: m_Shift(L: m_ImmConstant(C&: ShiftedC2),
                                     R: m_AddLike(L: m_Deferred(V: ShAmt),
                                                R: m_ImmConstant(C&: AddC))))))
    return nullptr;

  // Make sure the add constant is a valid shift amount.
  if (!match(V: AddC,
             P: m_SpecificInt_ICMP(Predicate: ICmpInst::ICMP_ULT, Threshold: APInt(BitWidth, BitWidth))))
    return nullptr;

  // Avoid constant expressions.
  auto *Op0Inst = dyn_cast<Instruction>(Val: I.getOperand(i_nocapture: 0));
  auto *Op1Inst = dyn_cast<Instruction>(Val: I.getOperand(i_nocapture: 1));
  if (!Op0Inst || !Op1Inst)
    return nullptr;

  // Both shifts must be the same.
  Instruction::BinaryOps ShiftOp =
      static_cast<Instruction::BinaryOps>(Op0Inst->getOpcode());
  if (ShiftOp != Op1Inst->getOpcode())
    return nullptr;

  // For adds, only left shifts are supported.
  if (I.getOpcode() == Instruction::Add && ShiftOp != Instruction::Shl)
    return nullptr;

  // Pre-shift C2 by AddC, combine with C1 via the binop, then shift the
  // result by the common ShAmt: shift(binop(C1, shift(C2, AddC)), ShAmt).
  // The constant subexpressions fold at build time.
  Value *NewC = Builder.CreateBinOp(
      Opc: I.getOpcode(), LHS: ShiftedC1, RHS: Builder.CreateBinOp(Opc: ShiftOp, LHS: ShiftedC2, RHS: AddC));
  return BinaryOperator::Create(Op: ShiftOp, S1: NewC, S2: ShAmt);
}
2282
2283// Fold and/or/xor with two equal intrinsic IDs:
2284// bitwise(fshl (A, B, ShAmt), fshl(C, D, ShAmt))
2285// -> fshl(bitwise(A, C), bitwise(B, D), ShAmt)
2286// bitwise(fshr (A, B, ShAmt), fshr(C, D, ShAmt))
2287// -> fshr(bitwise(A, C), bitwise(B, D), ShAmt)
2288// bitwise(bswap(A), bswap(B)) -> bswap(bitwise(A, B))
2289// bitwise(bswap(A), C) -> bswap(bitwise(A, bswap(C)))
2290// bitwise(bitreverse(A), bitreverse(B)) -> bitreverse(bitwise(A, B))
2291// bitwise(bitreverse(A), C) -> bitreverse(bitwise(A, bitreverse(C)))
static Instruction *
foldBitwiseLogicWithIntrinsics(BinaryOperator &I,
                               InstCombiner::BuilderTy &Builder) {
  assert(I.isBitwiseLogicOp() && "Should and/or/xor");
  // The LHS intrinsic call is consumed by the transform, so it must have no
  // other users.
  if (!I.getOperand(i_nocapture: 0)->hasOneUse())
    return nullptr;
  IntrinsicInst *X = dyn_cast<IntrinsicInst>(Val: I.getOperand(i_nocapture: 0));
  if (!X)
    return nullptr;

  // If the RHS is also an intrinsic it must be a single-use call of the same
  // intrinsic ID; otherwise Y stays null and only the constant-RHS forms of
  // bswap/bitreverse are attempted below.
  IntrinsicInst *Y = dyn_cast<IntrinsicInst>(Val: I.getOperand(i_nocapture: 1));
  if (Y && (!Y->hasOneUse() || X->getIntrinsicID() != Y->getIntrinsicID()))
    return nullptr;

  Intrinsic::ID IID = X->getIntrinsicID();
  const APInt *RHSC;
  // Try to match constant RHS.
  if (!Y && (!(IID == Intrinsic::bswap || IID == Intrinsic::bitreverse) ||
             !match(V: I.getOperand(i_nocapture: 1), P: m_APInt(Res&: RHSC))))
    return nullptr;

  switch (IID) {
  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    // Funnel shifts distribute over bitwise logic only when both calls use
    // the same shift amount. (Y is non-null here: the constant-RHS path above
    // only accepts bswap/bitreverse.)
    if (X->getOperand(i_nocapture: 2) != Y->getOperand(i_nocapture: 2))
      return nullptr;
    Value *NewOp0 =
        Builder.CreateBinOp(Opc: I.getOpcode(), LHS: X->getOperand(i_nocapture: 0), RHS: Y->getOperand(i_nocapture: 0));
    Value *NewOp1 =
        Builder.CreateBinOp(Opc: I.getOpcode(), LHS: X->getOperand(i_nocapture: 1), RHS: Y->getOperand(i_nocapture: 1));
    Function *F =
        Intrinsic::getOrInsertDeclaration(M: I.getModule(), id: IID, Tys: I.getType());
    return CallInst::Create(Func: F, Args: {NewOp0, NewOp1, X->getOperand(i_nocapture: 2)});
  }
  case Intrinsic::bswap:
  case Intrinsic::bitreverse: {
    // With a constant RHS, pre-apply the inverse bit/byte permutation to the
    // constant so the intrinsic can be hoisted above the logic op.
    Value *NewOp0 = Builder.CreateBinOp(
        Opc: I.getOpcode(), LHS: X->getOperand(i_nocapture: 0),
        RHS: Y ? Y->getOperand(i_nocapture: 0)
             : ConstantInt::get(Ty: I.getType(), V: IID == Intrinsic::bswap
                                                   ? RHSC->byteSwap()
                                                   : RHSC->reverseBits()));
    Function *F =
        Intrinsic::getOrInsertDeclaration(M: I.getModule(), id: IID, Tys: I.getType());
    return CallInst::Create(Func: F, Args: {NewOp0});
  }
  default:
    return nullptr;
  }
}
2342
2343// Try to simplify V by replacing occurrences of Op with RepOp, but only look
2344// through bitwise operations. In particular, for X | Y we try to replace Y with
2345// 0 inside X and for X & Y we try to replace Y with -1 inside X.
2346// Return the simplified result of X if successful, and nullptr otherwise.
2347// If SimplifyOnly is true, no new instructions will be created.
2348static Value *simplifyAndOrWithOpReplaced(Value *V, Value *Op, Value *RepOp,
2349 bool SimplifyOnly,
2350 InstCombinerImpl &IC,
2351 unsigned Depth = 0) {
2352 if (Op == RepOp)
2353 return nullptr;
2354
2355 if (V == Op)
2356 return RepOp;
2357
2358 auto *I = dyn_cast<BinaryOperator>(Val: V);
2359 if (!I || !I->isBitwiseLogicOp() || Depth >= 3)
2360 return nullptr;
2361
2362 if (!I->hasOneUse())
2363 SimplifyOnly = true;
2364
2365 Value *NewOp0 = simplifyAndOrWithOpReplaced(V: I->getOperand(i_nocapture: 0), Op, RepOp,
2366 SimplifyOnly, IC, Depth: Depth + 1);
2367 Value *NewOp1 = simplifyAndOrWithOpReplaced(V: I->getOperand(i_nocapture: 1), Op, RepOp,
2368 SimplifyOnly, IC, Depth: Depth + 1);
2369 if (!NewOp0 && !NewOp1)
2370 return nullptr;
2371
2372 if (!NewOp0)
2373 NewOp0 = I->getOperand(i_nocapture: 0);
2374 if (!NewOp1)
2375 NewOp1 = I->getOperand(i_nocapture: 1);
2376
2377 if (Value *Res = simplifyBinOp(Opcode: I->getOpcode(), LHS: NewOp0, RHS: NewOp1,
2378 Q: IC.getSimplifyQuery().getWithInstruction(I)))
2379 return Res;
2380
2381 if (SimplifyOnly)
2382 return nullptr;
2383 return IC.Builder.CreateBinOp(Opc: I->getOpcode(), LHS: NewOp0, RHS: NewOp1);
2384}
2385
2386/// Reassociate and/or expressions to see if we can fold the inner and/or ops.
2387/// TODO: Make this recursive; it's a little tricky because an arbitrary
2388/// number of and/or instructions might have to be created.
2389Value *InstCombinerImpl::reassociateBooleanAndOr(Value *LHS, Value *X, Value *Y,
2390 Instruction &I, bool IsAnd,
2391 bool RHSIsLogical) {
2392 Instruction::BinaryOps Opcode = IsAnd ? Instruction::And : Instruction::Or;
2393 Value *Folded = nullptr;
2394 // LHS bop (X lop Y) --> (LHS bop X) lop Y
2395 // LHS bop (X bop Y) --> (LHS bop X) bop Y
2396 if (Value *Res = foldBooleanAndOr(LHS, RHS: X, I, IsAnd, /*IsLogical=*/false))
2397 Folded = RHSIsLogical ? Builder.CreateLogicalOp(Opc: Opcode, Cond1: Res, Cond2: Y)
2398 : Builder.CreateBinOp(Opc: Opcode, LHS: Res, RHS: Y);
2399 // LHS bop (X bop Y) --> X bop (LHS bop Y)
2400 // LHS bop (X lop Y) --> X lop (LHS bop Y)
2401 else if (Value *Res = foldBooleanAndOr(LHS, RHS: Y, I, IsAnd, /*IsLogical=*/false))
2402 Folded = RHSIsLogical ? Builder.CreateLogicalOp(Opc: Opcode, Cond1: X, Cond2: Res)
2403 : Builder.CreateBinOp(Opc: Opcode, LHS: X, RHS: Res);
2404 if (SelectInst *SI = dyn_cast_or_null<SelectInst>(Val: Folded);
2405 SI != nullptr && !ProfcheckDisableMetadataFixes)
2406 // If the bop I was originally a lop, we could recover branch weight
2407 // information using that lop's weights. However, InstCombine usually
2408 // replaces the lop with a bop by the time we get here, deleting the branch
2409 // weight information. Therefore, we can only assume unknown branch weights.
2410 // TODO: see if it's possible to recover branch weight information from the
2411 // original lop (https://github.com/llvm/llvm-project/issues/183864).
2412 setExplicitlyUnknownBranchWeightsIfProfiled(I&: *SI, DEBUG_TYPE,
2413 F: I.getFunction());
2414 return Folded;
2415}
2416
2417// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
2418// here. We should standardize that construct where it is needed or choose some
2419// other way to ensure that commutated variants of patterns are not missed.
2420Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
2421 Type *Ty = I.getType();
2422
2423 if (Value *V = simplifyAndInst(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1),
2424 Q: SQ.getWithInstruction(I: &I)))
2425 return replaceInstUsesWith(I, V);
2426
2427 if (SimplifyAssociativeOrCommutative(I))
2428 return &I;
2429
2430 if (Instruction *X = foldVectorBinop(Inst&: I))
2431 return X;
2432
2433 if (Instruction *Phi = foldBinopWithPhiOperands(BO&: I))
2434 return Phi;
2435
2436 // See if we can simplify any instructions used by the instruction whose sole
2437 // purpose is to compute bits we don't care about.
2438 if (SimplifyDemandedInstructionBits(Inst&: I))
2439 return &I;
2440
2441 // Do this before using distributive laws to catch simple and/or/not patterns.
2442 if (Instruction *Xor = foldAndToXor(I, Builder))
2443 return Xor;
2444
2445 if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
2446 return X;
2447
2448 // (A|B)&(A|C) -> A|(B&C) etc
2449 if (Value *V = foldUsingDistributiveLaws(I))
2450 return replaceInstUsesWith(I, V);
2451
2452 if (Instruction *R = foldBinOpShiftWithShift(I))
2453 return R;
2454
2455 Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);
2456
2457 Value *X, *Y;
2458 const APInt *C;
2459 if ((match(V: Op0, P: m_OneUse(SubPattern: m_LogicalShift(L: m_One(), R: m_Value(V&: X)))) ||
2460 (match(V: Op0, P: m_OneUse(SubPattern: m_Shl(L: m_APInt(Res&: C), R: m_Value(V&: X)))) && (*C)[0])) &&
2461 match(V: Op1, P: m_One())) {
2462 // (1 >> X) & 1 --> zext(X == 0)
2463 // (C << X) & 1 --> zext(X == 0), when C is odd
2464 Value *IsZero = Builder.CreateICmpEQ(LHS: X, RHS: ConstantInt::get(Ty, V: 0));
2465 return new ZExtInst(IsZero, Ty);
2466 }
2467
2468 // (-(X & 1)) & Y --> (X & 1) == 0 ? 0 : Y
2469 Value *Neg;
2470 if (match(V: &I,
2471 P: m_c_And(L: m_Value(V&: Neg, Match: m_OneUse(SubPattern: m_Neg(V: m_And(L: m_Value(), R: m_One())))),
2472 R: m_Value(V&: Y)))) {
2473 Value *Cmp = Builder.CreateIsNull(Arg: Neg);
2474 return createSelectInstWithUnknownProfile(C: Cmp,
2475 S1: ConstantInt::getNullValue(Ty), S2: Y);
2476 }
2477
2478 // Canonicalize:
2479 // (X +/- Y) & Y --> ~X & Y when Y is a power of 2.
2480 if (match(V: &I, P: m_c_And(L: m_Value(V&: Y), R: m_OneUse(SubPattern: m_CombineOr(
2481 L: m_c_Add(L: m_Value(V&: X), R: m_Deferred(V: Y)),
2482 R: m_Sub(L: m_Value(V&: X), R: m_Deferred(V: Y)))))) &&
2483 isKnownToBeAPowerOfTwo(V: Y, /*OrZero*/ true, CxtI: &I))
2484 return BinaryOperator::CreateAnd(V1: Builder.CreateNot(V: X), V2: Y);
2485
2486 if (match(V: Op1, P: m_APInt(Res&: C))) {
2487 const APInt *XorC;
2488 if (match(V: Op0, P: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: X), R: m_APInt(Res&: XorC))))) {
2489 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
2490 Constant *NewC = ConstantInt::get(Ty, V: *C & *XorC);
2491 Value *And = Builder.CreateAnd(LHS: X, RHS: Op1);
2492 And->takeName(V: Op0);
2493 return BinaryOperator::CreateXor(V1: And, V2: NewC);
2494 }
2495
2496 const APInt *OrC;
2497 if (match(V: Op0, P: m_OneUse(SubPattern: m_Or(L: m_Value(V&: X), R: m_APInt(Res&: OrC))))) {
2498 // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
2499 // NOTE: This reduces the number of bits set in the & mask, which
2500 // can expose opportunities for store narrowing for scalars.
2501 // NOTE: SimplifyDemandedBits should have already removed bits from C1
2502 // that aren't set in C2. Meaning we can replace (C1&C2) with C1 in
2503 // above, but this feels safer.
2504 APInt Together = *C & *OrC;
2505 Value *And = Builder.CreateAnd(LHS: X, RHS: ConstantInt::get(Ty, V: Together ^ *C));
2506 And->takeName(V: Op0);
2507 return BinaryOperator::CreateOr(V1: And, V2: ConstantInt::get(Ty, V: Together));
2508 }
2509
2510 unsigned Width = Ty->getScalarSizeInBits();
2511 const APInt *ShiftC;
2512 if (match(V: Op0, P: m_OneUse(SubPattern: m_SExt(Op: m_AShr(L: m_Value(V&: X), R: m_APInt(Res&: ShiftC))))) &&
2513 ShiftC->ult(RHS: Width)) {
2514 if (*C == APInt::getLowBitsSet(numBits: Width, loBitsSet: Width - ShiftC->getZExtValue())) {
2515 // We are clearing high bits that were potentially set by sext+ashr:
2516 // and (sext (ashr X, ShiftC)), C --> lshr (sext X), ShiftC
2517 Value *Sext = Builder.CreateSExt(V: X, DestTy: Ty);
2518 Constant *ShAmtC = ConstantInt::get(Ty, V: ShiftC->zext(width: Width));
2519 return BinaryOperator::CreateLShr(V1: Sext, V2: ShAmtC);
2520 }
2521 }
2522
2523 // If this 'and' clears the sign-bits added by ashr, replace with lshr:
2524 // and (ashr X, ShiftC), C --> lshr X, ShiftC
2525 if (match(V: Op0, P: m_AShr(L: m_Value(V&: X), R: m_APInt(Res&: ShiftC))) && ShiftC->ult(RHS: Width) &&
2526 C->isMask(numBits: Width - ShiftC->getZExtValue()))
2527 return BinaryOperator::CreateLShr(V1: X, V2: ConstantInt::get(Ty, V: *ShiftC));
2528
2529 const APInt *AddC;
2530 if (match(V: Op0, P: m_Add(L: m_Value(V&: X), R: m_APInt(Res&: AddC)))) {
2531 // If we are masking the result of the add down to exactly one bit and
2532 // the constant we are adding has no bits set below that bit, then the
2533 // add is flipping a single bit. Example:
2534 // (X + 4) & 4 --> (X & 4) ^ 4
2535 if (Op0->hasOneUse() && C->isPowerOf2() && (*AddC & (*C - 1)) == 0) {
2536 assert((*C & *AddC) != 0 && "Expected common bit");
2537 Value *NewAnd = Builder.CreateAnd(LHS: X, RHS: Op1);
2538 return BinaryOperator::CreateXor(V1: NewAnd, V2: Op1);
2539 }
2540 }
2541
2542 // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the
2543 // bitwidth of X and OP behaves well when given trunc(C1) and X.
2544 auto isNarrowableBinOpcode = [](BinaryOperator *B) {
2545 switch (B->getOpcode()) {
2546 case Instruction::Xor:
2547 case Instruction::Or:
2548 case Instruction::Mul:
2549 case Instruction::Add:
2550 case Instruction::Sub:
2551 return true;
2552 default:
2553 return false;
2554 }
2555 };
2556 BinaryOperator *BO;
2557 if (match(V: Op0, P: m_OneUse(SubPattern: m_BinOp(I&: BO))) && isNarrowableBinOpcode(BO)) {
2558 Instruction::BinaryOps BOpcode = BO->getOpcode();
2559 Value *X;
2560 const APInt *C1;
2561 // TODO: The one-use restrictions could be relaxed a little if the AND
2562 // is going to be removed.
2563 // Try to narrow the 'and' and a binop with constant operand:
2564 // and (bo (zext X), C1), C --> zext (and (bo X, TruncC1), TruncC)
2565 if (match(V: BO, P: m_c_BinOp(L: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X))), R: m_APInt(Res&: C1))) &&
2566 C->isIntN(N: X->getType()->getScalarSizeInBits())) {
2567 unsigned XWidth = X->getType()->getScalarSizeInBits();
2568 Constant *TruncC1 = ConstantInt::get(Ty: X->getType(), V: C1->trunc(width: XWidth));
2569 Value *BinOp = isa<ZExtInst>(Val: BO->getOperand(i_nocapture: 0))
2570 ? Builder.CreateBinOp(Opc: BOpcode, LHS: X, RHS: TruncC1)
2571 : Builder.CreateBinOp(Opc: BOpcode, LHS: TruncC1, RHS: X);
2572 Constant *TruncC = ConstantInt::get(Ty: X->getType(), V: C->trunc(width: XWidth));
2573 Value *And = Builder.CreateAnd(LHS: BinOp, RHS: TruncC);
2574 return new ZExtInst(And, Ty);
2575 }
2576
2577 // Similar to above: if the mask matches the zext input width, then the
2578 // 'and' can be eliminated, so we can truncate the other variable op:
2579 // and (bo (zext X), Y), C --> zext (bo X, (trunc Y))
2580 if (isa<Instruction>(Val: BO->getOperand(i_nocapture: 0)) &&
2581 match(V: BO->getOperand(i_nocapture: 0), P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X)))) &&
2582 C->isMask(numBits: X->getType()->getScalarSizeInBits())) {
2583 Y = BO->getOperand(i_nocapture: 1);
2584 Value *TrY = Builder.CreateTrunc(V: Y, DestTy: X->getType(), Name: Y->getName() + ".tr");
2585 Value *NewBO =
2586 Builder.CreateBinOp(Opc: BOpcode, LHS: X, RHS: TrY, Name: BO->getName() + ".narrow");
2587 return new ZExtInst(NewBO, Ty);
2588 }
2589 // and (bo Y, (zext X)), C --> zext (bo (trunc Y), X)
2590 if (isa<Instruction>(Val: BO->getOperand(i_nocapture: 1)) &&
2591 match(V: BO->getOperand(i_nocapture: 1), P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X)))) &&
2592 C->isMask(numBits: X->getType()->getScalarSizeInBits())) {
2593 Y = BO->getOperand(i_nocapture: 0);
2594 Value *TrY = Builder.CreateTrunc(V: Y, DestTy: X->getType(), Name: Y->getName() + ".tr");
2595 Value *NewBO =
2596 Builder.CreateBinOp(Opc: BOpcode, LHS: TrY, RHS: X, Name: BO->getName() + ".narrow");
2597 return new ZExtInst(NewBO, Ty);
2598 }
2599 }
2600
2601 // This is intentionally placed after the narrowing transforms for
2602 // efficiency (transform directly to the narrow logic op if possible).
2603 // If the mask is only needed on one incoming arm, push the 'and' op up.
2604 if (match(V: Op0, P: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: X), R: m_Value(V&: Y)))) ||
2605 match(V: Op0, P: m_OneUse(SubPattern: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
2606 APInt NotAndMask(~(*C));
2607 BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Val: Op0)->getOpcode();
2608 if (MaskedValueIsZero(V: X, Mask: NotAndMask, CxtI: &I)) {
2609 // Not masking anything out for the LHS, move mask to RHS.
2610 // and ({x}or X, Y), C --> {x}or X, (and Y, C)
2611 Value *NewRHS = Builder.CreateAnd(LHS: Y, RHS: Op1, Name: Y->getName() + ".masked");
2612 return BinaryOperator::Create(Op: BinOp, S1: X, S2: NewRHS);
2613 }
2614 if (!isa<Constant>(Val: Y) && MaskedValueIsZero(V: Y, Mask: NotAndMask, CxtI: &I)) {
2615 // Not masking anything out for the RHS, move mask to LHS.
2616 // and ({x}or X, Y), C --> {x}or (and X, C), Y
2617 Value *NewLHS = Builder.CreateAnd(LHS: X, RHS: Op1, Name: X->getName() + ".masked");
2618 return BinaryOperator::Create(Op: BinOp, S1: NewLHS, S2: Y);
2619 }
2620 }
2621
2622 // When the mask is a power-of-2 constant and op0 is a shifted-power-of-2
2623 // constant, test if the shift amount equals the offset bit index:
2624 // (ShiftC << X) & C --> X == (log2(C) - log2(ShiftC)) ? C : 0
2625 // (ShiftC >> X) & C --> X == (log2(ShiftC) - log2(C)) ? C : 0
2626 if (C->isPowerOf2() &&
2627 match(V: Op0, P: m_OneUse(SubPattern: m_LogicalShift(L: m_Power2(V&: ShiftC), R: m_Value(V&: X))))) {
2628 int Log2ShiftC = ShiftC->exactLogBase2();
2629 int Log2C = C->exactLogBase2();
2630 bool IsShiftLeft =
2631 cast<BinaryOperator>(Val: Op0)->getOpcode() == Instruction::Shl;
2632 int BitNum = IsShiftLeft ? Log2C - Log2ShiftC : Log2ShiftC - Log2C;
2633 assert(BitNum >= 0 && "Expected demanded bits to handle impossible mask");
2634 Value *Cmp = Builder.CreateICmpEQ(LHS: X, RHS: ConstantInt::get(Ty, V: BitNum));
2635 return createSelectInstWithUnknownProfile(C: Cmp, S1: ConstantInt::get(Ty, V: *C),
2636 S2: ConstantInt::getNullValue(Ty));
2637 }
2638
2639 Constant *C1, *C2;
2640 const APInt *C3 = C;
2641 Value *X;
2642 if (C3->isPowerOf2()) {
2643 Constant *Log2C3 = ConstantInt::get(Ty, V: C3->countr_zero());
2644 if (match(V: Op0, P: m_OneUse(SubPattern: m_LShr(L: m_Shl(L: m_ImmConstant(C&: C1), R: m_Value(V&: X)),
2645 R: m_ImmConstant(C&: C2)))) &&
2646 match(V: C1, P: m_Power2())) {
2647 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C: C1);
2648 Constant *LshrC = ConstantExpr::getAdd(C1: C2, C2: Log2C3);
2649 KnownBits KnownLShrc = computeKnownBits(V: LshrC, CxtI: nullptr);
2650 if (KnownLShrc.getMaxValue().ult(RHS: Width)) {
2651 // iff C1,C3 is pow2 and C2 + cttz(C3) < BitWidth:
2652 // ((C1 << X) >> C2) & C3 -> X == (cttz(C3)+C2-cttz(C1)) ? C3 : 0
2653 Constant *CmpC = ConstantExpr::getSub(C1: LshrC, C2: Log2C1);
2654 Value *Cmp = Builder.CreateICmpEQ(LHS: X, RHS: CmpC);
2655 return createSelectInstWithUnknownProfile(
2656 C: Cmp, S1: ConstantInt::get(Ty, V: *C3), S2: ConstantInt::getNullValue(Ty));
2657 }
2658 }
2659
2660 if (match(V: Op0, P: m_OneUse(SubPattern: m_Shl(L: m_LShr(L: m_ImmConstant(C&: C1), R: m_Value(V&: X)),
2661 R: m_ImmConstant(C&: C2)))) &&
2662 match(V: C1, P: m_Power2())) {
2663 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C: C1);
2664 Constant *Cmp =
2665 ConstantFoldCompareInstOperands(Predicate: ICmpInst::ICMP_ULT, LHS: Log2C3, RHS: C2, DL);
2666 if (Cmp && Cmp->isNullValue()) {
2667 // iff C1,C3 is pow2 and Log2(C3) >= C2:
2668 // ((C1 >> X) << C2) & C3 -> X == (cttz(C1)+C2-cttz(C3)) ? C3 : 0
2669 Constant *ShlC = ConstantExpr::getAdd(C1: C2, C2: Log2C1);
2670 Constant *CmpC = ConstantExpr::getSub(C1: ShlC, C2: Log2C3);
2671 Value *Cmp = Builder.CreateICmpEQ(LHS: X, RHS: CmpC);
2672 return createSelectInstWithUnknownProfile(
2673 C: Cmp, S1: ConstantInt::get(Ty, V: *C3), S2: ConstantInt::getNullValue(Ty));
2674 }
2675 }
2676 }
2677 }
2678
2679 // If we are clearing the sign bit of a floating-point value, convert this to
2680 // fabs, then cast back to integer.
2681 //
2682 // This is a generous interpretation for noimplicitfloat, this is not a true
2683 // floating-point operation.
2684 //
2685 // Assumes any IEEE-represented type has the sign bit in the high bit.
2686 // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt
2687 Value *CastOp;
2688 if (match(V: Op0, P: m_ElementWiseBitCast(Op: m_Value(V&: CastOp))) &&
2689 match(V: Op1, P: m_MaxSignedValue()) &&
2690 !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
2691 Kind: Attribute::NoImplicitFloat)) {
2692 Type *EltTy = CastOp->getType()->getScalarType();
2693 if (EltTy->isFloatingPointTy() &&
2694 APFloat::hasSignBitInMSB(EltTy->getFltSemantics())) {
2695 Value *FAbs = Builder.CreateUnaryIntrinsic(ID: Intrinsic::fabs, V: CastOp);
2696 return new BitCastInst(FAbs, I.getType());
2697 }
2698 }
2699
2700 // and(shl(zext(X), Y), SignMask) -> and(sext(X), SignMask)
2701 // where Y is a valid shift amount.
2702 if (match(V: &I, P: m_And(L: m_OneUse(SubPattern: m_Shl(L: m_ZExt(Op: m_Value(V&: X)), R: m_Value(V&: Y))),
2703 R: m_SignMask())) &&
2704 match(V: Y, P: m_SpecificInt_ICMP(
2705 Predicate: ICmpInst::Predicate::ICMP_EQ,
2706 Threshold: APInt(Ty->getScalarSizeInBits(),
2707 Ty->getScalarSizeInBits() -
2708 X->getType()->getScalarSizeInBits())))) {
2709 auto *SExt = Builder.CreateSExt(V: X, DestTy: Ty, Name: X->getName() + ".signext");
2710 return BinaryOperator::CreateAnd(V1: SExt, V2: Op1);
2711 }
2712
2713 if (Instruction *Z = narrowMaskedBinOp(And&: I))
2714 return Z;
2715
2716 if (I.getType()->isIntOrIntVectorTy(BitWidth: 1)) {
2717 if (auto *SI0 = dyn_cast<SelectInst>(Val: Op0)) {
2718 if (auto *R =
2719 foldAndOrOfSelectUsingImpliedCond(Op: Op1, SI&: *SI0, /* IsAnd */ true))
2720 return R;
2721 }
2722 if (auto *SI1 = dyn_cast<SelectInst>(Val: Op1)) {
2723 if (auto *R =
2724 foldAndOrOfSelectUsingImpliedCond(Op: Op0, SI&: *SI1, /* IsAnd */ true))
2725 return R;
2726 }
2727 }
2728
2729 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
2730 return FoldedLogic;
2731
2732 if (Instruction *DeMorgan = matchDeMorgansLaws(I, IC&: *this))
2733 return DeMorgan;
2734
2735 {
2736 Value *A, *B, *C;
2737 // A & ~(A ^ B) --> A & B
2738 if (match(V: Op1, P: m_Not(V: m_c_Xor(L: m_Specific(V: Op0), R: m_Value(V&: B)))))
2739 return BinaryOperator::CreateAnd(V1: Op0, V2: B);
2740 // ~(A ^ B) & A --> A & B
2741 if (match(V: Op0, P: m_Not(V: m_c_Xor(L: m_Specific(V: Op1), R: m_Value(V&: B)))))
2742 return BinaryOperator::CreateAnd(V1: Op1, V2: B);
2743
2744 // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
2745 if (match(V: Op0, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))) &&
2746 match(V: Op1, P: m_Xor(L: m_Xor(L: m_Specific(V: B), R: m_Value(V&: C)), R: m_Specific(V: A)))) {
2747 Value *NotC = Op1->hasOneUse()
2748 ? Builder.CreateNot(V: C)
2749 : getFreelyInverted(V: C, WillInvertAllUses: C->hasOneUse(), Builder: &Builder);
2750 if (NotC != nullptr)
2751 return BinaryOperator::CreateAnd(V1: Op0, V2: NotC);
2752 }
2753
2754 // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
2755 if (match(V: Op0, P: m_Xor(L: m_Xor(L: m_Value(V&: A), R: m_Value(V&: C)), R: m_Value(V&: B))) &&
2756 match(V: Op1, P: m_Xor(L: m_Specific(V: B), R: m_Specific(V: A)))) {
2757 Value *NotC = Op0->hasOneUse()
2758 ? Builder.CreateNot(V: C)
2759 : getFreelyInverted(V: C, WillInvertAllUses: C->hasOneUse(), Builder: &Builder);
2760 if (NotC != nullptr)
2761 return BinaryOperator::CreateAnd(V1: Op1, V2: Builder.CreateNot(V: C));
2762 }
2763
2764 // (A | B) & (~A ^ B) -> A & B
2765 // (A | B) & (B ^ ~A) -> A & B
2766 // (B | A) & (~A ^ B) -> A & B
2767 // (B | A) & (B ^ ~A) -> A & B
2768 if (match(V: Op1, P: m_c_Xor(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B))) &&
2769 match(V: Op0, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B))))
2770 return BinaryOperator::CreateAnd(V1: A, V2: B);
2771
2772 // (~A ^ B) & (A | B) -> A & B
2773 // (~A ^ B) & (B | A) -> A & B
2774 // (B ^ ~A) & (A | B) -> A & B
2775 // (B ^ ~A) & (B | A) -> A & B
2776 if (match(V: Op0, P: m_c_Xor(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B))) &&
2777 match(V: Op1, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B))))
2778 return BinaryOperator::CreateAnd(V1: A, V2: B);
2779
2780 // (~A | B) & (A ^ B) -> ~A & B
2781 // (~A | B) & (B ^ A) -> ~A & B
2782 // (B | ~A) & (A ^ B) -> ~A & B
2783 // (B | ~A) & (B ^ A) -> ~A & B
2784 if (match(V: Op0, P: m_c_Or(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B))) &&
2785 match(V: Op1, P: m_c_Xor(L: m_Specific(V: A), R: m_Specific(V: B))))
2786 return BinaryOperator::CreateAnd(V1: Builder.CreateNot(V: A), V2: B);
2787
2788 // (A ^ B) & (~A | B) -> ~A & B
2789 // (B ^ A) & (~A | B) -> ~A & B
2790 // (A ^ B) & (B | ~A) -> ~A & B
2791 // (B ^ A) & (B | ~A) -> ~A & B
2792 if (match(V: Op1, P: m_c_Or(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B))) &&
2793 match(V: Op0, P: m_c_Xor(L: m_Specific(V: A), R: m_Specific(V: B))))
2794 return BinaryOperator::CreateAnd(V1: Builder.CreateNot(V: A), V2: B);
2795 }
2796
2797 if (Value *Res =
2798 foldBooleanAndOr(LHS: Op0, RHS: Op1, I, /*IsAnd=*/true, /*IsLogical=*/false))
2799 return replaceInstUsesWith(I, V: Res);
2800
2801 if (match(V: Op1, P: m_OneUse(SubPattern: m_LogicalAnd(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
2802 bool IsLogical = isa<SelectInst>(Val: Op1);
2803 if (auto *V = reassociateBooleanAndOr(LHS: Op0, X, Y, I, /*IsAnd=*/true,
2804 /*RHSIsLogical=*/IsLogical))
2805 return replaceInstUsesWith(I, V);
2806 }
2807 if (match(V: Op0, P: m_OneUse(SubPattern: m_LogicalAnd(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
2808 bool IsLogical = isa<SelectInst>(Val: Op0);
2809 if (auto *V = reassociateBooleanAndOr(LHS: Op1, X, Y, I, /*IsAnd=*/true,
2810 /*RHSIsLogical=*/IsLogical))
2811 return replaceInstUsesWith(I, V);
2812 }
2813
2814 if (Instruction *FoldedFCmps = reassociateFCmps(BO&: I, Builder))
2815 return FoldedFCmps;
2816
2817 if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
2818 return CastedAnd;
2819
2820 if (Instruction *Sel = foldBinopOfSextBoolToSelect(I))
2821 return Sel;
2822
2823 // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>.
2824 // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
2825 // with binop identity constant. But creating a select with non-constant
2826 // arm may not be reversible due to poison semantics. Is that a good
2827 // canonicalization?
2828 Value *A, *B;
2829 if (match(V: &I, P: m_c_And(L: m_SExt(Op: m_Value(V&: A)), R: m_Value(V&: B))) &&
2830 A->getType()->isIntOrIntVectorTy(BitWidth: 1))
2831 return createSelectInstWithUnknownProfile(C: A, S1: B, S2: Constant::getNullValue(Ty));
2832
2833 // Similarly, a 'not' of the bool translates to a swap of the select arms:
2834 // ~sext(A) & B / B & ~sext(A) --> A ? 0 : B
2835 if (match(V: &I, P: m_c_And(L: m_Not(V: m_SExt(Op: m_Value(V&: A))), R: m_Value(V&: B))) &&
2836 A->getType()->isIntOrIntVectorTy(BitWidth: 1))
2837 return createSelectInstWithUnknownProfile(C: A, S1: Constant::getNullValue(Ty), S2: B);
2838
2839 // and(zext(A), B) -> A ? (B & 1) : 0
2840 if (match(V: &I, P: m_c_And(L: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: A))), R: m_Value(V&: B))) &&
2841 A->getType()->isIntOrIntVectorTy(BitWidth: 1))
2842 return createSelectInstWithUnknownProfile(
2843 C: A, S1: Builder.CreateAnd(LHS: B, RHS: ConstantInt::get(Ty, V: 1)),
2844 S2: Constant::getNullValue(Ty));
2845
2846 // (-1 + A) & B --> A ? 0 : B where A is 0/1.
2847 if (match(V: &I, P: m_c_And(L: m_OneUse(SubPattern: m_Add(L: m_ZExtOrSelf(Op: m_Value(V&: A)), R: m_AllOnes())),
2848 R: m_Value(V&: B)))) {
2849 if (A->getType()->isIntOrIntVectorTy(BitWidth: 1))
2850 return createSelectInstWithUnknownProfile(C: A, S1: Constant::getNullValue(Ty),
2851 S2: B);
2852 if (computeKnownBits(V: A, CxtI: &I).countMaxActiveBits() <= 1) {
2853 return createSelectInstWithUnknownProfile(
2854 C: Builder.CreateICmpEQ(LHS: A, RHS: Constant::getNullValue(Ty: A->getType())), S1: B,
2855 S2: Constant::getNullValue(Ty));
2856 }
2857 }
2858
2859 // (iN X s>> (N-1)) & Y --> (X s< 0) ? Y : 0 -- with optional sext
2860 if (match(V: &I, P: m_c_And(L: m_OneUse(SubPattern: m_SExtOrSelf(
2861 Op: m_AShr(L: m_Value(V&: X), R: m_APIntAllowPoison(Res&: C)))),
2862 R: m_Value(V&: Y))) &&
2863 *C == X->getType()->getScalarSizeInBits() - 1) {
2864 Value *IsNeg = Builder.CreateIsNeg(Arg: X, Name: "isneg");
2865 return createSelectInstWithUnknownProfile(C: IsNeg, S1: Y,
2866 S2: ConstantInt::getNullValue(Ty));
2867 }
2868 // If there's a 'not' of the shifted value, swap the select operands:
2869 // ~(iN X s>> (N-1)) & Y --> (X s< 0) ? 0 : Y -- with optional sext
2870 if (match(V: &I, P: m_c_And(L: m_OneUse(SubPattern: m_SExtOrSelf(
2871 Op: m_Not(V: m_AShr(L: m_Value(V&: X), R: m_APIntAllowPoison(Res&: C))))),
2872 R: m_Value(V&: Y))) &&
2873 *C == X->getType()->getScalarSizeInBits() - 1) {
2874 Value *IsNeg = Builder.CreateIsNeg(Arg: X, Name: "isneg");
2875 return createSelectInstWithUnknownProfile(C: IsNeg,
2876 S1: ConstantInt::getNullValue(Ty), S2: Y);
2877 }
2878
2879 // (~x) & y --> ~(x | (~y)) iff that gets rid of inversions
2880 if (sinkNotIntoOtherHandOfLogicalOp(I))
2881 return &I;
2882
2883 // An and recurrence w/loop invariant step is equivelent to (and start, step)
2884 PHINode *PN = nullptr;
2885 Value *Start = nullptr, *Step = nullptr;
2886 if (matchSimpleRecurrence(I: &I, P&: PN, Start, Step) && DT.dominates(Def: Step, User: PN))
2887 return replaceInstUsesWith(I, V: Builder.CreateAnd(LHS: Start, RHS: Step));
2888
2889 if (Instruction *R = reassociateForUses(BO&: I, Builder))
2890 return R;
2891
2892 if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
2893 return Canonicalized;
2894
2895 if (Instruction *Folded = foldLogicOfIsFPClass(BO&: I, Op0, Op1))
2896 return Folded;
2897
2898 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
2899 return Res;
2900
2901 if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
2902 return Res;
2903
2904 if (Value *V =
2905 simplifyAndOrWithOpReplaced(V: Op0, Op: Op1, RepOp: Constant::getAllOnesValue(Ty),
2906 /*SimplifyOnly*/ false, IC&: *this))
2907 return BinaryOperator::CreateAnd(V1: V, V2: Op1);
2908 if (Value *V =
2909 simplifyAndOrWithOpReplaced(V: Op1, Op: Op0, RepOp: Constant::getAllOnesValue(Ty),
2910 /*SimplifyOnly*/ false, IC&: *this))
2911 return BinaryOperator::CreateAnd(V1: Op0, V2: V);
2912
2913 return nullptr;
2914}
2915
2916Instruction *InstCombinerImpl::matchBSwapOrBitReverse(Instruction &I,
2917 bool MatchBSwaps,
2918 bool MatchBitReversals) {
2919 SmallVector<Instruction *, 4> Insts;
2920 if (!recognizeBSwapOrBitReverseIdiom(I: &I, MatchBSwaps, MatchBitReversals,
2921 InsertedInsts&: Insts))
2922 return nullptr;
2923 Instruction *LastInst = Insts.pop_back_val();
2924 LastInst->removeFromParent();
2925
2926 for (auto *Inst : Insts) {
2927 Inst->setDebugLoc(I.getDebugLoc());
2928 Worklist.push(I: Inst);
2929 }
2930 return LastInst;
2931}
2932
/// Try to match this 'or' as a funnel-shift / rotate pattern. On success,
/// return the intrinsic ID (fshl or fshr) and its argument list
/// {ShVal0, ShVal1, ShAmt}; otherwise return std::nullopt. This only
/// recognizes the pattern - it creates no new instructions itself (though it
/// may drop poison-generating flags on reused instructions in the zext-concat
/// case below).
std::optional<std::pair<Intrinsic::ID, SmallVector<Value *, 3>>>
InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) {
  // TODO: Can we reduce the code duplication between this and the related
  // rotate matching code under visitSelect and visitTrunc?
  assert(Or.getOpcode() == BinaryOperator::Or && "Expecting or instruction");

  unsigned Width = Or.getType()->getScalarSizeInBits();

  Instruction *Or0, *Or1;
  if (!match(V: Or.getOperand(i: 0), P: m_Instruction(I&: Or0)) ||
      !match(V: Or.getOperand(i: 1), P: m_Instruction(I&: Or1)))
    return std::nullopt;

  bool IsFshl = true; // Sub on LSHR.
  SmallVector<Value *, 3> FShiftArgs;

  // First, find an or'd pair of opposite shifts:
  // or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1)
  if (isa<BinaryOperator>(Val: Or0) && isa<BinaryOperator>(Val: Or1)) {
    Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
    // Each shift must be one-use, and the two shifts must be in opposite
    // directions - two shifts with the same opcode cannot form a funnel shift.
    if (!match(V: Or0,
               P: m_OneUse(SubPattern: m_LogicalShift(L: m_Value(V&: ShVal0), R: m_Value(V&: ShAmt0)))) ||
        !match(V: Or1,
               P: m_OneUse(SubPattern: m_LogicalShift(L: m_Value(V&: ShVal1), R: m_Value(V&: ShAmt1)))) ||
        Or0->getOpcode() == Or1->getOpcode())
      return std::nullopt;

    // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
    if (Or0->getOpcode() == BinaryOperator::LShr) {
      std::swap(a&: Or0, b&: Or1);
      std::swap(a&: ShVal0, b&: ShVal1);
      std::swap(a&: ShAmt0, b&: ShAmt1);
    }
    assert(Or0->getOpcode() == BinaryOperator::Shl &&
           Or1->getOpcode() == BinaryOperator::LShr &&
           "Illegal or(shift,shift) pair");

    // Match the shift amount operands for a funnel shift pattern. This always
    // matches a subtraction on the R operand.
    auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
      // Check for constant shift amounts that sum to the bitwidth.
      const APInt *LI, *RI;
      if (match(V: L, P: m_APIntAllowPoison(Res&: LI)) && match(V: R, P: m_APIntAllowPoison(Res&: RI)))
        if (LI->ult(RHS: Width) && RI->ult(RHS: Width) && (*LI + *RI) == Width)
          return ConstantInt::get(Ty: L->getType(), V: *LI);

      // Non-splat vector constants: every lane must be < Width and the
      // lanewise sum L + R must equal Width (poison lanes permitted).
      Constant *LC, *RC;
      if (match(V: L, P: m_Constant(C&: LC)) && match(V: R, P: m_Constant(C&: RC)) &&
          match(V: L,
                P: m_SpecificInt_ICMP(Predicate: ICmpInst::ICMP_ULT, Threshold: APInt(Width, Width))) &&
          match(V: R,
                P: m_SpecificInt_ICMP(Predicate: ICmpInst::ICMP_ULT, Threshold: APInt(Width, Width))) &&
          match(V: ConstantExpr::getAdd(C1: LC, C2: RC), P: m_SpecificIntAllowPoison(V: Width)))
        return ConstantExpr::mergeUndefsWith(C: LC, Other: RC);

      // (shl ShVal, X) | (lshr ShVal, (Width - x)) iff X < Width.
      // We limit this to X < Width in case the backend re-expands the
      // intrinsic, and has to reintroduce a shift modulo operation (InstCombine
      // might remove it after this fold). This still doesn't guarantee that the
      // final codegen will match this original pattern.
      if (match(V: R, P: m_OneUse(SubPattern: m_Sub(L: m_SpecificInt(V: Width), R: m_Specific(V: L))))) {
        KnownBits KnownL = computeKnownBits(V: L, CxtI: &Or);
        return KnownL.getMaxValue().ult(RHS: Width) ? L : nullptr;
      }

      // For non-constant cases, the following patterns currently only work for
      // rotation patterns.
      // TODO: Add general funnel-shift compatible patterns.
      if (ShVal0 != ShVal1)
        return nullptr;

      // For non-constant cases we don't support non-pow2 shift masks.
      // TODO: Is it worth matching urem as well?
      if (!isPowerOf2_32(Value: Width))
        return nullptr;

      // The shift amount may be masked with negation:
      // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
      Value *X;
      unsigned Mask = Width - 1;
      if (match(V: L, P: m_And(L: m_Value(V&: X), R: m_SpecificInt(V: Mask))) &&
          match(V: R, P: m_And(L: m_Neg(V: m_Specific(V: X)), R: m_SpecificInt(V: Mask))))
        return X;

      // (shl ShVal, X) | (lshr ShVal, ((-X) & (Width - 1)))
      if (match(V: R, P: m_And(L: m_Neg(V: m_Specific(V: L)), R: m_SpecificInt(V: Mask))))
        return L;

      // Similar to above, but the shift amount may be extended after masking,
      // so return the extended value as the parameter for the intrinsic.
      if (match(V: L, P: m_ZExt(Op: m_And(L: m_Value(V&: X), R: m_SpecificInt(V: Mask)))) &&
          match(V: R,
                P: m_And(L: m_Neg(V: m_ZExt(Op: m_And(L: m_Specific(V: X), R: m_SpecificInt(V: Mask)))),
                       R: m_SpecificInt(V: Mask))))
        return L;

      if (match(V: L, P: m_ZExt(Op: m_And(L: m_Value(V&: X), R: m_SpecificInt(V: Mask)))) &&
          match(V: R, P: m_ZExt(Op: m_And(L: m_Neg(V: m_Specific(V: X)), R: m_SpecificInt(V: Mask)))))
        return L;

      return nullptr;
    };

    // Try the fshl orientation first (sub on the lshr amount); if that fails,
    // the swapped match corresponds to fshr (sub on the shl amount).
    Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
    if (!ShAmt) {
      ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
      IsFshl = false; // Sub on SHL.
    }
    if (!ShAmt)
      return std::nullopt;

    FShiftArgs = {ShVal0, ShVal1, ShAmt};
  } else if (isa<ZExtInst>(Val: Or0) || isa<ZExtInst>(Val: Or1)) {
    // If there are two 'or' instructions concatenating variables in opposite
    // order:
    //
    // Slot1 and Slot2 are all zero bits.
    // | Slot1 | Low | Slot2 | High |
    // LowHigh = or (shl (zext Low), ZextLowShlAmt), (zext High)
    // | Slot2 | High | Slot1 | Low |
    // HighLow = or (shl (zext High), ZextHighShlAmt), (zext Low)
    //
    // the latter 'or' can be safely converted to
    // -> HighLow = fshl LowHigh, LowHigh, ZextHighShlAmt
    // if ZextLowShlAmt + ZextHighShlAmt == Width.
    if (!isa<ZExtInst>(Val: Or1))
      std::swap(a&: Or0, b&: Or1);

    Value *High, *ZextHigh, *Low;
    const APInt *ZextHighShlAmt;
    if (!match(V: Or0,
               P: m_OneUse(SubPattern: m_Shl(L: m_Value(V&: ZextHigh), R: m_APInt(Res&: ZextHighShlAmt)))))
      return std::nullopt;

    if (!match(V: Or1, P: m_ZExt(Op: m_Value(V&: Low))) ||
        !match(V: ZextHigh, P: m_ZExt(Op: m_Value(V&: High))))
      return std::nullopt;

    unsigned HighSize = High->getType()->getScalarSizeInBits();
    unsigned LowSize = Low->getType()->getScalarSizeInBits();
    // Make sure High does not overlap with Low and most significant bits of
    // High aren't shifted out.
    if (ZextHighShlAmt->ult(RHS: LowSize) || ZextHighShlAmt->ugt(RHS: Width - HighSize))
      return std::nullopt;

    // Search the users of the zext'd High for an existing LowHigh concat that
    // this HighLow can be expressed as a rotate of.
    for (User *U : ZextHigh->users()) {
      Value *X, *Y;
      if (!match(V: U, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y))))
        continue;

      if (!isa<ZExtInst>(Val: Y))
        std::swap(a&: X, b&: Y);

      // The reused concat must dominate this 'or' to be a valid operand.
      const APInt *ZextLowShlAmt;
      if (!match(V: X, P: m_Shl(L: m_Specific(V: Or1), R: m_APInt(Res&: ZextLowShlAmt))) ||
          !match(V: Y, P: m_Specific(V: ZextHigh)) || !DT.dominates(Def: U, User: &Or))
        continue;

      // HighLow is good concat. If sum of two shifts amount equals to Width,
      // LowHigh must also be a good concat.
      if (*ZextLowShlAmt + *ZextHighShlAmt != Width)
        continue;

      // Low must not overlap with High and most significant bits of Low must
      // not be shifted out.
      assert(ZextLowShlAmt->uge(HighSize) &&
             ZextLowShlAmt->ule(Width - LowSize) && "Invalid concat");

      // We cannot reuse the result if it may produce poison.
      // Drop poison generating flags in the expression tree.
      // Or
      cast<Instruction>(Val: U)->dropPoisonGeneratingFlags();
      // Shl
      cast<Instruction>(Val: X)->dropPoisonGeneratingFlags();

      FShiftArgs = {U, U, ConstantInt::get(Ty: Or0->getType(), V: *ZextHighShlAmt)};
      break;
    }
  }

  if (FShiftArgs.empty())
    return std::nullopt;

  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  return std::make_pair(x&: IID, y&: FShiftArgs);
}
3118
3119/// Match UB-safe variants of the funnel shift intrinsic.
3120static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) {
3121 if (auto Opt = IC.convertOrOfShiftsToFunnelShift(Or)) {
3122 auto [IID, FShiftArgs] = *Opt;
3123 Function *F =
3124 Intrinsic::getOrInsertDeclaration(M: Or.getModule(), id: IID, Tys: Or.getType());
3125 return CallInst::Create(Func: F, Args: FShiftArgs);
3126 }
3127
3128 return nullptr;
3129}
3130
/// Attempt to combine or(zext(x),shl(zext(y),bw/2) concat packing patterns.
static Value *matchOrConcat(Instruction &Or, InstCombiner::BuilderTy &Builder) {
  assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'");
  Value *Op0 = Or.getOperand(i: 0), *Op1 = Or.getOperand(i: 1);
  Type *Ty = Or.getType();

  // The concat splits the result into two equal halves, so the scalar width
  // must be even.
  unsigned Width = Ty->getScalarSizeInBits();
  if ((Width & 1) != 0)
    return nullptr;
  unsigned HalfWidth = Width / 2;

  // Canonicalize zext (lower half) to LHS.
  if (!isa<ZExtInst>(Val: Op0))
    std::swap(a&: Op0, b&: Op1);

  // Find lower/upper half.
  Value *LowerSrc, *ShlVal, *UpperSrc;
  const APInt *C;
  if (!match(V: Op0, P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: LowerSrc)))) ||
      !match(V: Op1, P: m_OneUse(SubPattern: m_Shl(L: m_Value(V&: ShlVal), R: m_APInt(Res&: C)))) ||
      !match(V: ShlVal, P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: UpperSrc)))))
    return nullptr;
  // Both sources must be exactly half-width (and same-typed) so the two
  // halves tile the full result with no gap or overlap.
  if (*C != HalfWidth || LowerSrc->getType() != UpperSrc->getType() ||
      LowerSrc->getType()->getScalarSizeInBits() != HalfWidth)
    return nullptr;

  // Helper: rebuild concat(Lo, Hi) at full width, then apply the given
  // intrinsic to the combined value.
  auto ConcatIntrinsicCalls = [&](Intrinsic::ID id, Value *Lo, Value *Hi) {
    Value *NewLower = Builder.CreateZExt(V: Lo, DestTy: Ty);
    Value *NewUpper = Builder.CreateZExt(V: Hi, DestTy: Ty);
    NewUpper = Builder.CreateShl(LHS: NewUpper, RHS: HalfWidth);
    Value *BinOp = Builder.CreateOr(LHS: NewLower, RHS: NewUpper);
    return Builder.CreateIntrinsic(ID: id, Types: Ty, Args: BinOp);
  };

  // BSWAP: Push the concat down, swapping the lower/upper sources.
  // concat(bswap(x),bswap(y)) -> bswap(concat(x,y))
  Value *LowerBSwap, *UpperBSwap;
  if (match(V: LowerSrc, P: m_BSwap(Op0: m_Value(V&: LowerBSwap))) &&
      match(V: UpperSrc, P: m_BSwap(Op0: m_Value(V&: UpperBSwap))))
    return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap);

  // BITREVERSE: Push the concat down, swapping the lower/upper sources.
  // concat(bitreverse(x),bitreverse(y)) -> bitreverse(concat(x,y))
  Value *LowerBRev, *UpperBRev;
  if (match(V: LowerSrc, P: m_BitReverse(Op0: m_Value(V&: LowerBRev))) &&
      match(V: UpperSrc, P: m_BitReverse(Op0: m_Value(V&: UpperBRev))))
    return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev);

  // iX ext split: extending or(zext(x),shl(zext(y),bw/2) pattern
  // to consume sext/ashr:
  // or(zext(sext(x)),shl(zext(sext(ashr(x,xbw-1))),bw/2)
  // or(zext(x),shl(zext(ashr(x,xbw-1)),bw/2)
  // The upper half is the sign bits of x, so the whole 'or' is sext(x).
  Value *X;
  if (match(V: LowerSrc, P: m_SExtOrSelf(Op: m_Value(V&: X))) &&
      match(V: UpperSrc,
            P: m_SExtOrSelf(Op: m_AShr(
                L: m_Specific(V: X),
                R: m_SpecificInt(V: X->getType()->getScalarSizeInBits() - 1)))))
    return Builder.CreateSExt(V: X, DestTy: Ty);

  return nullptr;
}
3193
3194/// If all elements of two constant vectors are 0/-1 and inverses, return true.
3195static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
3196 unsigned NumElts = cast<FixedVectorType>(Val: C1->getType())->getNumElements();
3197 for (unsigned i = 0; i != NumElts; ++i) {
3198 Constant *EltC1 = C1->getAggregateElement(Elt: i);
3199 Constant *EltC2 = C2->getAggregateElement(Elt: i);
3200 if (!EltC1 || !EltC2)
3201 return false;
3202
3203 // One element must be all ones, and the other must be all zeros.
3204 if (!((match(V: EltC1, P: m_Zero()) && match(V: EltC2, P: m_AllOnes())) ||
3205 (match(V: EltC2, P: m_Zero()) && match(V: EltC1, P: m_AllOnes()))))
3206 return false;
3207 }
3208 return true;
3209}
3210
/// We have an expression of the form (A & C) | (B & D). If A is a scalar or
/// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
/// B, it can be used as the condition operand of a select instruction.
/// We will detect (A & C) | ~(B | D) when the flag ABIsTheSame enabled.
/// Returns the boolean condition (possibly a trunc/xor of A) or nullptr.
Value *InstCombinerImpl::getSelectCondition(Value *A, Value *B,
                                            bool ABIsTheSame) {
  // We may have peeked through bitcasts in the caller.
  // Exit immediately if we don't have (vector) integer types.
  Type *Ty = A->getType();
  if (!Ty->isIntOrIntVectorTy() || !B->getType()->isIntOrIntVectorTy())
    return nullptr;

  // If A is the 'not' operand of B and has enough signbits, we have our answer.
  if (ABIsTheSame ? (A == B) : match(V: B, P: m_Not(V: m_Specific(V: A)))) {
    // If these are scalars or vectors of i1, A can be used directly.
    if (Ty->isIntOrIntVectorTy(BitWidth: 1))
      return A;

    // If we look through a vector bitcast, the caller will bitcast the operands
    // to match the condition's number of bits (N x i1).
    // To make this poison-safe, disallow bitcast from wide element to narrow
    // element. That could allow poison in lanes where it was not present in the
    // original code.
    A = peekThroughBitcast(V: A);
    if (A->getType()->isIntOrIntVectorTy()) {
      // All-sign-bit elements (sext'd booleans) truncate cleanly to i1 lanes.
      unsigned NumSignBits = ComputeNumSignBits(Op: A);
      if (NumSignBits == A->getType()->getScalarSizeInBits() &&
          NumSignBits <= Ty->getScalarSizeInBits())
        return Builder.CreateTrunc(V: A, DestTy: CmpInst::makeCmpResultType(opnd_type: A->getType()));
    }
    return nullptr;
  }

  // TODO: add support for sext and constant case
  if (ABIsTheSame)
    return nullptr;

  // If both operands are constants, see if the constants are inverse bitmasks.
  Constant *AConst, *BConst;
  if (match(V: A, P: m_Constant(C&: AConst)) && match(V: B, P: m_Constant(C&: BConst)))
    if (AConst == ConstantExpr::getNot(C: BConst) &&
        ComputeNumSignBits(Op: A) == Ty->getScalarSizeInBits())
      return Builder.CreateZExtOrTrunc(V: A, DestTy: CmpInst::makeCmpResultType(opnd_type: Ty));

  // Look for more complex patterns. The 'not' op may be hidden behind various
  // casts. Look through sexts and bitcasts to find the booleans.
  Value *Cond;
  Value *NotB;
  if (match(V: A, P: m_SExt(Op: m_Value(V&: Cond))) &&
      Cond->getType()->isIntOrIntVectorTy(BitWidth: 1)) {
    // A = sext i1 Cond; B = sext (not (i1 Cond))
    if (match(V: B, P: m_SExt(Op: m_Not(V: m_Specific(V: Cond)))))
      return Cond;

    // A = sext i1 Cond; B = not ({bitcast} (sext (i1 Cond)))
    // TODO: The one-use checks are unnecessary or misplaced. If the caller
    //       checked for uses on logic ops/casts, that should be enough to
    //       make this transform worthwhile.
    if (match(V: B, P: m_OneUse(SubPattern: m_Not(V: m_Value(V&: NotB))))) {
      NotB = peekThroughBitcast(V: NotB, OneUseOnly: true);
      if (match(V: NotB, P: m_SExt(Op: m_Specific(V: Cond))))
        return Cond;
    }
  }

  // All scalar (and most vector) possibilities should be handled now.
  // Try more matches that only apply to non-splat constant vectors.
  if (!Ty->isVectorTy())
    return nullptr;

  // If both operands are xor'd with constants using the same sexted boolean
  // operand, see if the constants are inverse bitmasks.
  // TODO: Use ConstantExpr::getNot()?
  if (match(V: A, P: (m_Xor(L: m_SExt(Op: m_Value(V&: Cond)), R: m_Constant(C&: AConst)))) &&
      match(V: B, P: (m_Xor(L: m_SExt(Op: m_Specific(V: Cond)), R: m_Constant(C&: BConst)))) &&
      Cond->getType()->isIntOrIntVectorTy(BitWidth: 1) &&
      areInverseVectorBitmasks(C1: AConst, C2: BConst)) {
    // Flip Cond in the lanes where AConst selects the other operand.
    AConst = ConstantExpr::getTrunc(C: AConst, Ty: CmpInst::makeCmpResultType(opnd_type: Ty));
    return Builder.CreateXor(LHS: Cond, RHS: AConst);
  }
  return nullptr;
}
3293
/// We have an expression of the form (A & B) | (C & D). Try to simplify this
/// to "A' ? B : D", where A' is a boolean or vector of booleans.
/// When InvertFalseVal is set to true, we try to match the pattern
/// where we have peeked through a 'not' op and A and C are the same:
/// (A & B) | ~(A | D) --> (A & B) | (~A & ~D) --> A' ? B : ~D
/// Returns the new select (bitcast back to the original type) or nullptr.
Value *InstCombinerImpl::matchSelectFromAndOr(Value *A, Value *B, Value *C,
                                              Value *D, bool InvertFalseVal) {
  // The potential condition of the select may be bitcasted. In that case, look
  // through its bitcast and the corresponding bitcast of the 'not' condition.
  Type *OrigType = A->getType();
  A = peekThroughBitcast(V: A, OneUseOnly: true);
  C = peekThroughBitcast(V: C, OneUseOnly: true);
  if (Value *Cond = getSelectCondition(A, B: C, ABIsTheSame: InvertFalseVal)) {
    // ((bc Cond) & B) | ((bc ~Cond) & D) --> bc (select Cond, (bc B), (bc D))
    // If this is a vector, we may need to cast to match the condition's length.
    // The bitcasts will either all exist or all not exist. The builder will
    // not create unnecessary casts if the types already match.
    Type *SelTy = A->getType();
    if (auto *VecTy = dyn_cast<VectorType>(Val: Cond->getType())) {
      // For a fixed or scalable vector get N from <{vscale x} N x iM>
      unsigned Elts = VecTy->getElementCount().getKnownMinValue();
      // For a fixed or scalable vector, get the size in bits of N x iM; for a
      // scalar this is just M.
      unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinValue();
      // Re-split the select type into Elts lanes so it lines up with Cond.
      Type *EltTy = Builder.getIntNTy(N: SelEltSize / Elts);
      SelTy = VectorType::get(ElementType: EltTy, EC: VecTy->getElementCount());
    }
    Value *BitcastB = Builder.CreateBitCast(V: B, DestTy: SelTy);
    if (InvertFalseVal)
      D = Builder.CreateNot(V: D);
    Value *BitcastD = Builder.CreateBitCast(V: D, DestTy: SelTy);
    Value *Select = Builder.CreateSelect(C: Cond, True: BitcastB, False: BitcastD);
    return Builder.CreateBitCast(V: Select, DestTy: OrigType);
  }

  return nullptr;
}
3331
// (icmp eq X, C) | (icmp ult Other, (X - C)) -> (icmp ule Other, (X - (C + 1)))
// (icmp ne X, C) & (icmp uge Other, (X - C)) -> (icmp ugt Other, (X - (C + 1)))
static Value *foldAndOrOfICmpEqConstantAndICmp(ICmpInst *LHS, ICmpInst *RHS,
                                               bool IsAnd, bool IsLogical,
                                               IRBuilderBase &Builder) {
  Value *LHS0 = LHS->getOperand(i_nocapture: 0);
  Value *RHS0 = RHS->getOperand(i_nocapture: 0);
  Value *RHS1 = RHS->getOperand(i_nocapture: 1);

  // For the 'and' form, work on the inverted predicates (De Morgan) so both
  // forms reduce to the same eq/ult matching below.
  ICmpInst::Predicate LPred =
      IsAnd ? LHS->getInversePredicate() : LHS->getPredicate();
  ICmpInst::Predicate RPred =
      IsAnd ? RHS->getInversePredicate() : RHS->getPredicate();

  // Require LHS to be (icmp eq X, C) with integer X and at least one of the
  // two compares going away (one-use) so the fold is profitable.
  const APInt *CInt;
  if (LPred != ICmpInst::ICMP_EQ ||
      !match(V: LHS->getOperand(i_nocapture: 1), P: m_APIntAllowPoison(Res&: CInt)) ||
      !LHS0->getType()->isIntOrIntVectorTy() ||
      !(LHS->hasOneUse() || RHS->hasOneUse()))
    return nullptr;

  // True if RHSOp is (X - C): written as (X + -C), or X itself when C == 0.
  auto MatchRHSOp = [LHS0, CInt](const Value *RHSOp) {
    return match(V: RHSOp,
                 P: m_Add(L: m_Specific(V: LHS0), R: m_SpecificIntAllowPoison(V: -*CInt))) ||
           (CInt->isZero() && RHSOp == LHS0);
  };

  Value *Other;
  if (RPred == ICmpInst::ICMP_ULT && MatchRHSOp(RHS1))
    Other = RHS0;
  else if (RPred == ICmpInst::ICMP_UGT && MatchRHSOp(RHS0))
    Other = RHS1;
  else
    return nullptr;

  // In select (logical) form the transform must be poison-safe; freeze Other
  // since it is now evaluated unconditionally.
  if (IsLogical)
    Other = Builder.CreateFreeze(V: Other);

  return Builder.CreateICmp(
      P: IsAnd ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE,
      LHS: Builder.CreateSub(LHS: LHS0, RHS: ConstantInt::get(Ty: LHS0->getType(), V: *CInt + 1)),
      RHS: Other);
}
3375
/// Fold (icmp)&(icmp) or (icmp)|(icmp) if possible.
/// If IsLogical is true, then the and/or is in select form and the transform
/// must be poison-safe.
Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                          Instruction &I, bool IsAnd,
                                          bool IsLogical) {
  const SimplifyQuery Q = SQ.getWithInstruction(I: &I);

  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  Value *LHS0 = LHS->getOperand(i_nocapture: 0), *RHS0 = RHS->getOperand(i_nocapture: 0);
  Value *LHS1 = LHS->getOperand(i_nocapture: 1), *RHS1 = RHS->getOperand(i_nocapture: 1);

  // Capture constant RHS operands up front; several folds below require them
  // and check the pointers for null before use.
  const APInt *LHSC = nullptr, *RHSC = nullptr;
  match(V: LHS1, P: m_APInt(Res&: LHSC));
  match(V: RHS1, P: m_APInt(Res&: RHSC));

  // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
  if (predicatesFoldable(P1: PredL, P2: PredR)) {
    // Commute the LHS compare if its operands are swapped relative to RHS.
    if (LHS0 == RHS1 && LHS1 == RHS0) {
      PredL = ICmpInst::getSwappedPredicate(pred: PredL);
      std::swap(a&: LHS0, b&: LHS1);
    }
    // Combine the predicate codes bitwise and rebuild a single compare.
    if (LHS0 == RHS0 && LHS1 == RHS1) {
      unsigned Code = IsAnd ? getICmpCode(Pred: PredL) & getICmpCode(Pred: PredR)
                            : getICmpCode(Pred: PredL) | getICmpCode(Pred: PredR);
      bool IsSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(Code, Sign: IsSigned, LHS: LHS0, RHS: LHS1, Builder);
    }
  }

  if (Value *V =
          foldAndOrOfICmpEqConstantAndICmp(LHS, RHS, IsAnd, IsLogical, Builder))
    return V;
  // We can treat logical like bitwise here, because both operands are used on
  // the LHS, and as such poison from both will propagate.
  if (Value *V = foldAndOrOfICmpEqConstantAndICmp(LHS: RHS, RHS: LHS, IsAnd,
                                                  /*IsLogical*/ false, Builder))
    return V;

  if (Value *V = foldAndOrOfICmpsWithConstEq(Cmp0: LHS, Cmp1: RHS, IsAnd, IsLogical,
                                             Builder, Q, I))
    return V;
  // We can convert this case to bitwise and, because both operands are used
  // on the LHS, and as such poison from both will propagate.
  if (Value *V = foldAndOrOfICmpsWithConstEq(
          Cmp0: RHS, Cmp1: LHS, IsAnd, /*IsLogical=*/false, Builder, Q, I)) {
    // If RHS is still used, we should drop samesign flag.
    if (IsLogical && RHS->hasSameSign() && !RHS->use_empty()) {
      RHS->setSameSign(false);
      addToWorklist(I: RHS);
    }
    return V;
  }

  if (Value *V = foldIsPowerOf2OrZero(Cmp0: LHS, Cmp1: RHS, IsAnd, Builder, IC&: *this))
    return V;
  if (Value *V = foldIsPowerOf2OrZero(Cmp0: RHS, Cmp1: LHS, IsAnd, Builder, IC&: *this))
    return V;

  // TODO: One of these directions is fine with logical and/or, the other could
  // be supported by inserting freeze.
  if (!IsLogical) {
    // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
    // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
    if (Value *V = simplifyRangeCheck(Cmp0: LHS, Cmp1: RHS, /*Inverted=*/!IsAnd))
      return V;

    // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
    // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
    if (Value *V = simplifyRangeCheck(Cmp0: RHS, Cmp1: LHS, /*Inverted=*/!IsAnd))
      return V;
  }

  // TODO: Add conjugated or fold, check whether it is safe for logical and/or.
  if (IsAnd && !IsLogical)
    if (Value *V = foldSignedTruncationCheck(ICmp0: LHS, ICmp1: RHS, CxtI&: I, Builder))
      return V;

  if (Value *V = foldIsPowerOf2(Cmp0: LHS, Cmp1: RHS, JoinedByAnd: IsAnd, Builder, IC&: *this))
    return V;

  if (Value *V = foldPowerOf2AndShiftedMask(Cmp0: LHS, Cmp1: RHS, JoinedByAnd: IsAnd, Builder))
    return V;

  // TODO: Verify whether this is safe for logical and/or.
  if (!IsLogical) {
    if (Value *X = foldUnsignedUnderflowCheck(ZeroICmp: LHS, UnsignedICmp: RHS, IsAnd, Q, Builder))
      return X;
    if (Value *X = foldUnsignedUnderflowCheck(ZeroICmp: RHS, UnsignedICmp: LHS, IsAnd, Q, Builder))
      return X;
  }

  // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
  // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
  // TODO: Remove this and below when foldLogOpOfMaskedICmps can handle undefs.
  if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
      PredL == PredR && match(V: LHS1, P: m_ZeroInt()) && match(V: RHS1, P: m_ZeroInt()) &&
      LHS0->getType() == RHS0->getType() &&
      (!IsLogical || isGuaranteedNotToBePoison(V: RHS0))) {
    Value *NewOr = Builder.CreateOr(LHS: LHS0, RHS: RHS0);
    return Builder.CreateICmp(P: PredL, LHS: NewOr,
                              RHS: Constant::getNullValue(Ty: NewOr->getType()));
  }

  // (icmp ne A, -1) | (icmp ne B, -1) --> (icmp ne (A&B), -1)
  // (icmp eq A, -1) & (icmp eq B, -1) --> (icmp eq (A&B), -1)
  if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
      PredL == PredR && match(V: LHS1, P: m_AllOnes()) && match(V: RHS1, P: m_AllOnes()) &&
      LHS0->getType() == RHS0->getType() &&
      (!IsLogical || isGuaranteedNotToBePoison(V: RHS0))) {
    Value *NewAnd = Builder.CreateAnd(LHS: LHS0, RHS: RHS0);
    return Builder.CreateICmp(P: PredL, LHS: NewAnd,
                              RHS: Constant::getAllOnesValue(Ty: LHS0->getType()));
  }

  if (!IsLogical)
    if (Value *V =
            foldAndOrOfICmpsWithPow2AndWithZero(Builder, LHS, RHS, IsAnd, Q))
      return V;

  // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
  if (!LHSC || !RHSC)
    return nullptr;

  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
  // (trunc x) != C1 | (and x, CA) != C2 -> (and x, CA|CMAX) != C1|C2
  // where CMAX is the all ones value for the truncated type,
  // iff the lower bits of C2 and CA are zero.
  if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
      PredL == PredR && LHS->hasOneUse() && RHS->hasOneUse()) {
    Value *V;
    const APInt *AndC, *SmallC = nullptr, *BigC = nullptr;

    // (trunc x) == C1 & (and x, CA) == C2
    // (and x, CA) == C2 & (trunc x) == C1
    if (match(V: RHS0, P: m_Trunc(Op: m_Value(V))) &&
        match(V: LHS0, P: m_And(L: m_Specific(V), R: m_APInt(Res&: AndC)))) {
      SmallC = RHSC;
      BigC = LHSC;
    } else if (match(V: LHS0, P: m_Trunc(Op: m_Value(V))) &&
               match(V: RHS0, P: m_And(L: m_Specific(V), R: m_APInt(Res&: AndC)))) {
      SmallC = LHSC;
      BigC = RHSC;
    }

    if (SmallC && BigC) {
      unsigned BigBitSize = BigC->getBitWidth();
      unsigned SmallBitSize = SmallC->getBitWidth();

      // Check that the low bits are zero.
      APInt Low = APInt::getLowBitsSet(numBits: BigBitSize, loBitsSet: SmallBitSize);
      if ((Low & *AndC).isZero() && (Low & *BigC).isZero()) {
        Value *NewAnd = Builder.CreateAnd(LHS: V, RHS: Low | *AndC);
        APInt N = SmallC->zext(width: BigBitSize) | *BigC;
        Value *NewVal = ConstantInt::get(Ty: NewAnd->getType(), V: N);
        return Builder.CreateICmp(P: PredL, LHS: NewAnd, RHS: NewVal);
      }
    }
  }

  // Match naive pattern (and its inverted form) for checking if two values
  // share same sign. An example of the pattern:
  // (icmp slt (X & Y), 0) | (icmp sgt (X | Y), -1) -> (icmp sgt (X ^ Y), -1)
  // Inverted form (example):
  // (icmp slt (X | Y), 0) & (icmp sgt (X & Y), -1) -> (icmp slt (X ^ Y), 0)
  bool TrueIfSignedL, TrueIfSignedR;
  if (isSignBitCheck(Pred: PredL, RHS: *LHSC, TrueIfSigned&: TrueIfSignedL) &&
      isSignBitCheck(Pred: PredR, RHS: *RHSC, TrueIfSigned&: TrueIfSignedR) &&
      (RHS->hasOneUse() || LHS->hasOneUse())) {
    Value *X, *Y;
    if (IsAnd) {
      if ((TrueIfSignedL && !TrueIfSignedR &&
           match(V: LHS0, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y))) &&
           match(V: RHS0, P: m_c_And(L: m_Specific(V: X), R: m_Specific(V: Y)))) ||
          (!TrueIfSignedL && TrueIfSignedR &&
           match(V: LHS0, P: m_And(L: m_Value(V&: X), R: m_Value(V&: Y))) &&
           match(V: RHS0, P: m_c_Or(L: m_Specific(V: X), R: m_Specific(V: Y))))) {
        Value *NewXor = Builder.CreateXor(LHS: X, RHS: Y);
        return Builder.CreateIsNeg(Arg: NewXor);
      }
    } else {
      if ((TrueIfSignedL && !TrueIfSignedR &&
           match(V: LHS0, P: m_And(L: m_Value(V&: X), R: m_Value(V&: Y))) &&
           match(V: RHS0, P: m_c_Or(L: m_Specific(V: X), R: m_Specific(V: Y)))) ||
          (!TrueIfSignedL && TrueIfSignedR &&
           match(V: LHS0, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y))) &&
           match(V: RHS0, P: m_c_And(L: m_Specific(V: X), R: m_Specific(V: Y))))) {
        Value *NewXor = Builder.CreateXor(LHS: X, RHS: Y);
        return Builder.CreateIsNotNeg(Arg: NewXor);
      }
    }
  }

  // (X & ExpMask) != 0 && (X & ExpMask) != ExpMask -> isnormal(X)
  // (X & ExpMask) == 0 || (X & ExpMask) == ExpMask -> !isnormal(X)
  Value *X;
  const APInt *MaskC;
  if (LHS0 == RHS0 && PredL == PredR &&
      PredL == (IsAnd ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ) &&
      !I.getFunction()->hasFnAttribute(Kind: Attribute::NoImplicitFloat) &&
      LHS->hasOneUse() && RHS->hasOneUse() &&
      match(V: LHS0, P: m_And(L: m_ElementWiseBitCast(Op: m_Value(V&: X)), R: m_APInt(Res&: MaskC))) &&
      X->getType()->getScalarType()->isIEEELikeFPTy() &&
      APFloat(X->getType()->getScalarType()->getFltSemantics(), *MaskC)
          .isPosInfinity() &&
      ((LHSC->isZero() && *RHSC == *MaskC) ||
       (RHSC->isZero() && *LHSC == *MaskC)))
    return Builder.createIsFPClass(FPNum: X, Test: IsAnd ? FPClassTest::fcNormal
                                              : ~FPClassTest::fcNormal);

  // Finally, fall back to range-based reasoning over the constant operands.
  return foldAndOrOfICmpsUsingRanges(ICmp1: LHS, ICmp2: RHS, IsAnd);
}
3589
3590/// If IsLogical is true, then the and/or is in select form and the transform
3591/// must be poison-safe.
3592Value *InstCombinerImpl::foldBooleanAndOr(Value *LHS, Value *RHS,
3593 Instruction &I, bool IsAnd,
3594 bool IsLogical) {
3595 if (!LHS->getType()->isIntOrIntVectorTy(BitWidth: 1))
3596 return nullptr;
3597
3598 // handle (roughly):
3599 // (icmp ne (A & B), C) | (icmp ne (A & D), E)
3600 // (icmp eq (A & B), C) & (icmp eq (A & D), E)
3601 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, IsAnd, IsLogical, Builder,
3602 Q: SQ.getWithInstruction(I: &I)))
3603 return V;
3604
3605 if (auto *LHSCmp = dyn_cast<ICmpInst>(Val: LHS))
3606 if (auto *RHSCmp = dyn_cast<ICmpInst>(Val: RHS))
3607 if (Value *Res = foldAndOrOfICmps(LHS: LHSCmp, RHS: RHSCmp, I, IsAnd, IsLogical))
3608 return Res;
3609
3610 if (auto *LHSCmp = dyn_cast<FCmpInst>(Val: LHS))
3611 if (auto *RHSCmp = dyn_cast<FCmpInst>(Val: RHS))
3612 if (Value *Res = foldLogicOfFCmps(LHS: LHSCmp, RHS: RHSCmp, IsAnd, IsLogicalSelect: IsLogical))
3613 return Res;
3614
3615 if (Value *Res = foldEqOfParts(Cmp0: LHS, Cmp1: RHS, IsAnd))
3616 return Res;
3617
3618 return nullptr;
3619}
3620
3621static Value *foldOrOfInversions(BinaryOperator &I,
3622 InstCombiner::BuilderTy &Builder) {
3623 assert(I.getOpcode() == Instruction::Or &&
3624 "Simplification only supports or at the moment.");
3625
3626 Value *Cmp1, *Cmp2, *Cmp3, *Cmp4;
3627 if (!match(V: I.getOperand(i_nocapture: 0), P: m_And(L: m_Value(V&: Cmp1), R: m_Value(V&: Cmp2))) ||
3628 !match(V: I.getOperand(i_nocapture: 1), P: m_And(L: m_Value(V&: Cmp3), R: m_Value(V&: Cmp4))))
3629 return nullptr;
3630
3631 // Check if any two pairs of the and operations are inversions of each other.
3632 if (isKnownInversion(X: Cmp1, Y: Cmp3) && isKnownInversion(X: Cmp2, Y: Cmp4))
3633 return Builder.CreateXor(LHS: Cmp1, RHS: Cmp4);
3634 if (isKnownInversion(X: Cmp1, Y: Cmp4) && isKnownInversion(X: Cmp2, Y: Cmp3))
3635 return Builder.CreateXor(LHS: Cmp1, RHS: Cmp3);
3636
3637 return nullptr;
3638}
3639
/// Match \p V as "shufflevector -> bitcast" or "extractelement -> zext -> shl"
/// patterns, which extract vector elements and pack them in the same relative
/// positions.
///
/// \p Vec is the underlying vector being extracted from.
/// \p Mask is a bitmask identifying which packed elements are obtained from the
/// vector.
/// \p VecOffset is the vector element corresponding to index 0 of the
/// mask.
///
/// Note: \p Mask is accumulated across calls - the caller passes the same
/// SmallBitVector for both halves of the pack so the lanes can be combined.
static bool matchSubIntegerPackFromVector(Value *V, Value *&Vec,
                                          int64_t &VecOffset,
                                          SmallBitVector &Mask,
                                          const DataLayout &DL) {
  // First try to match extractelement -> zext -> shl
  uint64_t VecIdx, ShlAmt;
  if (match(V, P: m_ShlOrSelf(L: m_ZExtOrSelf(Op: m_ExtractElt(Val: m_Value(V&: Vec),
                                                        Idx: m_ConstantInt(V&: VecIdx))),
                            R&: ShlAmt))) {
    auto *VecTy = dyn_cast<FixedVectorType>(Val: Vec->getType());
    if (!VecTy)
      return false;
    auto *EltTy = dyn_cast<IntegerType>(Val: VecTy->getElementType());
    if (!EltTy)
      return false;

    // The target integer must be a whole number of elements wide and the
    // shift must land the element on an element boundary.
    const unsigned EltBitWidth = EltTy->getBitWidth();
    const unsigned TargetBitWidth = V->getType()->getIntegerBitWidth();
    if (TargetBitWidth % EltBitWidth != 0 || ShlAmt % EltBitWidth != 0)
      return false;
    const unsigned TargetEltWidth = TargetBitWidth / EltBitWidth;
    const unsigned ShlEltAmt = ShlAmt / EltBitWidth;

    // Map the element-granular shift amount to a packed-lane index; the
    // mapping is mirrored on big-endian layouts.
    const unsigned MaskIdx =
        DL.isLittleEndian() ? ShlEltAmt : TargetEltWidth - ShlEltAmt - 1;

    VecOffset = static_cast<int64_t>(VecIdx) - static_cast<int64_t>(MaskIdx);
    Mask.resize(N: TargetEltWidth);
    Mask.set(MaskIdx);
    return true;
  }

  // Now try to match a bitcasted subvector.
  Instruction *SrcVecI;
  if (!match(V, P: m_BitCast(Op: m_Instruction(I&: SrcVecI))))
    return false;

  auto *SrcTy = dyn_cast<FixedVectorType>(Val: SrcVecI->getType());
  if (!SrcTy)
    return false;

  Mask.resize(N: SrcTy->getNumElements());

  // First check for a subvector obtained from a shufflevector.
  if (isa<ShuffleVectorInst>(Val: SrcVecI)) {
    Constant *ConstVec;
    ArrayRef<int> ShuffleMask;
    if (!match(V: SrcVecI, P: m_Shuffle(v1: m_Value(V&: Vec), v2: m_Constant(C&: ConstVec),
                                   mask: m_Mask(ShuffleMask))))
      return false;

    auto *VecTy = dyn_cast<FixedVectorType>(Val: Vec->getType());
    if (!VecTy)
      return false;

    const unsigned NumVecElts = VecTy->getNumElements();
    bool FoundVecOffset = false;
    for (unsigned Idx = 0; Idx < ShuffleMask.size(); ++Idx) {
      if (ShuffleMask[Idx] == PoisonMaskElem)
        return false;
      const unsigned ShuffleIdx = ShuffleMask[Idx];
      // Lanes drawn from the second shuffle operand must be zero constants.
      if (ShuffleIdx >= NumVecElts) {
        const unsigned ConstIdx = ShuffleIdx - NumVecElts;
        auto *ConstElt =
            dyn_cast<ConstantInt>(Val: ConstVec->getAggregateElement(Elt: ConstIdx));
        if (!ConstElt || !ConstElt->isNullValue())
          return false;
        continue;
      }

      // All vector-sourced lanes must use one consistent, non-negative offset.
      if (FoundVecOffset) {
        if (VecOffset + Idx != ShuffleIdx)
          return false;
      } else {
        if (ShuffleIdx < Idx)
          return false;
        VecOffset = ShuffleIdx - Idx;
        FoundVecOffset = true;
      }
      Mask.set(Idx);
    }
    return FoundVecOffset;
  }

  // Check for a subvector obtained as an (insertelement V, 0, idx)
  uint64_t InsertIdx;
  if (!match(V: SrcVecI,
             P: m_InsertElt(Val: m_Value(V&: Vec), Elt: m_Zero(), Idx: m_ConstantInt(V&: InsertIdx))))
    return false;

  auto *VecTy = dyn_cast<FixedVectorType>(Val: Vec->getType());
  if (!VecTy)
    return false;
  VecOffset = 0;
  // Every lane comes straight from Vec except the zeroed one at InsertIdx -
  // unless a previous match (in the shared Mask) already claimed that lane.
  bool AlreadyInsertedMaskedElt = Mask.test(Idx: InsertIdx);
  Mask.set();
  if (!AlreadyInsertedMaskedElt)
    Mask.reset(Idx: InsertIdx);
  return true;
}
3749
/// Try to fold the join of two scalar integers whose contents are packed
/// elements of the same vector.
///
/// Both `or` operands must pack elements of the same vector at the same
/// offset; the shared \p Mask accumulates the slots covered by either side.
/// On success the `or` is rewritten as shufflevector -> bitcast.
static Instruction *foldIntegerPackFromVector(Instruction &I,
                                              InstCombiner::BuilderTy &Builder,
                                              const DataLayout &DL) {
  assert(I.getOpcode() == Instruction::Or);
  Value *LhsVec, *RhsVec;
  int64_t LhsVecOffset, RhsVecOffset;
  SmallBitVector Mask;
  if (!matchSubIntegerPackFromVector(V: I.getOperand(i: 0), Vec&: LhsVec, VecOffset&: LhsVecOffset,
                                     Mask, DL))
    return nullptr;
  if (!matchSubIntegerPackFromVector(V: I.getOperand(i: 1), Vec&: RhsVec, VecOffset&: RhsVecOffset,
                                     Mask, DL))
    return nullptr;
  // The two matches must refer to the same vector with identical alignment.
  if (LhsVec != RhsVec || LhsVecOffset != RhsVecOffset)
    return nullptr;

  // Convert into shufflevector -> bitcast: covered slots select the
  // corresponding source element; uncovered slots select a zero from the
  // null vector used as the second shuffle operand (index ZeroVecIdx+).
  const unsigned ZeroVecIdx =
      cast<FixedVectorType>(Val: LhsVec->getType())->getNumElements();
  SmallVector<int> ShuffleMask(Mask.size(), ZeroVecIdx);
  for (unsigned Idx : Mask.set_bits()) {
    assert(LhsVecOffset + Idx >= 0);
    ShuffleMask[Idx] = LhsVecOffset + Idx;
  }

  Value *MaskedVec = Builder.CreateShuffleVector(
      V1: LhsVec, V2: Constant::getNullValue(Ty: LhsVec->getType()), Mask: ShuffleMask,
      Name: I.getName() + ".v");
  return CastInst::Create(Instruction::BitCast, S: MaskedVec, Ty: I.getType());
}
3782
/// Match \p V as "lshr -> mask -> zext -> shl".
///
/// \p Int is the underlying integer being extracted from.
/// \p Mask is a bitmask identifying which bits of the integer are being
/// extracted, expressed in terms of \p Int (already shifted left by the lshr
/// amount). \p Offset identifies which bit of the result \p V corresponds to
/// the least significant bit of \p Int. \p IsShlNUW / \p IsShlNSW report the
/// wrap flags of the outer shl so the caller can preserve them.
static bool matchZExtedSubInteger(Value *V, Value *&Int, APInt &Mask,
                                  uint64_t &Offset, bool &IsShlNUW,
                                  bool &IsShlNSW) {
  Value *ShlOp0;
  uint64_t ShlAmt = 0;
  if (!match(V, P: m_OneUse(SubPattern: m_Shl(L: m_Value(V&: ShlOp0), R: m_ConstantInt(V&: ShlAmt)))))
    return false;

  IsShlNUW = cast<BinaryOperator>(Val: V)->hasNoUnsignedWrap();
  IsShlNSW = cast<BinaryOperator>(Val: V)->hasNoSignedWrap();

  Value *ZExtOp0;
  if (!match(V: ShlOp0, P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: ZExtOp0)))))
    return false;

  // The `and` layer is optional; without it the mask covers every bit the
  // (optional) lshr leaves behind.
  Value *MaskedOp0;
  const APInt *ShiftedMaskConst = nullptr;
  if (!match(V: ZExtOp0, P: m_CombineOr(L: m_OneUse(SubPattern: m_And(L: m_Value(V&: MaskedOp0),
                                         R: m_APInt(Res&: ShiftedMaskConst))),
                              R: m_Value(V&: MaskedOp0))))
    return false;

  // The `lshr` layer is also optional; LShrAmt stays 0 if absent.
  uint64_t LShrAmt = 0;
  if (!match(V: MaskedOp0,
             P: m_CombineOr(L: m_OneUse(SubPattern: m_LShr(L: m_Value(V&: Int), R: m_ConstantInt(V&: LShrAmt))),
                          R: m_Value(V&: Int))))
    return false;

  // Only handle extractions where the bits move up (or stay in place), so
  // Offset is non-negative.
  if (LShrAmt > ShlAmt)
    return false;
  Offset = ShlAmt - LShrAmt;

  // Re-express the mask relative to Int: either the matched constant shifted
  // back up by the lshr amount, or "all bits from LShrAmt upward".
  Mask = ShiftedMaskConst ? ShiftedMaskConst->shl(shiftAmt: LShrAmt)
                          : APInt::getBitsSetFrom(
                                numBits: Int->getType()->getScalarSizeInBits(), loBit: LShrAmt);

  return true;
}
3827
/// Try to fold the join of two scalar integers whose bits are unpacked and
/// zexted from the same source integer.
///
/// Both operands must extract from the same integer at the same offset; the
/// two bit-masks are then merged:
///   (zext (Int & M0) << Off) | (zext (Int & M1) << Off)
///     --> zext (Int & (M0 | M1)) << Off
static Value *foldIntegerRepackThroughZExt(Value *Lhs, Value *Rhs,
                                           InstCombiner::BuilderTy &Builder) {

  Value *LhsInt, *RhsInt;
  APInt LhsMask, RhsMask;
  uint64_t LhsOffset, RhsOffset;
  bool IsLhsShlNUW, IsLhsShlNSW, IsRhsShlNUW, IsRhsShlNSW;
  if (!matchZExtedSubInteger(V: Lhs, Int&: LhsInt, Mask&: LhsMask, Offset&: LhsOffset, IsShlNUW&: IsLhsShlNUW,
                             IsShlNSW&: IsLhsShlNSW))
    return nullptr;
  if (!matchZExtedSubInteger(V: Rhs, Int&: RhsInt, Mask&: RhsMask, Offset&: RhsOffset, IsShlNUW&: IsRhsShlNUW,
                             IsShlNSW&: IsRhsShlNSW))
    return nullptr;
  // Same source and same placement are required for a single-chain rebuild.
  if (LhsInt != RhsInt || LhsOffset != RhsOffset)
    return nullptr;

  APInt Mask = LhsMask | RhsMask;

  // Rebuild as one and -> zext -> shl chain. The new shl keeps a wrap flag
  // only when both original shifts carried it.
  Type *DestTy = Lhs->getType();
  Value *Res = Builder.CreateShl(
      LHS: Builder.CreateZExt(
          V: Builder.CreateAnd(LHS: LhsInt, RHS: Mask, Name: LhsInt->getName() + ".mask"), DestTy,
          Name: LhsInt->getName() + ".zext"),
      RHS: ConstantInt::get(Ty: DestTy, V: LhsOffset), Name: "", HasNUW: IsLhsShlNUW && IsRhsShlNUW,
      HasNSW: IsLhsShlNSW && IsRhsShlNSW);
  Res->takeName(V: Lhs);
  return Res;
}
3858
3859// A decomposition of ((X & Mask) * Factor). The NUW / NSW bools
3860// track these properities for preservation. Note that we can decompose
3861// equivalent select form of this expression (e.g. (!(X & Mask) ? 0 : Mask *
3862// Factor))
3863struct DecomposedBitMaskMul {
3864 Value *X;
3865 APInt Factor;
3866 APInt Mask;
3867 bool NUW;
3868 bool NSW;
3869
3870 bool isCombineableWith(const DecomposedBitMaskMul Other) {
3871 return X == Other.X && !Mask.intersects(RHS: Other.Mask) &&
3872 Factor == Other.Factor;
3873 }
3874};
3875
/// Decompose \p V into ((X & Mask) * Factor) form if possible, accepting
/// either the explicit multiply or the equivalent select-of-constants form.
static std::optional<DecomposedBitMaskMul> matchBitmaskMul(Value *V) {
  Instruction *Op = dyn_cast<Instruction>(Val: V);
  if (!Op)
    return std::nullopt;

  // Decompose (A & N) * C) into BitMaskMul
  Value *Original = nullptr;
  const APInt *Mask = nullptr;
  const APInt *MulConst = nullptr;
  if (match(V: Op, P: m_Mul(L: m_And(L: m_Value(V&: Original), R: m_APInt(Res&: Mask)),
                       R: m_APInt(Res&: MulConst)))) {
    // A zero factor or zero mask is degenerate; reject it.
    if (MulConst->isZero() || Mask->isZero())
      return std::nullopt;

    return std::optional<DecomposedBitMaskMul>(
        {.X: Original, .Factor: *MulConst, .Mask: *Mask,
         .NUW: cast<BinaryOperator>(Val: Op)->hasNoUnsignedWrap(),
         .NSW: cast<BinaryOperator>(Val: Op)->hasNoSignedWrap()});
  }

  Value *Cond = nullptr;
  const APInt *EqZero = nullptr, *NeZero = nullptr;

  // Decompose ((A & N) ? 0 : N * C) into BitMaskMul
  if (match(V: Op, P: m_Select(C: m_Value(V&: Cond), L: m_APInt(Res&: EqZero), R: m_APInt(Res&: NeZero)))) {
    // The condition must decompose to an equality bit-test of some value
    // against zero (trunc is looked through; the mask may come from an and).
    auto ICmpDecompose =
        decomposeBitTest(Cond, /*LookThroughTrunc=*/true,
                         /*AllowNonZeroC=*/false, /*DecomposeBitMask=*/DecomposeAnd: true);
    if (!ICmpDecompose.has_value())
      return std::nullopt;

    assert(ICmpInst::isEquality(ICmpDecompose->Pred) &&
           ICmpDecompose->C.isZero());

    // Normalize so EqZero is the arm chosen when the tested bits are zero.
    if (ICmpDecompose->Pred == ICmpInst::ICMP_NE)
      std::swap(a&: EqZero, b&: NeZero);

    if (!EqZero->isZero() || NeZero->isZero())
      return std::nullopt;

    // The select form only encodes a bitmask-multiply when a single bit is
    // tested and widths agree.
    if (!ICmpDecompose->Mask.isPowerOf2() || ICmpDecompose->Mask.isZero() ||
        NeZero->getBitWidth() != ICmpDecompose->Mask.getBitWidth())
      return std::nullopt;

    // The nonzero arm must be an exact multiple of the tested bit so the
    // factor (NeZero / Mask) is exact.
    if (!NeZero->urem(RHS: ICmpDecompose->Mask).isZero())
      return std::nullopt;

    // No wrap information can be recovered from the select form.
    return std::optional<DecomposedBitMaskMul>(
        {.X: ICmpDecompose->X, .Factor: NeZero->udiv(RHS: ICmpDecompose->Mask),
         .Mask: ICmpDecompose->Mask, /*NUW=*/false, /*NSW=*/false});
  }

  return std::nullopt;
}
3930
/// (A & N) * C + (A & M) * C -> (A & (N + M)) * C
/// This also accepts the equivalent select form of (A & N) * C
/// expressions i.e. !(A & N) ? 0 : N * C)
static Value *foldBitmaskMul(Value *Op0, Value *Op1,
                             InstCombiner::BuilderTy &Builder) {
  auto Decomp1 = matchBitmaskMul(V: Op1);
  if (!Decomp1)
    return nullptr;

  auto Decomp0 = matchBitmaskMul(V: Op0);
  if (!Decomp0)
    return nullptr;

  // Same source, same factor, disjoint masks: merge the masks and multiply
  // once. Wrap flags survive only when both original multiplies had them.
  if (Decomp0->isCombineableWith(Other: *Decomp1)) {
    Value *NewAnd = Builder.CreateAnd(
        LHS: Decomp0->X,
        RHS: ConstantInt::get(Ty: Decomp0->X->getType(), V: Decomp0->Mask + Decomp1->Mask));

    return Builder.CreateMul(
        LHS: NewAnd, RHS: ConstantInt::get(Ty: NewAnd->getType(), V: Decomp1->Factor), Name: "",
        HasNUW: Decomp0->NUW && Decomp1->NUW, HasNSW: Decomp0->NSW && Decomp1->NSW);
  }

  return nullptr;
}
3956
3957Value *InstCombinerImpl::foldDisjointOr(Value *LHS, Value *RHS) {
3958 if (Value *Res = foldBitmaskMul(Op0: LHS, Op1: RHS, Builder))
3959 return Res;
3960 if (Value *Res = foldIntegerRepackThroughZExt(Lhs: LHS, Rhs: RHS, Builder))
3961 return Res;
3962
3963 return nullptr;
3964}
3965
3966Value *InstCombinerImpl::reassociateDisjointOr(Value *LHS, Value *RHS) {
3967
3968 Value *X, *Y;
3969 if (match(V: RHS, P: m_OneUse(SubPattern: m_DisjointOr(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
3970 if (Value *Res = foldDisjointOr(LHS, RHS: X))
3971 return Builder.CreateOr(LHS: Res, RHS: Y, Name: "", /*IsDisjoint=*/true);
3972 if (Value *Res = foldDisjointOr(LHS, RHS: Y))
3973 return Builder.CreateOr(LHS: Res, RHS: X, Name: "", /*IsDisjoint=*/true);
3974 }
3975
3976 if (match(V: LHS, P: m_OneUse(SubPattern: m_DisjointOr(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
3977 if (Value *Res = foldDisjointOr(LHS: X, RHS))
3978 return Builder.CreateOr(LHS: Res, RHS: Y, Name: "", /*IsDisjoint=*/true);
3979 if (Value *Res = foldDisjointOr(LHS: Y, RHS))
3980 return Builder.CreateOr(LHS: Res, RHS: X, Name: "", /*IsDisjoint=*/true);
3981 }
3982
3983 return nullptr;
3984}
3985
/// Fold Res, Overflow = (umul.with.overflow x c1); (or Overflow (ugt Res c2))
/// --> (ugt x (c2/c1)). This code checks whether a multiplication of two
/// unsigned numbers (one is a constant) is mathematically greater than a
/// second constant.
static Value *foldOrUnsignedUMulOverflowICmp(BinaryOperator &I,
                                             InstCombiner::BuilderTy &Builder,
                                             const DataLayout &DL) {
  Value *WOV, *X;
  const APInt *C1, *C2;
  // "Overflowed, or the truncated product exceeds C2" is equivalent to the
  // exact product x*C1 exceeding C2, i.e. x > floor(C2/C1). C1 must be
  // nonzero both for the equivalence and for the udiv below.
  if (match(V: &I,
            P: m_c_Or(L: m_ExtractValue<1>(
                     V: m_Value(V&: WOV, Match: m_Intrinsic<Intrinsic::umul_with_overflow>(
                                  Op0: m_Value(V&: X), Op1: m_APInt(Res&: C1)))),
                 R: m_OneUse(SubPattern: m_SpecificCmp(MatchPred: ICmpInst::ICMP_UGT,
                                       L: m_ExtractValue<0>(V: m_Deferred(V: WOV)),
                                       R: m_APInt(Res&: C2))))) &&
      !C1->isZero()) {
    Constant *NewC = ConstantInt::get(Ty: X->getType(), V: C2->udiv(RHS: *C1));
    return Builder.CreateICmp(P: ICmpInst::ICMP_UGT, LHS: X, RHS: NewC);
  }
  return nullptr;
}
4008
/// Fold select(X >s 0, 0, -X) | smax(X, 0) --> abs(X)
///      select(X <s 0, -X, 0) | smax(X, 0) --> abs(X)
static Value *FoldOrOfSelectSmaxToAbs(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  Value *X;
  Value *Sel;
  if (match(V: &I,
            P: m_c_Or(L: m_Value(V&: Sel), R: m_OneUse(SubPattern: m_SMax(L: m_Value(V&: X), R: m_ZeroInt()))))) {
    // The select must yield -X on the negative side and 0 otherwise; or'ing
    // that with smax(X, 0) produces |X|. The two match calls cover both
    // polarities of the comparison.
    auto NegX = m_Neg(V: m_Specific(V: X));
    if (match(V: Sel, P: m_Select(C: m_SpecificICmp(MatchPred: ICmpInst::ICMP_SGT, L: m_Specific(V: X),
                                    R: m_ZeroInt()),
                       L: m_ZeroInt(), R: NegX)) ||
        match(V: Sel, P: m_Select(C: m_SpecificICmp(MatchPred: ICmpInst::ICMP_SLT, L: m_Specific(V: X),
                                    R: m_ZeroInt()),
                       L: NegX, R: m_ZeroInt())))
      // Second operand (getFalse) is the is_int_min_poison flag: disabled.
      return Builder.CreateBinaryIntrinsic(ID: Intrinsic::abs, LHS: X,
                                           RHS: Builder.getFalse());
  }
  return nullptr;
}
4029
/// Fold an or of two logical ands into a select:
///   (C && A) || (!C && B) --> select C, A, B
///   (!C && A) || (C && B) --> select C, B, A
/// (plus the commuted variants listed in the body). Branch-weight metadata
/// is copied from whichever operand is a select with a usable condition;
/// weights are swapped when that select's condition is the inverse of C.
Instruction *InstCombinerImpl::FoldOrOfLogicalAnds(Value *Op0, Value *Op1) {
  Value *C, *A, *B;
  // (C && A) || (!C && B)
  // (C && A) || (B && !C)
  // (A && C) || (!C && B)
  // (A && C) || (B && !C) (may require freeze)
  //
  // => select C, A, B
  if (match(V: Op1, P: m_c_LogicalAnd(L: m_Not(V: m_Value(V&: C)), R: m_Value(V&: B))) &&
      match(V: Op0, P: m_c_LogicalAnd(L: m_Specific(V: C), R: m_Value(V&: A)))) {
    auto *SelOp0 = dyn_cast<SelectInst>(Val: Op0);
    auto *SelOp1 = dyn_cast<SelectInst>(Val: Op1);

    // When both sides are selects and Op1's condition is literally `not` of
    // Op0's condition, C gains a second use; freeze it so every use observes
    // one consistent value of a potentially undef/poison C.
    bool MayNeedFreeze = SelOp0 && SelOp1 &&
                         match(V: SelOp1->getTrueValue(),
                               P: m_Not(V: m_Specific(V: SelOp0->getTrueValue())));
    if (MayNeedFreeze)
      C = Builder.CreateFreeze(V: C);
    // Metadata: Op0's select condition matches C directly (copy weights);
    // Op1's condition is !C (copy then swap); otherwise weights are unknown.
    if (!ProfcheckDisableMetadataFixes) {
      Value *C2 = nullptr, *A2 = nullptr, *B2 = nullptr;
      if (match(V: Op0, P: m_LogicalAnd(L: m_Specific(V: C), R: m_Value(V&: A2))) && SelOp0) {
        return SelectInst::Create(C, S1: A, S2: B, NameStr: "", InsertBefore: nullptr, MDFrom: SelOp0);
      } else if (match(V: Op1, P: m_LogicalAnd(L: m_Not(V: m_Value(V&: C2)), R: m_Value(V&: B2))) &&
                 SelOp1) {
        SelectInst *NewSI = SelectInst::Create(C, S1: A, S2: B, NameStr: "", InsertBefore: nullptr, MDFrom: SelOp1);
        NewSI->swapProfMetadata();
        return NewSI;
      } else {
        return createSelectInstWithUnknownProfile(C, S1: A, S2: B);
      }
    }
    return SelectInst::Create(C, S1: A, S2: B);
  }

  // (!C && A) || (C && B)
  // (A && !C) || (C && B)
  // (!C && A) || (B && C)
  // (A && !C) || (B && C) (may require freeze)
  //
  // => select C, B, A
  if (match(V: Op0, P: m_c_LogicalAnd(L: m_Not(V: m_Value(V&: C)), R: m_Value(V&: A))) &&
      match(V: Op1, P: m_c_LogicalAnd(L: m_Specific(V: C), R: m_Value(V&: B)))) {
    auto *SelOp0 = dyn_cast<SelectInst>(Val: Op0);
    auto *SelOp1 = dyn_cast<SelectInst>(Val: Op1);
    // Same freeze rationale as above, with the operand roles mirrored.
    bool MayNeedFreeze = SelOp0 && SelOp1 &&
                         match(V: SelOp0->getTrueValue(),
                               P: m_Not(V: m_Specific(V: SelOp1->getTrueValue())));
    if (MayNeedFreeze)
      C = Builder.CreateFreeze(V: C);
    // Mirror of the metadata handling above: here Op0's condition is !C
    // (copy then swap) and Op1's matches C directly (copy).
    if (!ProfcheckDisableMetadataFixes) {
      Value *C2 = nullptr, *A2 = nullptr, *B2 = nullptr;
      if (match(V: Op0, P: m_LogicalAnd(L: m_Not(V: m_Value(V&: C2)), R: m_Value(V&: A2))) && SelOp0) {
        SelectInst *NewSI = SelectInst::Create(C, S1: B, S2: A, NameStr: "", InsertBefore: nullptr, MDFrom: SelOp0);
        NewSI->swapProfMetadata();
        return NewSI;
      } else if (match(V: Op1, P: m_LogicalAnd(L: m_Specific(V: C), R: m_Value(V&: B2))) &&
                 SelOp1) {
        return SelectInst::Create(C, S1: B, S2: A, NameStr: "", InsertBefore: nullptr, MDFrom: SelOp1);
      } else {
        return createSelectInstWithUnknownProfile(C, S1: B, S2: A);
      }
    }
    return SelectInst::Create(C, S1: B, S2: A);
  }

  return nullptr;
}
4097
4098// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
4099// here. We should standardize that construct where it is needed or choose some
4100// other way to ensure that commutated variants of patterns are not missed.
4101Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
4102 if (Value *V = simplifyOrInst(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1),
4103 Q: SQ.getWithInstruction(I: &I)))
4104 return replaceInstUsesWith(I, V);
4105
4106 if (SimplifyAssociativeOrCommutative(I))
4107 return &I;
4108
4109 if (Instruction *X = foldVectorBinop(Inst&: I))
4110 return X;
4111
4112 if (Instruction *Phi = foldBinopWithPhiOperands(BO&: I))
4113 return Phi;
4114
4115 // See if we can simplify any instructions used by the instruction whose sole
4116 // purpose is to compute bits we don't care about.
4117 if (SimplifyDemandedInstructionBits(Inst&: I))
4118 return &I;
4119
4120 // Do this before using distributive laws to catch simple and/or/not patterns.
4121 if (Instruction *Xor = foldOrToXor(I, Builder))
4122 return Xor;
4123
4124 if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
4125 return X;
4126
4127 if (Instruction *X = foldIntegerPackFromVector(I, Builder, DL))
4128 return X;
4129
4130 // (A & B) | (C & D) -> A ^ D where A == ~C && B == ~D
4131 // (A & B) | (C & D) -> A ^ C where A == ~D && B == ~C
4132 if (Value *V = foldOrOfInversions(I, Builder))
4133 return replaceInstUsesWith(I, V);
4134
4135 // (A&B)|(A&C) -> A&(B|C) etc
4136 if (Value *V = foldUsingDistributiveLaws(I))
4137 return replaceInstUsesWith(I, V);
4138
4139 Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);
4140 Type *Ty = I.getType();
4141 if (Ty->isIntOrIntVectorTy(BitWidth: 1)) {
4142 if (auto *SI0 = dyn_cast<SelectInst>(Val: Op0)) {
4143 if (auto *R =
4144 foldAndOrOfSelectUsingImpliedCond(Op: Op1, SI&: *SI0, /* IsAnd */ false))
4145 return R;
4146 }
4147 if (auto *SI1 = dyn_cast<SelectInst>(Val: Op1)) {
4148 if (auto *R =
4149 foldAndOrOfSelectUsingImpliedCond(Op: Op0, SI&: *SI1, /* IsAnd */ false))
4150 return R;
4151 }
4152 }
4153
4154 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
4155 return FoldedLogic;
4156
4157 if (Instruction *FoldedLogic = foldBinOpSelectBinOp(Op&: I))
4158 return FoldedLogic;
4159
4160 if (Instruction *BitOp = matchBSwapOrBitReverse(I, /*MatchBSwaps*/ true,
4161 /*MatchBitReversals*/ true))
4162 return BitOp;
4163
4164 if (Instruction *Funnel = matchFunnelShift(Or&: I, IC&: *this))
4165 return Funnel;
4166
4167 if (Value *Concat = matchOrConcat(Or&: I, Builder))
4168 return replaceInstUsesWith(I, V: Concat);
4169
4170 if (Instruction *R = foldBinOpShiftWithShift(I))
4171 return R;
4172
4173 if (Instruction *R = tryFoldInstWithCtpopWithNot(I: &I))
4174 return R;
4175
4176 if (cast<PossiblyDisjointInst>(Val&: I).isDisjoint()) {
4177 if (Instruction *R =
4178 foldAddLikeCommutative(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1),
4179 /*NSW=*/true, /*NUW=*/true))
4180 return R;
4181 if (Instruction *R =
4182 foldAddLikeCommutative(LHS: I.getOperand(i_nocapture: 1), RHS: I.getOperand(i_nocapture: 0),
4183 /*NSW=*/true, /*NUW=*/true))
4184 return R;
4185
4186 if (Value *Res = foldDisjointOr(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1)))
4187 return replaceInstUsesWith(I, V: Res);
4188
4189 if (Value *Res = reassociateDisjointOr(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1)))
4190 return replaceInstUsesWith(I, V: Res);
4191 }
4192
4193 Value *X, *Y;
4194 const APInt *CV;
4195 if (match(V: &I, P: m_c_Or(L: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: X), R: m_APInt(Res&: CV))), R: m_Value(V&: Y))) &&
4196 !CV->isAllOnes() && MaskedValueIsZero(V: Y, Mask: *CV, CxtI: &I)) {
4197 // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0
4198 // The check for a 'not' op is for efficiency (if Y is known zero --> ~X).
4199 Value *Or = Builder.CreateOr(LHS: X, RHS: Y);
4200 return BinaryOperator::CreateXor(V1: Or, V2: ConstantInt::get(Ty, V: *CV));
4201 }
4202
4203 // If the operands have no common bits set:
4204 // or (mul X, Y), X --> add (mul X, Y), X --> mul X, (Y + 1)
4205 if (match(V: &I, P: m_c_DisjointOr(L: m_OneUse(SubPattern: m_Mul(L: m_Value(V&: X), R: m_Value(V&: Y))),
4206 R: m_Deferred(V: X)))) {
4207 Value *IncrementY = Builder.CreateAdd(LHS: Y, RHS: ConstantInt::get(Ty, V: 1));
4208 return BinaryOperator::CreateMul(V1: X, V2: IncrementY);
4209 }
4210
4211 // (C && A) || (C && B) => select C, A, B (and similar cases)
4212 //
4213 // Note: This is the same transformation used in `foldSelectOfBools`,
4214 // except that it's an `or` instead of `select`.
4215 if (I.getType()->isIntOrIntVectorTy(BitWidth: 1) &&
4216 (Op0->hasOneUse() || Op1->hasOneUse())) {
4217 if (Instruction *V = FoldOrOfLogicalAnds(Op0, Op1)) {
4218 return V;
4219 }
4220 }
4221
4222 // (A & C) | (B & D)
4223 Value *A, *B, *C, *D;
4224 if (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_Value(V&: C))) &&
4225 match(V: Op1, P: m_And(L: m_Value(V&: B), R: m_Value(V&: D)))) {
4226
4227 // (A & C0) | (B & C1)
4228 const APInt *C0, *C1;
4229 if (match(V: C, P: m_APInt(Res&: C0)) && match(V: D, P: m_APInt(Res&: C1))) {
4230 Value *X;
4231 if (*C0 == ~*C1) {
4232 // ((X | B) & MaskC) | (B & ~MaskC) -> (X & MaskC) | B
4233 if (match(V: A, P: m_c_Or(L: m_Value(V&: X), R: m_Specific(V: B))))
4234 return BinaryOperator::CreateOr(V1: Builder.CreateAnd(LHS: X, RHS: *C0), V2: B);
4235 // (A & MaskC) | ((X | A) & ~MaskC) -> (X & ~MaskC) | A
4236 if (match(V: B, P: m_c_Or(L: m_Specific(V: A), R: m_Value(V&: X))))
4237 return BinaryOperator::CreateOr(V1: Builder.CreateAnd(LHS: X, RHS: *C1), V2: A);
4238
4239 // ((X ^ B) & MaskC) | (B & ~MaskC) -> (X & MaskC) ^ B
4240 if (match(V: A, P: m_c_Xor(L: m_Value(V&: X), R: m_Specific(V: B))))
4241 return BinaryOperator::CreateXor(V1: Builder.CreateAnd(LHS: X, RHS: *C0), V2: B);
4242 // (A & MaskC) | ((X ^ A) & ~MaskC) -> (X & ~MaskC) ^ A
4243 if (match(V: B, P: m_c_Xor(L: m_Specific(V: A), R: m_Value(V&: X))))
4244 return BinaryOperator::CreateXor(V1: Builder.CreateAnd(LHS: X, RHS: *C1), V2: A);
4245 }
4246
4247 if ((*C0 & *C1).isZero()) {
4248 // ((X | B) & C0) | (B & C1) --> (X | B) & (C0 | C1)
4249 // iff (C0 & C1) == 0 and (X & ~C0) == 0
4250 if (match(V: A, P: m_c_Or(L: m_Value(V&: X), R: m_Specific(V: B))) &&
4251 MaskedValueIsZero(V: X, Mask: ~*C0, CxtI: &I)) {
4252 Constant *C01 = ConstantInt::get(Ty, V: *C0 | *C1);
4253 return BinaryOperator::CreateAnd(V1: A, V2: C01);
4254 }
4255 // (A & C0) | ((X | A) & C1) --> (X | A) & (C0 | C1)
4256 // iff (C0 & C1) == 0 and (X & ~C1) == 0
4257 if (match(V: B, P: m_c_Or(L: m_Value(V&: X), R: m_Specific(V: A))) &&
4258 MaskedValueIsZero(V: X, Mask: ~*C1, CxtI: &I)) {
4259 Constant *C01 = ConstantInt::get(Ty, V: *C0 | *C1);
4260 return BinaryOperator::CreateAnd(V1: B, V2: C01);
4261 }
4262 // ((X | C2) & C0) | ((X | C3) & C1) --> (X | C2 | C3) & (C0 | C1)
4263 // iff (C0 & C1) == 0 and (C2 & ~C0) == 0 and (C3 & ~C1) == 0.
4264 const APInt *C2, *C3;
4265 if (match(V: A, P: m_Or(L: m_Value(V&: X), R: m_APInt(Res&: C2))) &&
4266 match(V: B, P: m_Or(L: m_Specific(V: X), R: m_APInt(Res&: C3))) &&
4267 (*C2 & ~*C0).isZero() && (*C3 & ~*C1).isZero()) {
4268 Value *Or = Builder.CreateOr(LHS: X, RHS: *C2 | *C3, Name: "bitfield");
4269 Constant *C01 = ConstantInt::get(Ty, V: *C0 | *C1);
4270 return BinaryOperator::CreateAnd(V1: Or, V2: C01);
4271 }
4272 }
4273 }
4274
4275 // Don't try to form a select if it's unlikely that we'll get rid of at
4276 // least one of the operands. A select is generally more expensive than the
4277 // 'or' that it is replacing.
4278 if (Op0->hasOneUse() || Op1->hasOneUse()) {
4279 // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
4280 if (Value *V = matchSelectFromAndOr(A, B: C, C: B, D))
4281 return replaceInstUsesWith(I, V);
4282 if (Value *V = matchSelectFromAndOr(A, B: C, C: D, D: B))
4283 return replaceInstUsesWith(I, V);
4284 if (Value *V = matchSelectFromAndOr(A: C, B: A, C: B, D))
4285 return replaceInstUsesWith(I, V);
4286 if (Value *V = matchSelectFromAndOr(A: C, B: A, C: D, D: B))
4287 return replaceInstUsesWith(I, V);
4288 if (Value *V = matchSelectFromAndOr(A: B, B: D, C: A, D: C))
4289 return replaceInstUsesWith(I, V);
4290 if (Value *V = matchSelectFromAndOr(A: B, B: D, C, D: A))
4291 return replaceInstUsesWith(I, V);
4292 if (Value *V = matchSelectFromAndOr(A: D, B, C: A, D: C))
4293 return replaceInstUsesWith(I, V);
4294 if (Value *V = matchSelectFromAndOr(A: D, B, C, D: A))
4295 return replaceInstUsesWith(I, V);
4296 }
4297 }
4298
4299 if (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_Value(V&: C))) &&
4300 match(V: Op1, P: m_Not(V: m_Or(L: m_Value(V&: B), R: m_Value(V&: D)))) &&
4301 (Op0->hasOneUse() || Op1->hasOneUse())) {
4302 // (Cond & C) | ~(Cond | D) -> Cond ? C : ~D
4303 if (Value *V = matchSelectFromAndOr(A, B: C, C: B, D, InvertFalseVal: true))
4304 return replaceInstUsesWith(I, V);
4305 if (Value *V = matchSelectFromAndOr(A, B: C, C: D, D: B, InvertFalseVal: true))
4306 return replaceInstUsesWith(I, V);
4307 if (Value *V = matchSelectFromAndOr(A: C, B: A, C: B, D, InvertFalseVal: true))
4308 return replaceInstUsesWith(I, V);
4309 if (Value *V = matchSelectFromAndOr(A: C, B: A, C: D, D: B, InvertFalseVal: true))
4310 return replaceInstUsesWith(I, V);
4311 }
4312
4313 // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
4314 if (match(V: Op0, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))))
4315 if (match(V: Op1,
4316 P: m_c_Xor(L: m_c_Xor(L: m_Specific(V: B), R: m_Value(V&: C)), R: m_Specific(V: A))) ||
4317 match(V: Op1, P: m_c_Xor(L: m_c_Xor(L: m_Specific(V: A), R: m_Value(V&: C)), R: m_Specific(V: B))))
4318 return BinaryOperator::CreateOr(V1: Op0, V2: C);
4319
4320 // ((B ^ C) ^ A) | (A ^ B) -> (A ^ B) | C
4321 if (match(V: Op1, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))))
4322 if (match(V: Op0,
4323 P: m_c_Xor(L: m_c_Xor(L: m_Specific(V: B), R: m_Value(V&: C)), R: m_Specific(V: A))) ||
4324 match(V: Op0, P: m_c_Xor(L: m_c_Xor(L: m_Specific(V: A), R: m_Value(V&: C)), R: m_Specific(V: B))))
4325 return BinaryOperator::CreateOr(V1: Op1, V2: C);
4326
4327 if (Instruction *DeMorgan = matchDeMorgansLaws(I, IC&: *this))
4328 return DeMorgan;
4329
4330 // Canonicalize xor to the RHS.
4331 bool SwappedForXor = false;
4332 if (match(V: Op0, P: m_Xor(L: m_Value(), R: m_Value()))) {
4333 std::swap(a&: Op0, b&: Op1);
4334 SwappedForXor = true;
4335 }
4336
4337 if (match(V: Op1, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B)))) {
4338 // (A | ?) | (A ^ B) --> (A | ?) | B
4339 // (B | ?) | (A ^ B) --> (B | ?) | A
4340 if (match(V: Op0, P: m_c_Or(L: m_Specific(V: A), R: m_Value())))
4341 return BinaryOperator::CreateOr(V1: Op0, V2: B);
4342 if (match(V: Op0, P: m_c_Or(L: m_Specific(V: B), R: m_Value())))
4343 return BinaryOperator::CreateOr(V1: Op0, V2: A);
4344
4345 // (A & B) | (A ^ B) --> A | B
4346 // (B & A) | (A ^ B) --> A | B
4347 if (match(V: Op0, P: m_c_And(L: m_Specific(V: A), R: m_Specific(V: B))))
4348 return BinaryOperator::CreateOr(V1: A, V2: B);
4349
4350 // ~A | (A ^ B) --> ~(A & B)
4351 // ~B | (A ^ B) --> ~(A & B)
4352 // The swap above should always make Op0 the 'not'.
4353 if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
4354 (match(V: Op0, P: m_Not(V: m_Specific(V: A))) || match(V: Op0, P: m_Not(V: m_Specific(V: B)))))
4355 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: A, RHS: B));
4356
4357 // Same as above, but peek through an 'and' to the common operand:
4358 // ~(A & ?) | (A ^ B) --> ~((A & ?) & B)
4359 // ~(B & ?) | (A ^ B) --> ~((B & ?) & A)
4360 Instruction *And;
4361 if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
4362 match(V: Op0,
4363 P: m_Not(V: m_Instruction(I&: And, Match: m_c_And(L: m_Specific(V: A), R: m_Value())))))
4364 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: And, RHS: B));
4365 if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
4366 match(V: Op0,
4367 P: m_Not(V: m_Instruction(I&: And, Match: m_c_And(L: m_Specific(V: B), R: m_Value())))))
4368 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: And, RHS: A));
4369
4370 // (~A | C) | (A ^ B) --> ~(A & B) | C
4371 // (~B | C) | (A ^ B) --> ~(A & B) | C
4372 if (Op0->hasOneUse() && Op1->hasOneUse() &&
4373 (match(V: Op0, P: m_c_Or(L: m_Not(V: m_Specific(V: A)), R: m_Value(V&: C))) ||
4374 match(V: Op0, P: m_c_Or(L: m_Not(V: m_Specific(V: B)), R: m_Value(V&: C))))) {
4375 Value *Nand = Builder.CreateNot(V: Builder.CreateAnd(LHS: A, RHS: B), Name: "nand");
4376 return BinaryOperator::CreateOr(V1: Nand, V2: C);
4377 }
4378 }
4379
4380 if (SwappedForXor)
4381 std::swap(a&: Op0, b&: Op1);
4382
4383 if (Value *Res =
4384 foldBooleanAndOr(LHS: Op0, RHS: Op1, I, /*IsAnd=*/false, /*IsLogical=*/false))
4385 return replaceInstUsesWith(I, V: Res);
4386
4387 if (match(V: Op1, P: m_OneUse(SubPattern: m_LogicalOr(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
4388 bool IsLogical = isa<SelectInst>(Val: Op1);
4389 if (auto *V = reassociateBooleanAndOr(LHS: Op0, X, Y, I, /*IsAnd=*/false,
4390 /*RHSIsLogical=*/IsLogical))
4391 return replaceInstUsesWith(I, V);
4392 }
4393 if (match(V: Op0, P: m_OneUse(SubPattern: m_LogicalOr(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
4394 bool IsLogical = isa<SelectInst>(Val: Op0);
4395 if (auto *V = reassociateBooleanAndOr(LHS: Op1, X, Y, I, /*IsAnd=*/false,
4396 /*RHSIsLogical=*/IsLogical))
4397 return replaceInstUsesWith(I, V);
4398 }
4399
4400 if (Instruction *FoldedFCmps = reassociateFCmps(BO&: I, Builder))
4401 return FoldedFCmps;
4402
4403 if (Instruction *CastedOr = foldCastedBitwiseLogic(I))
4404 return CastedOr;
4405
4406 if (Instruction *Sel = foldBinopOfSextBoolToSelect(I))
4407 return Sel;
4408
4409 // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
4410 // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
4411 // with binop identity constant. But creating a select with non-constant
4412 // arm may not be reversible due to poison semantics. Is that a good
4413 // canonicalization?
4414 if (match(V: &I, P: m_c_Or(L: m_OneUse(SubPattern: m_SExt(Op: m_Value(V&: A))), R: m_Value(V&: B))) &&
4415 A->getType()->isIntOrIntVectorTy(BitWidth: 1))
4416 return createSelectInstWithUnknownProfile(
4417 C: A, S1: ConstantInt::getAllOnesValue(Ty), S2: B);
4418
4419 // Note: If we've gotten to the point of visiting the outer OR, then the
4420 // inner one couldn't be simplified. If it was a constant, then it won't
4421 // be simplified by a later pass either, so we try swapping the inner/outer
4422 // ORs in the hopes that we'll be able to simplify it this way.
4423 // (X|C) | V --> (X|V) | C
4424 // Pass the disjoint flag in the following two patterns:
4425 // 1. or-disjoint (or-disjoint X, C), V -->
4426 // or-disjoint (or-disjoint X, V), C
4427 //
4428 // 2. or-disjoint (or X, C), V -->
4429 // or (or-disjoint X, V), C
4430 ConstantInt *CI;
4431 if (Op0->hasOneUse() && !match(V: Op1, P: m_ConstantInt()) &&
4432 match(V: Op0, P: m_Or(L: m_Value(V&: A), R: m_ConstantInt(CI)))) {
4433 bool IsDisjointOuter = cast<PossiblyDisjointInst>(Val&: I).isDisjoint();
4434 bool IsDisjointInner = cast<PossiblyDisjointInst>(Val: Op0)->isDisjoint();
4435 Value *Inner = Builder.CreateOr(LHS: A, RHS: Op1);
4436 cast<PossiblyDisjointInst>(Val: Inner)->setIsDisjoint(IsDisjointOuter);
4437 Inner->takeName(V: Op0);
4438 return IsDisjointOuter && IsDisjointInner
4439 ? BinaryOperator::CreateDisjointOr(V1: Inner, V2: CI)
4440 : BinaryOperator::CreateOr(V1: Inner, V2: CI);
4441 }
4442
4443 // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
4444 // Since this OR statement hasn't been optimized further yet, we hope
4445 // that this transformation will allow the new ORs to be optimized.
4446 {
4447 Value *X = nullptr, *Y = nullptr;
4448 if (Op0->hasOneUse() && Op1->hasOneUse() &&
4449 match(V: Op0, P: m_Select(C: m_Value(V&: X), L: m_Value(V&: A), R: m_Value(V&: B))) &&
4450 match(V: Op1, P: m_Select(C: m_Value(V&: Y), L: m_Value(V&: C), R: m_Value(V&: D))) && X == Y) {
4451 Value *orTrue = Builder.CreateOr(LHS: A, RHS: C);
4452 Value *orFalse = Builder.CreateOr(LHS: B, RHS: D);
4453 return SelectInst::Create(C: X, S1: orTrue, S2: orFalse);
4454 }
4455 }
4456
4457 // or(ashr(subNSW(Y, X), ScalarSizeInBits(Y) - 1), X) --> X s> Y ? -1 : X.
4458 {
4459 Value *X, *Y;
4460 if (match(V: &I, P: m_c_Or(L: m_OneUse(SubPattern: m_AShr(
4461 L: m_NSWSub(L: m_Value(V&: Y), R: m_Value(V&: X)),
4462 R: m_SpecificInt(V: Ty->getScalarSizeInBits() - 1))),
4463 R: m_Deferred(V: X)))) {
4464 Value *NewICmpInst = Builder.CreateICmpSGT(LHS: X, RHS: Y);
4465 Value *AllOnes = ConstantInt::getAllOnesValue(Ty);
4466 return createSelectInstWithUnknownProfile(C: NewICmpInst, S1: AllOnes, S2: X);
4467 }
4468 }
4469
4470 {
4471 // ((A & B) ^ A) | ((A & B) ^ B) -> A ^ B
4472 // (A ^ (A & B)) | (B ^ (A & B)) -> A ^ B
4473 // ((A & B) ^ B) | ((A & B) ^ A) -> A ^ B
4474 // (B ^ (A & B)) | (A ^ (A & B)) -> A ^ B
4475 const auto TryXorOpt = [&](Value *Lhs, Value *Rhs) -> Instruction * {
4476 if (match(V: Lhs, P: m_c_Xor(L: m_And(L: m_Value(V&: A), R: m_Value(V&: B)), R: m_Deferred(V: A))) &&
4477 match(V: Rhs,
4478 P: m_c_Xor(L: m_And(L: m_Specific(V: A), R: m_Specific(V: B)), R: m_Specific(V: B)))) {
4479 return BinaryOperator::CreateXor(V1: A, V2: B);
4480 }
4481 return nullptr;
4482 };
4483
4484 if (Instruction *Result = TryXorOpt(Op0, Op1))
4485 return Result;
4486 if (Instruction *Result = TryXorOpt(Op1, Op0))
4487 return Result;
4488 }
4489
4490 if (Instruction *V =
4491 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
4492 return V;
4493
4494 CmpPredicate Pred;
4495 Value *Mul, *Ov, *MulIsNotZero, *UMulWithOv;
4496 // Check if the OR weakens the overflow condition for umul.with.overflow by
4497 // treating any non-zero result as overflow. In that case, we overflow if both
4498 // umul.with.overflow operands are != 0, as in that case the result can only
4499 // be 0, iff the multiplication overflows.
4500 if (match(V: &I, P: m_c_Or(L: m_Value(V&: Ov, Match: m_ExtractValue<1>(V: m_Value(V&: UMulWithOv))),
4501 R: m_Value(V&: MulIsNotZero,
4502 Match: m_SpecificICmp(
4503 MatchPred: ICmpInst::ICMP_NE,
4504 L: m_Value(V&: Mul, Match: m_ExtractValue<0>(
4505 V: m_Deferred(V: UMulWithOv))),
4506 R: m_ZeroInt())))) &&
4507 (Ov->hasOneUse() || (MulIsNotZero->hasOneUse() && Mul->hasOneUse()))) {
4508 Value *A, *B;
4509 if (match(V: UMulWithOv, P: m_Intrinsic<Intrinsic::umul_with_overflow>(
4510 Op0: m_Value(V&: A), Op1: m_Value(V&: B)))) {
4511 Value *NotNullA = Builder.CreateIsNotNull(Arg: A);
4512 Value *NotNullB = Builder.CreateIsNotNull(Arg: B);
4513 return BinaryOperator::CreateAnd(V1: NotNullA, V2: NotNullB);
4514 }
4515 }
4516
4517 /// Res, Overflow = xxx_with_overflow X, C1
4518 /// Try to canonicalize the pattern "Overflow | icmp pred Res, C2" into
4519 /// "Overflow | icmp pred X, C2 +/- C1".
4520 const WithOverflowInst *WO;
4521 const Value *WOV;
4522 const APInt *C1, *C2;
4523 if (match(V: &I, P: m_c_Or(L: m_Value(V&: Ov, Match: m_ExtractValue<1>(
4524 V: m_Value(V&: WOV, Match: m_WithOverflowInst(I&: WO)))),
4525 R: m_OneUse(SubPattern: m_ICmp(Pred, L: m_ExtractValue<0>(V: m_Deferred(V: WOV)),
4526 R: m_APInt(Res&: C2))))) &&
4527 (WO->getBinaryOp() == Instruction::Add ||
4528 WO->getBinaryOp() == Instruction::Sub) &&
4529 (ICmpInst::isEquality(P: Pred) ||
4530 WO->isSigned() == ICmpInst::isSigned(Pred)) &&
4531 match(V: WO->getRHS(), P: m_APInt(Res&: C1))) {
4532 bool Overflow;
4533 APInt NewC = WO->getBinaryOp() == Instruction::Add
4534 ? (ICmpInst::isSigned(Pred) ? C2->ssub_ov(RHS: *C1, Overflow)
4535 : C2->usub_ov(RHS: *C1, Overflow))
4536 : (ICmpInst::isSigned(Pred) ? C2->sadd_ov(RHS: *C1, Overflow)
4537 : C2->uadd_ov(RHS: *C1, Overflow));
4538 if (!Overflow || ICmpInst::isEquality(P: Pred)) {
4539 Value *NewCmp = Builder.CreateICmp(
4540 P: Pred, LHS: WO->getLHS(), RHS: ConstantInt::get(Ty: WO->getLHS()->getType(), V: NewC));
4541 return BinaryOperator::CreateOr(V1: Ov, V2: NewCmp);
4542 }
4543 }
4544
4545 // Try to fold the pattern "Overflow | icmp pred Res, C2" into a single
4546 // comparison instruction for umul.with.overflow.
4547 if (Value *R = foldOrUnsignedUMulOverflowICmp(I, Builder, DL))
4548 return replaceInstUsesWith(I, V: R);
4549
4550 // (~x) | y --> ~(x & (~y)) iff that gets rid of inversions
4551 if (sinkNotIntoOtherHandOfLogicalOp(I))
4552 return &I;
4553
4554 // Improve "get low bit mask up to and including bit X" pattern:
4555 // (1 << X) | ((1 << X) + -1) --> -1 l>> (bitwidth(x) - 1 - X)
4556 if (match(V: &I, P: m_c_Or(L: m_Add(L: m_Shl(L: m_One(), R: m_Value(V&: X)), R: m_AllOnes()),
4557 R: m_Shl(L: m_One(), R: m_Deferred(V: X)))) &&
4558 match(V: &I, P: m_c_Or(L: m_OneUse(SubPattern: m_Value()), R: m_Value()))) {
4559 Value *Sub = Builder.CreateSub(
4560 LHS: ConstantInt::get(Ty, V: Ty->getScalarSizeInBits() - 1), RHS: X);
4561 return BinaryOperator::CreateLShr(V1: Constant::getAllOnesValue(Ty), V2: Sub);
4562 }
4563
4564 // An or recurrence w/loop invariant step is equivelent to (or start, step)
4565 PHINode *PN = nullptr;
4566 Value *Start = nullptr, *Step = nullptr;
4567 if (matchSimpleRecurrence(I: &I, P&: PN, Start, Step) && DT.dominates(Def: Step, User: PN))
4568 return replaceInstUsesWith(I, V: Builder.CreateOr(LHS: Start, RHS: Step));
4569
4570 // (A & B) | (C | D) or (C | D) | (A & B)
4571 // Can be combined if C or D is of type (A/B & X)
4572 if (match(V: &I, P: m_c_Or(L: m_OneUse(SubPattern: m_And(L: m_Value(V&: A), R: m_Value(V&: B))),
4573 R: m_OneUse(SubPattern: m_Or(L: m_Value(V&: C), R: m_Value(V&: D)))))) {
4574 // (A & B) | (C | ?) -> C | (? | (A & B))
4575 // (A & B) | (C | ?) -> C | (? | (A & B))
4576 // (A & B) | (C | ?) -> C | (? | (A & B))
4577 // (A & B) | (C | ?) -> C | (? | (A & B))
4578 // (C | ?) | (A & B) -> C | (? | (A & B))
4579 // (C | ?) | (A & B) -> C | (? | (A & B))
4580 // (C | ?) | (A & B) -> C | (? | (A & B))
4581 // (C | ?) | (A & B) -> C | (? | (A & B))
4582 if (match(V: D, P: m_OneUse(SubPattern: m_c_And(L: m_Specific(V: A), R: m_Value()))) ||
4583 match(V: D, P: m_OneUse(SubPattern: m_c_And(L: m_Specific(V: B), R: m_Value()))))
4584 return BinaryOperator::CreateOr(
4585 V1: C, V2: Builder.CreateOr(LHS: D, RHS: Builder.CreateAnd(LHS: A, RHS: B)));
4586 // (A & B) | (? | D) -> (? | (A & B)) | D
4587 // (A & B) | (? | D) -> (? | (A & B)) | D
4588 // (A & B) | (? | D) -> (? | (A & B)) | D
4589 // (A & B) | (? | D) -> (? | (A & B)) | D
4590 // (? | D) | (A & B) -> (? | (A & B)) | D
4591 // (? | D) | (A & B) -> (? | (A & B)) | D
4592 // (? | D) | (A & B) -> (? | (A & B)) | D
4593 // (? | D) | (A & B) -> (? | (A & B)) | D
4594 if (match(V: C, P: m_OneUse(SubPattern: m_c_And(L: m_Specific(V: A), R: m_Value()))) ||
4595 match(V: C, P: m_OneUse(SubPattern: m_c_And(L: m_Specific(V: B), R: m_Value()))))
4596 return BinaryOperator::CreateOr(
4597 V1: Builder.CreateOr(LHS: C, RHS: Builder.CreateAnd(LHS: A, RHS: B)), V2: D);
4598 }
4599
4600 if (Instruction *R = reassociateForUses(BO&: I, Builder))
4601 return R;
4602
4603 if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
4604 return Canonicalized;
4605
4606 if (Instruction *Folded = foldLogicOfIsFPClass(BO&: I, Op0, Op1))
4607 return Folded;
4608
4609 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
4610 return Res;
4611
4612 // If we are setting the sign bit of a floating-point value, convert
4613 // this to fneg(fabs), then cast back to integer.
4614 //
4615 // If the result isn't immediately cast back to a float, this will increase
4616 // the number of instructions. This is still probably a better canonical form
4617 // as it enables FP value tracking.
4618 //
4619 // Assumes any IEEE-represented type has the sign bit in the high bit.
4620 //
4621 // This is generous interpretation of noimplicitfloat, this is not a true
4622 // floating-point operation.
4623 Value *CastOp;
4624 if (match(V: Op0, P: m_ElementWiseBitCast(Op: m_Value(V&: CastOp))) &&
4625 match(V: Op1, P: m_SignMask()) &&
4626 !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
4627 Kind: Attribute::NoImplicitFloat)) {
4628 Type *EltTy = CastOp->getType()->getScalarType();
4629 if (EltTy->isFloatingPointTy() &&
4630 APFloat::hasSignBitInMSB(EltTy->getFltSemantics())) {
4631 Value *FAbs = Builder.CreateUnaryIntrinsic(ID: Intrinsic::fabs, V: CastOp);
4632 Value *FNegFAbs = Builder.CreateFNeg(V: FAbs);
4633 return new BitCastInst(FNegFAbs, I.getType());
4634 }
4635 }
4636
4637 // (X & C1) | C2 -> X & (C1 | C2) iff (X & C2) == C2
4638 if (match(V: Op0, P: m_OneUse(SubPattern: m_And(L: m_Value(V&: X), R: m_APInt(Res&: C1)))) &&
4639 match(V: Op1, P: m_APInt(Res&: C2))) {
4640 KnownBits KnownX = computeKnownBits(V: X, CxtI: &I);
4641 if ((KnownX.One & *C2) == *C2)
4642 return BinaryOperator::CreateAnd(V1: X, V2: ConstantInt::get(Ty, V: *C1 | *C2));
4643 }
4644
4645 if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
4646 return Res;
4647
4648 if (Value *V =
4649 simplifyAndOrWithOpReplaced(V: Op0, Op: Op1, RepOp: Constant::getNullValue(Ty),
4650 /*SimplifyOnly*/ false, IC&: *this))
4651 return BinaryOperator::CreateOr(V1: V, V2: Op1);
4652 if (Value *V =
4653 simplifyAndOrWithOpReplaced(V: Op1, Op: Op0, RepOp: Constant::getNullValue(Ty),
4654 /*SimplifyOnly*/ false, IC&: *this))
4655 return BinaryOperator::CreateOr(V1: Op0, V2: V);
4656
4657 if (cast<PossiblyDisjointInst>(Val&: I).isDisjoint())
4658 if (Value *V = SimplifyAddWithRemainder(I))
4659 return replaceInstUsesWith(I, V);
4660
4661 if (Value *Res = FoldOrOfSelectSmaxToAbs(I, Builder))
4662 return replaceInstUsesWith(I, V: Res);
4663
4664 return nullptr;
4665}
4666
4667/// A ^ B can be specified using other logic ops in a variety of patterns. We
4668/// can fold these early and efficiently by morphing an existing instruction.
4669static Instruction *foldXorToXor(BinaryOperator &I,
4670 InstCombiner::BuilderTy &Builder) {
4671 assert(I.getOpcode() == Instruction::Xor);
4672 Value *Op0 = I.getOperand(i_nocapture: 0);
4673 Value *Op1 = I.getOperand(i_nocapture: 1);
4674 Value *A, *B;
4675
4676 // There are 4 commuted variants for each of the basic patterns.
4677
4678 // (A & B) ^ (A | B) -> A ^ B
4679 // (A & B) ^ (B | A) -> A ^ B
4680 // (A | B) ^ (A & B) -> A ^ B
4681 // (A | B) ^ (B & A) -> A ^ B
4682 if (match(V: &I, P: m_c_Xor(L: m_And(L: m_Value(V&: A), R: m_Value(V&: B)),
4683 R: m_c_Or(L: m_Deferred(V: A), R: m_Deferred(V: B)))))
4684 return BinaryOperator::CreateXor(V1: A, V2: B);
4685
4686 // (A | ~B) ^ (~A | B) -> A ^ B
4687 // (~B | A) ^ (~A | B) -> A ^ B
4688 // (~A | B) ^ (A | ~B) -> A ^ B
4689 // (B | ~A) ^ (A | ~B) -> A ^ B
4690 if (match(V: &I, P: m_Xor(L: m_c_Or(L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B))),
4691 R: m_c_Or(L: m_Not(V: m_Deferred(V: A)), R: m_Deferred(V: B)))))
4692 return BinaryOperator::CreateXor(V1: A, V2: B);
4693
4694 // (A & ~B) ^ (~A & B) -> A ^ B
4695 // (~B & A) ^ (~A & B) -> A ^ B
4696 // (~A & B) ^ (A & ~B) -> A ^ B
4697 // (B & ~A) ^ (A & ~B) -> A ^ B
4698 if (match(V: &I, P: m_Xor(L: m_c_And(L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B))),
4699 R: m_c_And(L: m_Not(V: m_Deferred(V: A)), R: m_Deferred(V: B)))))
4700 return BinaryOperator::CreateXor(V1: A, V2: B);
4701
4702 // For the remaining cases we need to get rid of one of the operands.
4703 if (!Op0->hasOneUse() && !Op1->hasOneUse())
4704 return nullptr;
4705
4706 // (A | B) ^ ~(A & B) -> ~(A ^ B)
4707 // (A | B) ^ ~(B & A) -> ~(A ^ B)
4708 // (A & B) ^ ~(A | B) -> ~(A ^ B)
4709 // (A & B) ^ ~(B | A) -> ~(A ^ B)
4710 // Complexity sorting ensures the not will be on the right side.
4711 if ((match(V: Op0, P: m_Or(L: m_Value(V&: A), R: m_Value(V&: B))) &&
4712 match(V: Op1, P: m_Not(V: m_c_And(L: m_Specific(V: A), R: m_Specific(V: B))))) ||
4713 (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) &&
4714 match(V: Op1, P: m_Not(V: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B))))))
4715 return BinaryOperator::CreateNot(Op: Builder.CreateXor(LHS: A, RHS: B));
4716
4717 return nullptr;
4718}
4719
/// Fold 'xor' of two icmp instructions (I == LHS ^ RHS) into a simpler value:
/// a single icmp, a constant, or an and-of-icmps. Returns the replacement
/// value, or null if no fold applies.
Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                        BinaryOperator &I) {
  assert(I.getOpcode() == Instruction::Xor && I.getOperand(0) == LHS &&
         I.getOperand(1) == RHS && "Should be 'xor' with these operands");

  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  Value *LHS0 = LHS->getOperand(i_nocapture: 0), *LHS1 = LHS->getOperand(i_nocapture: 1);
  Value *RHS0 = RHS->getOperand(i_nocapture: 0), *RHS1 = RHS->getOperand(i_nocapture: 1);

  if (predicatesFoldable(P1: PredL, P2: PredR)) {
    // Commute the LHS compare so both compares have matching operand order.
    if (LHS0 == RHS1 && LHS1 == RHS0) {
      std::swap(a&: LHS0, b&: LHS1);
      PredL = ICmpInst::getSwappedPredicate(pred: PredL);
    }
    if (LHS0 == RHS0 && LHS1 == RHS1) {
      // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
      // XOR of the two predicate bit-codes is the code of the combined icmp.
      unsigned Code = getICmpCode(Pred: PredL) ^ getICmpCode(Pred: PredR);
      bool IsSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(Code, Sign: IsSigned, LHS: LHS0, RHS: LHS1, Builder);
    }
  }

  const APInt *LC, *RC;
  if (match(V: LHS1, P: m_APInt(Res&: LC)) && match(V: RHS1, P: m_APInt(Res&: RC)) &&
      LHS0->getType() == RHS0->getType() &&
      LHS0->getType()->isIntOrIntVectorTy()) {
    // Convert xor of signbit tests to signbit test of xor'd values:
    // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0
    // (X < 0) ^ (Y < 0) --> (X ^ Y) < 0
    // (X > -1) ^ (Y < 0) --> (X ^ Y) > -1
    // (X < 0) ^ (Y > -1) --> (X ^ Y) > -1
    bool TrueIfSignedL, TrueIfSignedR;
    if ((LHS->hasOneUse() || RHS->hasOneUse()) &&
        isSignBitCheck(Pred: PredL, RHS: *LC, TrueIfSigned&: TrueIfSignedL) &&
        isSignBitCheck(Pred: PredR, RHS: *RC, TrueIfSigned&: TrueIfSignedR)) {
      Value *XorLR = Builder.CreateXor(LHS: LHS0, RHS: RHS0);
      return TrueIfSignedL == TrueIfSignedR ? Builder.CreateIsNeg(Arg: XorLR) :
                                              Builder.CreateIsNotNeg(Arg: XorLR);
    }

    // Fold (icmp pred1 X, C1) ^ (icmp pred2 X, C2)
    // into a single comparison using range-based reasoning.
    if (LHS0 == RHS0) {
      ConstantRange CR1 = ConstantRange::makeExactICmpRegion(Pred: PredL, Other: *LC);
      ConstantRange CR2 = ConstantRange::makeExactICmpRegion(Pred: PredR, Other: *RC);
      auto CRUnion = CR1.exactUnionWith(CR: CR2);
      auto CRIntersect = CR1.exactIntersectWith(CR: CR2);
      if (CRUnion && CRIntersect)
        // The xor of the two ranges is their symmetric difference:
        // (CR1 u CR2) \ (CR1 n CR2).
        if (auto CR = CRUnion->exactIntersectWith(CR: CRIntersect->inverse())) {
          if (CR->isFullSet())
            return ConstantInt::getTrue(Ty: I.getType());
          if (CR->isEmptySet())
            return ConstantInt::getFalse(Ty: I.getType());

          CmpInst::Predicate NewPred;
          APInt NewC, Offset;
          CR->getEquivalentICmp(Pred&: NewPred, RHS&: NewC, Offset);

          // A non-zero Offset costs an extra 'add', so require that both
          // original compares die; with a zero Offset one dying use suffices.
          if ((Offset.isZero() && (LHS->hasOneUse() || RHS->hasOneUse())) ||
              (LHS->hasOneUse() && RHS->hasOneUse())) {
            Value *NewV = LHS0;
            Type *Ty = LHS0->getType();
            if (!Offset.isZero())
              NewV = Builder.CreateAdd(LHS: NewV, RHS: ConstantInt::get(Ty, V: Offset));
            return Builder.CreateICmp(P: NewPred, LHS: NewV,
                                      RHS: ConstantInt::get(Ty, V: NewC));
          }
        }
    }

    // Fold (icmp eq/ne (X & Pow2), 0) ^ (icmp eq/ne (Y & Pow2), 0) into
    // (icmp eq/ne ((X ^ Y) & Pow2), 0)
    Value *X, *Y, *Pow2;
    if (ICmpInst::isEquality(P: PredL) && ICmpInst::isEquality(P: PredR) &&
        LC->isZero() && RC->isZero() && LHS->hasOneUse() && RHS->hasOneUse() &&
        match(V: LHS0, P: m_And(L: m_Value(V&: X), R: m_Value(V&: Pow2))) &&
        match(V: RHS0, P: m_And(L: m_Value(V&: Y), R: m_Specific(V: Pow2))) &&
        isKnownToBeAPowerOfTwo(V: Pow2, /*OrZero=*/true, CxtI: &I)) {
      Value *Xor = Builder.CreateXor(LHS: X, RHS: Y);
      Value *And = Builder.CreateAnd(LHS: Xor, RHS: Pow2);
      // Matching predicates (eq^eq or ne^ne) flip the result, so use the
      // opposite predicate; mixed predicates keep it.
      return Builder.CreateICmp(P: PredL == PredR ? ICmpInst::ICMP_NE
                                                : ICmpInst::ICMP_EQ,
                                LHS: And, RHS: ConstantInt::getNullValue(Ty: Xor->getType()));
    }
  }

  // Instead of trying to imitate the folds for and/or, decompose this 'xor'
  // into those logic ops. That is, try to turn this into an and-of-icmps
  // because we have many folds for that pattern.
  //
  // This is based on a truth table definition of xor:
  // X ^ Y --> (X | Y) & !(X & Y)
  if (Value *OrICmp = simplifyBinOp(Opcode: Instruction::Or, LHS, RHS, Q: SQ)) {
    // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y).
    // TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
    if (Value *AndICmp = simplifyBinOp(Opcode: Instruction::And, LHS, RHS, Q: SQ)) {
      // TODO: Independently handle cases where the 'and' side is a constant.
      ICmpInst *X = nullptr, *Y = nullptr;
      if (OrICmp == LHS && AndICmp == RHS) {
        // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS --> X & !Y
        X = LHS;
        Y = RHS;
      }
      if (OrICmp == RHS && AndICmp == LHS) {
        // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS --> !Y & X
        X = RHS;
        Y = LHS;
      }
      if (X && Y && (Y->hasOneUse() || canFreelyInvertAllUsersOf(V: Y, IgnoredUser: &I))) {
        // Invert the predicate of 'Y', thus inverting its output.
        // NOTE: this mutates Y in place; any other users are compensated for
        // below.
        Y->setPredicate(Y->getInversePredicate());
        // So, are there other uses of Y?
        if (!Y->hasOneUse()) {
          // We need to adapt other uses of Y though. Get a value that matches
          // the original value of Y before inversion. While this increases
          // immediate instruction count, we have just ensured that all the
          // users are freely-invertible, so that 'not' *will* get folded away.
          BuilderTy::InsertPointGuard Guard(Builder);
          // Set insertion point to right after the Y.
          Builder.SetInsertPoint(TheBB: Y->getParent(), IP: ++(Y->getIterator()));
          Value *NotY = Builder.CreateNot(V: Y, Name: Y->getName() + ".not");
          // Replace all uses of Y (excluding the one in NotY!) with NotY.
          Worklist.pushUsersToWorkList(I&: *Y);
          Y->replaceUsesWithIf(New: NotY,
                               ShouldReplace: [NotY](Use &U) { return U.getUser() != NotY; });
        }
        // All done.
        return Builder.CreateAnd(LHS, RHS);
      }
    }
  }

  return nullptr;
}
4854
4855/// If we have a masked merge, in the canonical form of:
4856/// (assuming that A only has one use.)
4857/// | A | |B|
4858/// ((x ^ y) & M) ^ y
4859/// | D |
4860/// * If M is inverted:
4861/// | D |
4862/// ((x ^ y) & ~M) ^ y
4863/// We can canonicalize by swapping the final xor operand
4864/// to eliminate the 'not' of the mask.
4865/// ((x ^ y) & M) ^ x
4866/// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops
4867/// because that shortens the dependency chain and improves analysis:
4868/// (x & M) | (y & ~M)
4869static Instruction *visitMaskedMerge(BinaryOperator &I,
4870 InstCombiner::BuilderTy &Builder) {
4871 Value *B, *X, *D;
4872 Value *M;
4873 if (!match(V: &I, P: m_c_Xor(L: m_Value(V&: B),
4874 R: m_OneUse(SubPattern: m_c_And(
4875 L: m_Value(V&: D, Match: m_c_Xor(L: m_Deferred(V: B), R: m_Value(V&: X))),
4876 R: m_Value(V&: M))))))
4877 return nullptr;
4878
4879 Value *NotM;
4880 if (match(V: M, P: m_Not(V: m_Value(V&: NotM)))) {
4881 // De-invert the mask and swap the value in B part.
4882 Value *NewA = Builder.CreateAnd(LHS: D, RHS: NotM);
4883 return BinaryOperator::CreateXor(V1: NewA, V2: X);
4884 }
4885
4886 Constant *C;
4887 if (D->hasOneUse() && match(V: M, P: m_Constant(C))) {
4888 // Propagating undef is unsafe. Clamp undef elements to -1.
4889 Type *EltTy = C->getType()->getScalarType();
4890 C = Constant::replaceUndefsWith(C, Replacement: ConstantInt::getAllOnesValue(Ty: EltTy));
4891 // Unfold.
4892 Value *LHS = Builder.CreateAnd(LHS: X, RHS: C);
4893 Value *NotC = Builder.CreateNot(V: C);
4894 Value *RHS = Builder.CreateAnd(LHS: B, RHS: NotC);
4895 return BinaryOperator::CreateOr(V1: LHS, V2: RHS);
4896 }
4897
4898 return nullptr;
4899}
4900
4901static Instruction *foldNotXor(BinaryOperator &I,
4902 InstCombiner::BuilderTy &Builder) {
4903 Value *X, *Y;
4904 // FIXME: one-use check is not needed in general, but currently we are unable
4905 // to fold 'not' into 'icmp', if that 'icmp' has multiple uses. (D35182)
4906 if (!match(V: &I, P: m_Not(V: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: X), R: m_Value(V&: Y))))))
4907 return nullptr;
4908
4909 auto hasCommonOperand = [](Value *A, Value *B, Value *C, Value *D) {
4910 return A == C || A == D || B == C || B == D;
4911 };
4912
4913 Value *A, *B, *C, *D;
4914 // Canonicalize ~((A & B) ^ (A | ?)) -> (A & B) | ~(A | ?)
4915 // 4 commuted variants
4916 if (match(V: X, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) &&
4917 match(V: Y, P: m_Or(L: m_Value(V&: C), R: m_Value(V&: D))) && hasCommonOperand(A, B, C, D)) {
4918 Value *NotY = Builder.CreateNot(V: Y);
4919 return BinaryOperator::CreateOr(V1: X, V2: NotY);
4920 };
4921
4922 // Canonicalize ~((A | ?) ^ (A & B)) -> (A & B) | ~(A | ?)
4923 // 4 commuted variants
4924 if (match(V: Y, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) &&
4925 match(V: X, P: m_Or(L: m_Value(V&: C), R: m_Value(V&: D))) && hasCommonOperand(A, B, C, D)) {
4926 Value *NotX = Builder.CreateNot(V: X);
4927 return BinaryOperator::CreateOr(V1: Y, V2: NotX);
4928 };
4929
4930 return nullptr;
4931}
4932
4933/// Canonicalize a shifty way to code absolute value to the more common pattern
4934/// that uses negation and select.
4935static Instruction *canonicalizeAbs(BinaryOperator &Xor,
4936 InstCombiner::BuilderTy &Builder) {
4937 assert(Xor.getOpcode() == Instruction::Xor && "Expected an xor instruction.");
4938
4939 // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
4940 // We're relying on the fact that we only do this transform when the shift has
4941 // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase
4942 // instructions).
4943 Value *Op0 = Xor.getOperand(i_nocapture: 0), *Op1 = Xor.getOperand(i_nocapture: 1);
4944 if (Op0->hasNUses(N: 2))
4945 std::swap(a&: Op0, b&: Op1);
4946
4947 Type *Ty = Xor.getType();
4948 Value *A;
4949 const APInt *ShAmt;
4950 if (match(V: Op1, P: m_AShr(L: m_Value(V&: A), R: m_APInt(Res&: ShAmt))) &&
4951 Op1->hasNUses(N: 2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
4952 match(V: Op0, P: m_OneUse(SubPattern: m_c_Add(L: m_Specific(V: A), R: m_Specific(V: Op1))))) {
4953 // Op1 = ashr i32 A, 31 ; smear the sign bit
4954 // xor (add A, Op1), Op1 ; add -1 and flip bits if negative
4955 // --> (A < 0) ? -A : A
4956 Value *IsNeg = Builder.CreateIsNeg(Arg: A);
4957 // Copy the nsw flags from the add to the negate.
4958 auto *Add = cast<BinaryOperator>(Val: Op0);
4959 Value *NegA = Add->hasNoUnsignedWrap()
4960 ? Constant::getNullValue(Ty: A->getType())
4961 : Builder.CreateNeg(V: A, Name: "", HasNSW: Add->hasNoSignedWrap());
4962 return SelectInst::Create(C: IsNeg, S1: NegA, S2: A);
4963 }
4964 return nullptr;
4965}
4966
4967static bool canFreelyInvert(InstCombiner &IC, Value *Op,
4968 Instruction *IgnoredUser) {
4969 auto *I = dyn_cast<Instruction>(Val: Op);
4970 return I && IC.isFreeToInvert(V: I, /*WillInvertAllUses=*/true) &&
4971 IC.canFreelyInvertAllUsersOf(V: I, IgnoredUser);
4972}
4973
4974static Value *freelyInvert(InstCombinerImpl &IC, Value *Op,
4975 Instruction *IgnoredUser) {
4976 auto *I = cast<Instruction>(Val: Op);
4977 IC.Builder.SetInsertPoint(*I->getInsertionPointAfterDef());
4978 Value *NotOp = IC.Builder.CreateNot(V: Op, Name: Op->getName() + ".not");
4979 Op->replaceUsesWithIf(New: NotOp,
4980 ShouldReplace: [NotOp](Use &U) { return U.getUser() != NotOp; });
4981 IC.freelyInvertAllUsersOf(V: NotOp, IgnoredUser);
4982 return NotOp;
4983}
4984
// Transform
//   z = ~(x &/| y)
// into:
//   z = ((~x) |/& (~y))
// iff both x and y are free to invert and all uses of z can be freely updated.
// Returns true if the IR was changed.
bool InstCombinerImpl::sinkNotIntoLogicalOp(Instruction &I) {
  Value *Op0, *Op1;
  if (!match(V: &I, P: m_LogicalOp(L: m_Value(V&: Op0), R: m_Value(V&: Op1))))
    return false;

  // If this logic op has not been simplified yet, just bail out and let that
  // happen first. Otherwise, the code below may wrongly invert.
  if (Op0 == Op1)
    return false;

  // If one of the operands is a user of the other,
  // freelyInvert->freelyInvertAllUsersOf will change the operands of I, which
  // may cause miscompilation.
  if (match(V: Op0, P: m_Not(V: m_Specific(V: Op1))) || match(V: Op1, P: m_Not(V: m_Specific(V: Op0))))
    return false;

  // De Morgan: the inverted logic op uses the opposite opcode.
  Instruction::BinaryOps NewOpc =
      match(V: &I, P: m_LogicalAnd()) ? Instruction::Or : Instruction::And;
  bool IsBinaryOp = isa<BinaryOperator>(Val: I);

  // Can our users be adapted?
  if (!InstCombiner::canFreelyInvertAllUsersOf(V: &I, /*IgnoredUser=*/nullptr))
    return false;

  // And can the operands be adapted?
  if (!canFreelyInvert(IC&: *this, Op: Op0, IgnoredUser: &I) || !canFreelyInvert(IC&: *this, Op: Op1, IgnoredUser: &I))
    return false;

  // All legality checks passed; from here on we mutate the IR, so the checks
  // above must not be reordered below this point.
  Op0 = freelyInvert(IC&: *this, Op: Op0, IgnoredUser: &I);
  Op1 = freelyInvert(IC&: *this, Op: Op1, IgnoredUser: &I);

  Builder.SetInsertPoint(*I.getInsertionPointAfterDef());
  Value *NewLogicOp;
  if (IsBinaryOp) {
    NewLogicOp = Builder.CreateBinOp(Opc: NewOpc, LHS: Op0, RHS: Op1, Name: I.getName() + ".not");
  } else {
    NewLogicOp =
        Builder.CreateLogicalOp(Opc: NewOpc, Cond1: Op0, Cond2: Op1, Name: I.getName() + ".not",
                                MDFrom: ProfcheckDisableMetadataFixes ? nullptr : &I);
    // NOTE(review): the profile metadata copied from I appears to need
    // swapping because the logical op kind changed — confirm against
    // swapProfMetadata's contract.
    if (SelectInst *SI = dyn_cast<SelectInst>(Val: NewLogicOp))
      SI->swapProfMetadata();
  }

  replaceInstUsesWith(I, V: NewLogicOp);
  // We can not just create an outer `not`, it will most likely be immediately
  // folded back, reconstructing our initial pattern, and causing an
  // infinite combine loop, so immediately manually fold it away.
  freelyInvertAllUsersOf(V: NewLogicOp);
  return true;
}
5040
// Transform
//   z = (~x) &/| y
// into:
//   z = ~(x |/& (~y))
// iff y is free to invert and all uses of z can be freely updated.
// Returns true if the IR was changed.
bool InstCombinerImpl::sinkNotIntoOtherHandOfLogicalOp(Instruction &I) {
  Value *Op0, *Op1;
  if (!match(V: &I, P: m_LogicalOp(L: m_Value(V&: Op0), R: m_Value(V&: Op1))))
    return false;
  // De Morgan: the op under the sunk 'not' uses the opposite opcode.
  Instruction::BinaryOps NewOpc =
      match(V: &I, P: m_LogicalAnd()) ? Instruction::Or : Instruction::And;
  bool IsBinaryOp = isa<BinaryOperator>(Val: I);

  // Find which operand is a 'not'; strip it (keeping the un-inverted value)
  // and remember the *other* operand as the one we must invert.
  Value *NotOp0 = nullptr;
  Value *NotOp1 = nullptr;
  Value **OpToInvert = nullptr;
  if (match(V: Op0, P: m_Not(V: m_Value(V&: NotOp0))) && canFreelyInvert(IC&: *this, Op: Op1, IgnoredUser: &I)) {
    Op0 = NotOp0;
    OpToInvert = &Op1;
  } else if (match(V: Op1, P: m_Not(V: m_Value(V&: NotOp1))) &&
             canFreelyInvert(IC&: *this, Op: Op0, IgnoredUser: &I)) {
    Op1 = NotOp1;
    OpToInvert = &Op0;
  } else
    return false;

  // And can our users be adapted?
  if (!InstCombiner::canFreelyInvertAllUsersOf(V: &I, /*IgnoredUser=*/nullptr))
    return false;

  // All legality checks passed; from here on we mutate the IR.
  *OpToInvert = freelyInvert(IC&: *this, Op: *OpToInvert, IgnoredUser: &I);

  Builder.SetInsertPoint(*I.getInsertionPointAfterDef());
  Value *NewBinOp;
  if (IsBinaryOp)
    NewBinOp = Builder.CreateBinOp(Opc: NewOpc, LHS: Op0, RHS: Op1, Name: I.getName() + ".not");
  else
    NewBinOp = Builder.CreateLogicalOp(Opc: NewOpc, Cond1: Op0, Cond2: Op1, Name: I.getName() + ".not");
  replaceInstUsesWith(I, V: NewBinOp);
  // We can not just create an outer `not`, it will most likely be immediately
  // folded back, reconstructing our initial pattern, and causing an
  // infinite combine loop, so immediately manually fold it away.
  freelyInvertAllUsersOf(V: NewBinOp);
  return true;
}
5086
5087Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
5088 Value *NotOp;
5089 if (!match(V: &I, P: m_Not(V: m_Value(V&: NotOp))))
5090 return nullptr;
5091
5092 // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
5093 // We must eliminate the and/or (one-use) for these transforms to not increase
5094 // the instruction count.
5095 //
5096 // ~(~X & Y) --> (X | ~Y)
5097 // ~(Y & ~X) --> (X | ~Y)
5098 //
5099 // Note: The logical matches do not check for the commuted patterns because
5100 // those are handled via SimplifySelectsFeedingBinaryOp().
5101 Type *Ty = I.getType();
5102 Value *X, *Y;
5103 if (match(V: NotOp, P: m_OneUse(SubPattern: m_c_And(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))) {
5104 Value *NotY = Builder.CreateNot(V: Y, Name: Y->getName() + ".not");
5105 return BinaryOperator::CreateOr(V1: X, V2: NotY);
5106 }
5107 if (match(V: NotOp, P: m_OneUse(SubPattern: m_LogicalAnd(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))) {
5108 Value *NotY = Builder.CreateNot(V: Y, Name: Y->getName() + ".not");
5109 SelectInst *SI = SelectInst::Create(
5110 C: X, S1: ConstantInt::getTrue(Ty), S2: NotY, NameStr: "", InsertBefore: nullptr,
5111 MDFrom: ProfcheckDisableMetadataFixes ? nullptr : cast<Instruction>(Val: NotOp));
5112 SI->swapProfMetadata();
5113 return SI;
5114 }
5115
5116 // ~(~X | Y) --> (X & ~Y)
5117 // ~(Y | ~X) --> (X & ~Y)
5118 if (match(V: NotOp, P: m_OneUse(SubPattern: m_c_Or(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))) {
5119 Value *NotY = Builder.CreateNot(V: Y, Name: Y->getName() + ".not");
5120 return BinaryOperator::CreateAnd(V1: X, V2: NotY);
5121 }
5122 if (match(V: NotOp, P: m_OneUse(SubPattern: m_LogicalOr(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))) {
5123 Value *NotY = Builder.CreateNot(V: Y, Name: Y->getName() + ".not");
5124 SelectInst *SI = SelectInst::Create(
5125 C: X, S1: NotY, S2: ConstantInt::getFalse(Ty), NameStr: "", InsertBefore: nullptr,
5126 MDFrom: ProfcheckDisableMetadataFixes ? nullptr : cast<Instruction>(Val: NotOp));
5127 SI->swapProfMetadata();
5128 return SI;
5129 }
5130
5131 // Is this a 'not' (~) fed by a binary operator?
5132 BinaryOperator *NotVal;
5133 if (match(V: NotOp, P: m_BinOp(I&: NotVal))) {
5134 // ~((-X) | Y) --> (X - 1) & (~Y)
5135 if (match(V: NotVal,
5136 P: m_OneUse(SubPattern: m_c_Or(L: m_OneUse(SubPattern: m_Neg(V: m_Value(V&: X))), R: m_Value(V&: Y))))) {
5137 Value *DecX = Builder.CreateAdd(LHS: X, RHS: ConstantInt::getAllOnesValue(Ty));
5138 Value *NotY = Builder.CreateNot(V: Y);
5139 return BinaryOperator::CreateAnd(V1: DecX, V2: NotY);
5140 }
5141
5142 // ~(~X >>s Y) --> (X >>s Y)
5143 if (match(V: NotVal, P: m_AShr(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))
5144 return BinaryOperator::CreateAShr(V1: X, V2: Y);
5145
5146 // Treat lshr with non-negative operand as ashr.
5147 // ~(~X >>u Y) --> (X >>s Y) iff X is known negative
5148 if (match(V: NotVal, P: m_LShr(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))) &&
5149 isKnownNegative(V: X, SQ: SQ.getWithInstruction(I: NotVal)))
5150 return BinaryOperator::CreateAShr(V1: X, V2: Y);
5151
5152 // Bit-hack form of a signbit test for iN type:
5153 // ~(X >>s (N - 1)) --> sext i1 (X > -1) to iN
5154 unsigned FullShift = Ty->getScalarSizeInBits() - 1;
5155 if (match(V: NotVal, P: m_OneUse(SubPattern: m_AShr(L: m_Value(V&: X), R: m_SpecificInt(V: FullShift))))) {
5156 Value *IsNotNeg = Builder.CreateIsNotNeg(Arg: X, Name: "isnotneg");
5157 return new SExtInst(IsNotNeg, Ty);
5158 }
5159
5160 // If we are inverting a right-shifted constant, we may be able to eliminate
5161 // the 'not' by inverting the constant and using the opposite shift type.
5162 // Canonicalization rules ensure that only a negative constant uses 'ashr',
5163 // but we must check that in case that transform has not fired yet.
5164
5165 // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
5166 Constant *C;
5167 if (match(V: NotVal, P: m_AShr(L: m_Constant(C), R: m_Value(V&: Y))) &&
5168 match(V: C, P: m_Negative()))
5169 return BinaryOperator::CreateLShr(V1: ConstantExpr::getNot(C), V2: Y);
5170
5171 // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
5172 if (match(V: NotVal, P: m_LShr(L: m_Constant(C), R: m_Value(V&: Y))) &&
5173 match(V: C, P: m_NonNegative()))
5174 return BinaryOperator::CreateAShr(V1: ConstantExpr::getNot(C), V2: Y);
5175
5176 // ~(X + C) --> ~C - X
5177 if (match(V: NotVal, P: m_Add(L: m_Value(V&: X), R: m_ImmConstant(C))))
5178 return BinaryOperator::CreateSub(V1: ConstantExpr::getNot(C), V2: X);
5179
5180 // ~(X - Y) --> ~X + Y
5181 // FIXME: is it really beneficial to sink the `not` here?
5182 if (match(V: NotVal, P: m_Sub(L: m_Value(V&: X), R: m_Value(V&: Y))))
5183 if (isa<Constant>(Val: X) || NotVal->hasOneUse())
5184 return BinaryOperator::CreateAdd(V1: Builder.CreateNot(V: X), V2: Y);
5185
5186 // ~(~X + Y) --> X - Y
5187 if (match(V: NotVal, P: m_c_Add(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))
5188 return BinaryOperator::CreateWithCopiedFlags(Opc: Instruction::Sub, V1: X, V2: Y,
5189 CopyO: NotVal);
5190 }
5191
5192 // not (cmp A, B) = !cmp A, B
5193 CmpPredicate Pred;
5194 if (match(V: NotOp, P: m_Cmp(Pred, L: m_Value(), R: m_Value())) &&
5195 (NotOp->hasOneUse() ||
5196 InstCombiner::canFreelyInvertAllUsersOf(V: cast<Instruction>(Val: NotOp),
5197 /*IgnoredUser=*/nullptr))) {
5198 cast<CmpInst>(Val: NotOp)->setPredicate(CmpInst::getInversePredicate(pred: Pred));
5199 freelyInvertAllUsersOf(V: NotOp);
5200 return &I;
5201 }
5202
5203 // not (bitcast (cmp A, B) --> bitcast (!cmp A, B)
5204 if (match(V: NotOp, P: m_OneUse(SubPattern: m_BitCast(Op: m_Value(V&: X)))) &&
5205 match(V: X, P: m_OneUse(SubPattern: m_Cmp(Pred, L: m_Value(), R: m_Value())))) {
5206 cast<CmpInst>(Val: X)->setPredicate(CmpInst::getInversePredicate(pred: Pred));
5207 return new BitCastInst(X, Ty);
5208 }
5209
5210 // Move a 'not' ahead of casts of a bool to enable logic reduction:
5211 // not (bitcast (sext i1 X)) --> bitcast (sext (not i1 X))
5212 if (match(V: NotOp, P: m_OneUse(SubPattern: m_BitCast(Op: m_OneUse(SubPattern: m_SExt(Op: m_Value(V&: X)))))) &&
5213 X->getType()->isIntOrIntVectorTy(BitWidth: 1)) {
5214 Type *SextTy = cast<BitCastOperator>(Val: NotOp)->getSrcTy();
5215 Value *NotX = Builder.CreateNot(V: X);
5216 Value *Sext = Builder.CreateSExt(V: NotX, DestTy: SextTy);
5217 return new BitCastInst(Sext, Ty);
5218 }
5219
5220 if (auto *NotOpI = dyn_cast<Instruction>(Val: NotOp))
5221 if (sinkNotIntoLogicalOp(I&: *NotOpI))
5222 return &I;
5223
5224 // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
5225 // ~min(~X, ~Y) --> max(X, Y)
5226 // ~max(~X, Y) --> min(X, ~Y)
5227 auto *II = dyn_cast<IntrinsicInst>(Val: NotOp);
5228 if (II && II->hasOneUse()) {
5229 if (match(V: NotOp, P: m_c_MaxOrMin(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y)))) {
5230 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMaxID: II->getIntrinsicID());
5231 Value *NotY = Builder.CreateNot(V: Y);
5232 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(ID: InvID, LHS: X, RHS: NotY);
5233 return replaceInstUsesWith(I, V: InvMaxMin);
5234 }
5235
5236 if (II->getIntrinsicID() == Intrinsic::is_fpclass) {
5237 ConstantInt *ClassMask = cast<ConstantInt>(Val: II->getArgOperand(i: 1));
5238 II->setArgOperand(
5239 i: 1, v: ConstantInt::get(Ty: ClassMask->getType(),
5240 V: ~ClassMask->getZExtValue() & fcAllFlags));
5241 return replaceInstUsesWith(I, V: II);
5242 }
5243 }
5244
5245 if (NotOp->hasOneUse()) {
5246 // Pull 'not' into operands of select if both operands are one-use compares
5247 // or one is one-use compare and the other one is a constant.
5248 // Inverting the predicates eliminates the 'not' operation.
5249 // Example:
5250 // not (select ?, (cmp TPred, ?, ?), (cmp FPred, ?, ?) -->
5251 // select ?, (cmp InvTPred, ?, ?), (cmp InvFPred, ?, ?)
5252 // not (select ?, (cmp TPred, ?, ?), true -->
5253 // select ?, (cmp InvTPred, ?, ?), false
5254 if (auto *Sel = dyn_cast<SelectInst>(Val: NotOp)) {
5255 Value *TV = Sel->getTrueValue();
5256 Value *FV = Sel->getFalseValue();
5257 auto *CmpT = dyn_cast<CmpInst>(Val: TV);
5258 auto *CmpF = dyn_cast<CmpInst>(Val: FV);
5259 bool InvertibleT = (CmpT && CmpT->hasOneUse()) || isa<Constant>(Val: TV);
5260 bool InvertibleF = (CmpF && CmpF->hasOneUse()) || isa<Constant>(Val: FV);
5261 if (InvertibleT && InvertibleF) {
5262 if (CmpT)
5263 CmpT->setPredicate(CmpT->getInversePredicate());
5264 else
5265 Sel->setTrueValue(ConstantExpr::getNot(C: cast<Constant>(Val: TV)));
5266 if (CmpF)
5267 CmpF->setPredicate(CmpF->getInversePredicate());
5268 else
5269 Sel->setFalseValue(ConstantExpr::getNot(C: cast<Constant>(Val: FV)));
5270 return replaceInstUsesWith(I, V: Sel);
5271 }
5272 }
5273 }
5274
5275 if (Instruction *NewXor = foldNotXor(I, Builder))
5276 return NewXor;
5277
5278 // TODO: Could handle multi-use better by checking if all uses of NotOp (other
5279 // than I) can be inverted.
5280 if (Value *R = getFreelyInverted(V: NotOp, WillInvertAllUses: NotOp->hasOneUse(), Builder: &Builder))
5281 return replaceInstUsesWith(I, V: R);
5282
5283 return nullptr;
5284}
5285
5286 // ((X + C) & M) ^ M --> (~C - X) & M
//
// Why this holds: xor with M flips exactly the bits kept by the 'and', so
// ((X + C) & M) ^ M == ~(X + C) & M. Since ~(X + C) == ~C - X (two's
// complement identity), the explicit 'not' disappears into the constant and
// the add becomes a subtract from ~C.
//
// Both the 'and' and the 'add' must be one-use so the rewrite does not leave
// duplicated work behind for other users of those intermediate values.
5287 static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
5288 InstCombiner::BuilderTy &Builder) {
5289 Value *X, *Mask;
5290 Constant *AddC;
5291 BinaryOperator *AddInst;
// Match xor(and(add(X, AddC), Mask), Mask); m_CombineAnd captures the add
// instruction itself (for its wrap flags) as well as its operands, and
// m_Deferred requires the second xor operand to be the same Mask value.
5292 if (match(V: &I,
5293 P: m_Xor(L: m_OneUse(SubPattern: m_And(L: m_OneUse(SubPattern: m_CombineAnd(
5294 L: m_BinOp(I&: AddInst),
5295 R: m_Add(L: m_Value(V&: X), R: m_ImmConstant(C&: AddC)))),
5296 R: m_Value(V&: Mask))),
5297 R: m_Deferred(V: Mask)))) {
5298 Value *NotC = Builder.CreateNot(V: AddC);
// NOTE(review): the add's nuw/nsw flags are copied onto the new sub; this
// assumes the no-wrap conditions of 'X + C' and '~C - X' coincide -- confirm.
5299 Value *NewSub = Builder.CreateSub(LHS: NotC, RHS: X, Name: "", HasNUW: AddInst->hasNoUnsignedWrap(),
5300 HasNSW: AddInst->hasNoSignedWrap());
5301 return BinaryOperator::CreateAnd(V1: NewSub, V2: Mask);
5302 }
5303
// No match: leave the instruction for other xor folds.
5304 return nullptr;
5305 }
5306
5307 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
5308 // here. We should standardize that construct where it is needed or choose some
5309 // other way to ensure that commutated variants of patterns are not missed.
/// Visit an 'xor' instruction and apply the full battery of xor folds, in
/// order: generic simplification, associative/commutative canonicalization,
/// vector and phi folds, distributive laws, demanded-bits simplification,
/// 'not' special cases (foldNot), then a long sequence of pattern-specific
/// rewrites. Returns a replacement instruction, '&I' when I was mutated in
/// place, or nullptr when no fold applied. The ordering of these folds
/// matters: earlier canonicalizations establish the forms later folds match.
5310 Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
// Constant folding / trivial identities (x ^ 0, x ^ x, ...) first.
5311 if (Value *V = simplifyXorInst(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1),
5312 Q: SQ.getWithInstruction(I: &I)))
5313 return replaceInstUsesWith(I, V);
5314
5315 if (SimplifyAssociativeOrCommutative(I))
5316 return &I;
5317
5318 if (Instruction *X = foldVectorBinop(Inst&: I))
5319 return X;
5320
5321 if (Instruction *Phi = foldBinopWithPhiOperands(BO&: I))
5322 return Phi;
5323
5324 if (Instruction *NewXor = foldXorToXor(I, Builder))
5325 return NewXor;
5326
5327 // (A&B)^(A&C) -> A&(B^C) etc
5328 if (Value *V = foldUsingDistributiveLaws(I))
5329 return replaceInstUsesWith(I, V);
5330
5331 // See if we can simplify any instructions used by the instruction whose sole
5332 // purpose is to compute bits we don't care about.
5333 if (SimplifyDemandedInstructionBits(Inst&: I))
5334 return &I;
5335
// Handle the special case where this xor is a 'not' (xor with all-ones).
5336 if (Instruction *R = foldNot(I))
5337 return R;
5338
5339 if (Instruction *R = foldBinOpShiftWithShift(I))
5340 return R;
5341
5342 Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);
5343 Value *X, *Y, *M;
5344
5345 // (X | Y) ^ M -> (X ^ M) ^ Y
5346 // (X | Y) ^ M -> (Y ^ M) ^ X
// A disjoint 'or' (operands share no set bits) acts like xor/add, so M may
// be reassociated onto either operand; only do it if one side simplifies.
5347 if (match(V: &I, P: m_c_Xor(L: m_OneUse(SubPattern: m_DisjointOr(L: m_Value(V&: X), R: m_Value(V&: Y))),
5348 R: m_Value(V&: M)))) {
5349 if (Value *XorAC = simplifyXorInst(LHS: X, RHS: M, Q: SQ.getWithInstruction(I: &I)))
5350 return BinaryOperator::CreateXor(V1: XorAC, V2: Y);
5351
5352 if (Value *XorBC = simplifyXorInst(LHS: Y, RHS: M, Q: SQ.getWithInstruction(I: &I)))
5353 return BinaryOperator::CreateXor(V1: XorBC, V2: X);
5354 }
5355
5356 // Fold (X & M) ^ (Y & ~M) -> (X & M) | (Y & ~M)
5357 // This it a special case in haveNoCommonBitsSet, but the computeKnownBits
5358 // calls in there are unnecessary as SimplifyDemandedInstructionBits should
5359 // have already taken care of those cases.
5360 if (match(V: &I, P: m_c_Xor(L: m_c_And(L: m_Not(V: m_Value(V&: M)), R: m_Value()),
5361 R: m_c_And(L: m_Deferred(V: M), R: m_Value())))) {
// The 'disjoint' flag asserts the operands share no set bits. If M could be
// undef, its two uses may resolve to different values, invalidating that
// assertion -- so only emit a plain 'or' in that case.
5362 if (isGuaranteedNotToBeUndef(V: M))
5363 return BinaryOperator::CreateDisjointOr(V1: Op0, V2: Op1);
5364 else
5365 return BinaryOperator::CreateOr(V1: Op0, V2: Op1);
5366 }
5367
5368 if (Instruction *Xor = visitMaskedMerge(I, Builder))
5369 return Xor;
5370
// Folds below apply when the RHS of the xor is a constant.
5371 Constant *C1;
5372 if (match(V: Op1, P: m_Constant(C&: C1))) {
5373 Constant *C2;
5374
5375 if (match(V: Op0, P: m_OneUse(SubPattern: m_Or(L: m_Value(V&: X), R: m_ImmConstant(C&: C2)))) &&
5376 match(V: C1, P: m_ImmConstant())) {
5377 // (X | C2) ^ C1 --> (X & ~C2) ^ (C1^C2)
// replaceUndefsWith/mergeUndefsWith keep per-lane undef elements consistent
// between the new 'and' mask and the new xor constant for vector splats.
5378 C2 = Constant::replaceUndefsWith(
5379 C: C2, Replacement: Constant::getAllOnesValue(Ty: C2->getType()->getScalarType()));
5380 Value *And = Builder.CreateAnd(
5381 LHS: X, RHS: Constant::mergeUndefsWith(C: ConstantExpr::getNot(C: C2), Other: C1));
5382 return BinaryOperator::CreateXor(
5383 V1: And, V2: Constant::mergeUndefsWith(C: ConstantExpr::getXor(C1, C2), Other: C1));
5384 }
5385
5386 // Use DeMorgan and reassociation to eliminate a 'not' op.
5387 if (match(V: Op0, P: m_OneUse(SubPattern: m_Or(L: m_Not(V: m_Value(V&: X)), R: m_Constant(C&: C2))))) {
5388 // (~X | C2) ^ C1 --> ((X & ~C2) ^ -1) ^ C1 --> (X & ~C2) ^ ~C1
5389 Value *And = Builder.CreateAnd(LHS: X, RHS: ConstantExpr::getNot(C: C2));
5390 return BinaryOperator::CreateXor(V1: And, V2: ConstantExpr::getNot(C: C1));
5391 }
5392 if (match(V: Op0, P: m_OneUse(SubPattern: m_And(L: m_Not(V: m_Value(V&: X)), R: m_Constant(C&: C2))))) {
5393 // (~X & C2) ^ C1 --> ((X | ~C2) ^ -1) ^ C1 --> (X | ~C2) ^ ~C1
5394 Value *Or = Builder.CreateOr(LHS: X, RHS: ConstantExpr::getNot(C: C2));
5395 return BinaryOperator::CreateXor(V1: Or, V2: ConstantExpr::getNot(C: C1));
5396 }
5397
5398 // Convert xor ([trunc] (ashr X, BW-1)), C =>
5399 // select(X >s -1, C, ~C)
5400 // The ashr creates "AllZeroOrAllOne's", which then optionally inverses the
5401 // constant depending on whether this input is less than 0.
5402 const APInt *CA;
// Guard against C1 == -1: that case is a plain 'not' of the ashr and is
// already handled (better) by foldNot; C1 == 0 is impossible after
// simplifyXorInst, hence the assert below.
5403 if (match(V: Op0, P: m_OneUse(SubPattern: m_TruncOrSelf(
5404 Op: m_AShr(L: m_Value(V&: X), R: m_APIntAllowPoison(Res&: CA))))) &&
5405 *CA == X->getType()->getScalarSizeInBits() - 1 &&
5406 !match(V: C1, P: m_AllOnes())) {
5407 assert(!C1->isNullValue() && "Unexpected xor with 0");
5408 Value *IsNotNeg = Builder.CreateIsNotNeg(Arg: X);
5409 return createSelectInstWithUnknownProfile(C: IsNotNeg, S1: Op1,
5410 S2: Builder.CreateNot(V: Op1));
5411 }
5412 }
5413
5414 Type *Ty = I.getType();
5415 {
// Folds on a fully-known integer RHS (no undef lanes).
5416 const APInt *RHSC;
5417 if (match(V: Op1, P: m_APInt(Res&: RHSC))) {
5418 Value *X;
5419 const APInt *C;
5420 // (C - X) ^ signmaskC --> (C + signmaskC) - X
5421 if (RHSC->isSignMask() && match(V: Op0, P: m_Sub(L: m_APInt(Res&: C), R: m_Value(V&: X))))
5422 return BinaryOperator::CreateSub(V1: ConstantInt::get(Ty, V: *C + *RHSC), V2: X);
5423
5424 // (X + C) ^ signmaskC --> X + (C + signmaskC)
// Valid because xor with the sign mask is the same as adding it (the sign
// bit has no carry-out), so it commutes into the add/sub constant.
5425 if (RHSC->isSignMask() && match(V: Op0, P: m_Add(L: m_Value(V&: X), R: m_APInt(Res&: C))))
5426 return BinaryOperator::CreateAdd(V1: X, V2: ConstantInt::get(Ty, V: *C + *RHSC));
5427
5428 // (X | C) ^ RHSC --> X ^ (C ^ RHSC) iff X & C == 0
5429 if (match(V: Op0, P: m_Or(L: m_Value(V&: X), R: m_APInt(Res&: C))) &&
5430 MaskedValueIsZero(V: X, Mask: *C, CxtI: &I))
5431 return BinaryOperator::CreateXor(V1: X, V2: ConstantInt::get(Ty, V: *C ^ *RHSC));
5432
5433 // When X is a power-of-two or zero and zero input is poison:
5434 // ctlz(i32 X) ^ 31 --> cttz(X)
5435 // cttz(i32 X) ^ 31 --> ctlz(X)
5436 auto *II = dyn_cast<IntrinsicInst>(Val: Op0);
5437 if (II && II->hasOneUse() && *RHSC == Ty->getScalarSizeInBits() - 1) {
5438 Intrinsic::ID IID = II->getIntrinsicID();
// Arg 1 == true means "zero input is poison", which the equivalence needs.
5439 if ((IID == Intrinsic::ctlz || IID == Intrinsic::cttz) &&
5440 match(V: II->getArgOperand(i: 1), P: m_One()) &&
5441 isKnownToBeAPowerOfTwo(V: II->getArgOperand(i: 0), /*OrZero */ true)) {
5442 IID = (IID == Intrinsic::ctlz) ? Intrinsic::cttz : Intrinsic::ctlz;
5443 Function *F =
5444 Intrinsic::getOrInsertDeclaration(M: II->getModule(), id: IID, Tys: Ty);
5445 return CallInst::Create(Func: F, Args: {II->getArgOperand(i: 0), Builder.getTrue()});
5446 }
5447 }
5448
5449 // If RHSC is inverting the remaining bits of shifted X,
5450 // canonicalize to a 'not' before the shift to help SCEV and codegen:
5451 // (X << C) ^ RHSC --> ~X << C
5452 if (match(V: Op0, P: m_OneUse(SubPattern: m_Shl(L: m_Value(V&: X), R: m_APInt(Res&: C)))) &&
5453 *RHSC == APInt::getAllOnes(numBits: Ty->getScalarSizeInBits()).shl(ShiftAmt: *C)) {
5454 Value *NotX = Builder.CreateNot(V: X);
5455 return BinaryOperator::CreateShl(V1: NotX, V2: ConstantInt::get(Ty, V: *C));
5456 }
5457 // (X >>u C) ^ RHSC --> ~X >>u C
5458 if (match(V: Op0, P: m_OneUse(SubPattern: m_LShr(L: m_Value(V&: X), R: m_APInt(Res&: C)))) &&
5459 *RHSC == APInt::getAllOnes(numBits: Ty->getScalarSizeInBits()).lshr(ShiftAmt: *C)) {
5460 Value *NotX = Builder.CreateNot(V: X);
5461 return BinaryOperator::CreateLShr(V1: NotX, V2: ConstantInt::get(Ty, V: *C));
5462 }
5463 // TODO: We could handle 'ashr' here as well. That would be matching
5464 // a 'not' op and moving it before the shift. Doing that requires
5465 // preventing the inverse fold in canShiftBinOpWithConstantRHS().
5466 }
5467
5468 // If we are XORing the sign bit of a floating-point value, convert
5469 // this to fneg, then cast back to integer.
5470 //
5471 // This is generous interpretation of noimplicitfloat, this is not a true
5472 // floating-point operation.
5473 //
5474 // Assumes any IEEE-represented type has the sign bit in the high bit.
5475 // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt
5476 Value *CastOp;
5477 if (match(V: Op0, P: m_ElementWiseBitCast(Op: m_Value(V&: CastOp))) &&
5478 match(V: Op1, P: m_SignMask()) &&
5479 !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
5480 Kind: Attribute::NoImplicitFloat)) {
5481 Type *EltTy = CastOp->getType()->getScalarType();
5482 if (EltTy->isFloatingPointTy() &&
5483 APFloat::hasSignBitInMSB(EltTy->getFltSemantics())) {
5484 Value *FNeg = Builder.CreateFNeg(V: CastOp);
5485 return new BitCastInst(FNeg, I.getType());
5486 }
5487 }
5488 }
5489
5490 // FIXME: This should not be limited to scalar (pull into APInt match above).
5491 {
// Note: these C1/C2/C3 ConstantInts shadow the outer 'Constant *C1' above.
5492 Value *X;
5493 ConstantInt *C1, *C2, *C3;
5494 // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
5495 if (match(V: Op1, P: m_ConstantInt(CI&: C3)) &&
5496 match(V: Op0, P: m_LShr(L: m_Xor(L: m_Value(V&: X), R: m_ConstantInt(CI&: C1)),
5497 R: m_ConstantInt(CI&: C2))) &&
5498 Op0->hasOneUse()) {
5499 // fold (C1 >> C2) ^ C3
5500 APInt FoldConst = C1->getValue().lshr(ShiftAmt: C2->getValue());
5501 FoldConst ^= C3->getValue();
5502 // Prepare the two operands.
5503 auto *Opnd0 = Builder.CreateLShr(LHS: X, RHS: C2);
5504 Opnd0->takeName(V: Op0);
5505 return BinaryOperator::CreateXor(V1: Opnd0, V2: ConstantInt::get(Ty, V: FoldConst));
5506 }
5507 }
5508
5509 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
5510 return FoldedLogic;
5511
5512 if (Instruction *FoldedLogic = foldBinOpSelectBinOp(Op&: I))
5513 return FoldedLogic;
5514
5515 // Y ^ (X | Y) --> X & ~Y
5516 // Y ^ (Y | X) --> X & ~Y
5517 if (match(V: Op1, P: m_OneUse(SubPattern: m_c_Or(L: m_Value(V&: X), R: m_Specific(V: Op0)))))
5518 return BinaryOperator::CreateAnd(V1: X, V2: Builder.CreateNot(V: Op0));
5519 // (X | Y) ^ Y --> X & ~Y
5520 // (Y | X) ^ Y --> X & ~Y
5521 if (match(V: Op0, P: m_OneUse(SubPattern: m_c_Or(L: m_Value(V&: X), R: m_Specific(V: Op1)))))
5522 return BinaryOperator::CreateAnd(V1: X, V2: Builder.CreateNot(V: Op1));
5523
5524 // Y ^ (X & Y) --> ~X & Y
5525 // Y ^ (Y & X) --> ~X & Y
5526 if (match(V: Op1, P: m_OneUse(SubPattern: m_c_And(L: m_Value(V&: X), R: m_Specific(V: Op0)))))
5527 return BinaryOperator::CreateAnd(V1: Op0, V2: Builder.CreateNot(V: X));
5528 // (X & Y) ^ Y --> ~X & Y
5529 // (Y & X) ^ Y --> ~X & Y
5530 // Canonical form is (X & C) ^ C; don't touch that.
5531 // TODO: A 'not' op is better for analysis and codegen, but demanded bits must
5532 // be fixed to prefer that (otherwise we get infinite looping).
5533 if (!match(V: Op1, P: m_Constant()) &&
5534 match(V: Op0, P: m_OneUse(SubPattern: m_c_And(L: m_Value(V&: X), R: m_Specific(V: Op1)))))
5535 return BinaryOperator::CreateAnd(V1: Op1, V2: Builder.CreateNot(V: X));
5536
5537 Value *A, *B, *C;
5538 // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants.
5539 if (match(V: &I, P: m_c_Xor(L: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))),
5540 R: m_OneUse(SubPattern: m_c_Or(L: m_Deferred(V: A), R: m_Value(V&: C))))))
5541 return BinaryOperator::CreateXor(
5542 V1: Builder.CreateAnd(LHS: Builder.CreateNot(V: A), RHS: C), V2: B);
5543
5544 // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants.
5545 if (match(V: &I, P: m_c_Xor(L: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))),
5546 R: m_OneUse(SubPattern: m_c_Or(L: m_Deferred(V: B), R: m_Value(V&: C))))))
5547 return BinaryOperator::CreateXor(
5548 V1: Builder.CreateAnd(LHS: Builder.CreateNot(V: B), RHS: C), V2: A);
5549
5550 // (A & B) ^ (A ^ B) -> (A | B)
5551 if (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) &&
5552 match(V: Op1, P: m_c_Xor(L: m_Specific(V: A), R: m_Specific(V: B))))
5553 return BinaryOperator::CreateOr(V1: A, V2: B);
5554 // (A ^ B) ^ (A & B) -> (A | B)
5555 if (match(V: Op0, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))) &&
5556 match(V: Op1, P: m_c_And(L: m_Specific(V: A), R: m_Specific(V: B))))
5557 return BinaryOperator::CreateOr(V1: A, V2: B);
5558
5559 // (A & ~B) ^ ~A -> ~(A & B)
5560 // (~B & A) ^ ~A -> ~(A & B)
5561 if (match(V: Op0, P: m_c_And(L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B)))) &&
5562 match(V: Op1, P: m_Not(V: m_Specific(V: A))))
5563 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: A, RHS: B));
5564
5565 // (~A & B) ^ A --> A | B -- There are 4 commuted variants.
5566 if (match(V: &I, P: m_c_Xor(L: m_c_And(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B)), R: m_Deferred(V: A))))
5567 return BinaryOperator::CreateOr(V1: A, V2: B);
5568
5569 // (~A | B) ^ A --> ~(A & B)
5570 if (match(V: Op0, P: m_OneUse(SubPattern: m_c_Or(L: m_Not(V: m_Specific(V: Op1)), R: m_Value(V&: B)))))
5571 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: Op1, RHS: B));
5572
5573 // A ^ (~A | B) --> ~(A & B)
5574 if (match(V: Op1, P: m_OneUse(SubPattern: m_c_Or(L: m_Not(V: m_Specific(V: Op0)), R: m_Value(V&: B)))))
5575 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: Op0, RHS: B));
5576
5577 // (A | B) ^ (A | C) --> (B ^ C) & ~A -- There are 4 commuted variants.
5578 // TODO: Loosen one-use restriction if common operand is a constant.
5579 Value *D;
5580 if (match(V: Op0, P: m_OneUse(SubPattern: m_Or(L: m_Value(V&: A), R: m_Value(V&: B)))) &&
5581 match(V: Op1, P: m_OneUse(SubPattern: m_Or(L: m_Value(V&: C), R: m_Value(V&: D))))) {
// Normalize the captures so the shared operand ends up in A and D; the
// swaps below cover all four commuted placements of the common value.
5582 if (B == C || B == D)
5583 std::swap(a&: A, b&: B);
5584 if (A == C)
5585 std::swap(a&: C, b&: D);
5586 if (A == D) {
5587 Value *NotA = Builder.CreateNot(V: A);
5588 return BinaryOperator::CreateAnd(V1: Builder.CreateXor(LHS: B, RHS: C), V2: NotA);
5589 }
5590 }
5591
5592 // (A & B) ^ (A | C) --> A ? ~B : C -- There are 4 commuted variants.
// i1-only: m_LogicalAnd/m_LogicalOr also match select-form and/or, so the
// result may replace short-circuiting selects, not just bitwise ops.
5593 if (I.getType()->isIntOrIntVectorTy(BitWidth: 1) &&
5594 match(V: &I, P: m_c_Xor(L: m_OneUse(SubPattern: m_LogicalAnd(L: m_Value(V&: A), R: m_Value(V&: B))),
5595 R: m_OneUse(SubPattern: m_LogicalOr(L: m_Value(V&: C), R: m_Value(V&: D)))))) {
// NOTE(review): A is frozen when both inputs were selects sharing B == D,
// presumably so a possibly-poison A is not duplicated into both the new
// select condition and its arms -- confirm the exact poison-safety argument.
5596 bool NeedFreeze = isa<SelectInst>(Val: Op0) && isa<SelectInst>(Val: Op1) && B == D;
// MDFrom tracks which original instruction supplies profile metadata for the
// new select; it is cleared when no matched select corresponds to it.
5597 Instruction *MDFrom = cast<Instruction>(Val: Op0);
5598 if (B == C || B == D) {
5599 std::swap(a&: A, b&: B);
5600 MDFrom = B == C ? cast<Instruction>(Val: Op1) : nullptr;
5601 }
5602 if (A == C)
5603 std::swap(a&: C, b&: D);
5604 if (A == D) {
5605 if (NeedFreeze)
5606 A = Builder.CreateFreeze(V: A);
5607 Value *NotB = Builder.CreateNot(V: B);
5608 return MDFrom == nullptr || ProfcheckDisableMetadataFixes
5609 ? createSelectInstWithUnknownProfile(C: A, S1: NotB, S2: C)
5610 : SelectInst::Create(C: A, S1: NotB, S2: C, NameStr: "", InsertBefore: nullptr, MDFrom);
5611 }
5612 }
5613
// xor of two icmps can often fold to a single (possibly inverted) icmp.
5614 if (auto *LHS = dyn_cast<ICmpInst>(Val: I.getOperand(i_nocapture: 0)))
5615 if (auto *RHS = dyn_cast<ICmpInst>(Val: I.getOperand(i_nocapture: 1)))
5616 if (Value *V = foldXorOfICmps(LHS, RHS, I))
5617 return replaceInstUsesWith(I, V);
5618
5619 if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
5620 return CastedXor;
5621
5622 if (Instruction *Abs = canonicalizeAbs(Xor&: I, Builder))
5623 return Abs;
5624
5625 // Otherwise, if all else failed, try to hoist the xor-by-constant:
5626 // (X ^ C) ^ Y --> (X ^ Y) ^ C
5627 // Just like we do in other places, we completely avoid the fold
5628 // for constantexprs, at least to avoid endless combine loop.
5629 if (match(V: &I, P: m_c_Xor(L: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: X, Match: m_Unless(M: m_ConstantExpr())),
5630 R: m_ImmConstant(C&: C1))),
5631 R: m_Value(V&: Y))))
5632 return BinaryOperator::CreateXor(V1: Builder.CreateXor(LHS: X, RHS: Y), V2: C1);
5633
5634 if (Instruction *R = reassociateForUses(BO&: I, Builder))
5635 return R;
5636
5637 if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
5638 return Canonicalized;
5639
5640 if (Instruction *Folded = foldLogicOfIsFPClass(BO&: I, Op0, Op1))
5641 return Folded;
5642
5643 if (Instruction *Folded = canonicalizeConditionalNegationViaMathToSelect(I))
5644 return Folded;
5645
5646 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
5647 return Res;
5648
5649 if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
5650 return Res;
5651
5652 if (Instruction *Res = foldMaskedAddXorPattern(I, Builder))
5653 return Res;
5654
// Nothing matched; leave the xor untouched for later passes/iterations.
5655 return nullptr;
5656 }
5657