//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/FloatingPointPredicateUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
/// This is the complement of getICmpCode, which turns an opcode and two
/// operands into either a constant true or false, or a brand new ICmp
/// instruction. The sign is passed in to determine which kind of predicate to
/// use in the new icmp instruction.
static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForICmpCode(Code, Sign, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateICmp(NewPred, LHS, RHS);
}

/// This is the complement of getFCmpCode, which turns an opcode and two
/// operands into either a FCmp instruction, or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy &Builder, FMFSource FMF) {
  FCmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForFCmpCode(Code, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateFCmpFMF(NewPred, LHS, RHS, FMF);
}

/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo < Hi. IsSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.
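/// For example, with Lo = 5, Hi = 10 and Inside = true, this emits the single
/// unsigned check (V - 5) u< 5 instead of two separate comparisons.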
Value *InstCombinerImpl::insertRangeTest(Value *V, const APInt &Lo,
                                         const APInt &Hi, bool isSigned,
                                         bool Inside) {
  assert((isSigned ? Lo.slt(Hi) : Lo.ult(Hi)) &&
         "Lo is not < Hi in range emission code!");

  Type *Ty = V->getType();

  // V >= Min && V < Hi --> V < Hi
  // V < Min || V >= Hi --> V >= Hi
  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
    Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
    return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
  }

  // V >= Lo && V < Hi --> V - Lo u< Hi - Lo
  // V < Lo || V >= Hi --> V - Lo u>= Hi - Lo
  Value *VMinusLo =
      Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
  Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
  return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
}

/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
/// "AllOnes" declares that the comparison is true only if (A & B) == A or all
/// bits of A are set in B.
///   Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
///
/// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
/// bits of A are cleared in B.
///   Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
///
/// "Mixed" declares that (A & B) == C and C might or might not contain any
/// number of one bits and zero bits.
///   Example: (icmp eq (A & 3), 1) -> AMask_Mixed
///
/// "Not" means that in above descriptions "==" should be replaced by "!=".
///   Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
///   (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///   (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
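/// For example, with the single-bit mask A = 4, (icmp eq (X & 4), 4) and
/// (icmp ne (X & 4), 0) test exactly the same condition: bit 2 of X is set.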
enum MaskedICmpType {
  AMask_AllOnes = 1,
  AMask_NotAllOnes = 2,
  BMask_AllOnes = 4,
  BMask_NotAllOnes = 8,
  Mask_AllZeros = 16,
  Mask_NotAllZeros = 32,
  AMask_Mixed = 64,
  AMask_NotMixed = 128,
  BMask_Mixed = 256,
  BMask_NotMixed = 512
};

/// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
/// satisfies.
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {
  const APInt *ConstA = nullptr, *ConstB = nullptr, *ConstC = nullptr;
  match(A, m_APInt(ConstA));
  match(B, m_APInt(ConstB));
  match(C, m_APInt(ConstC));
  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
  bool IsAPow2 = ConstA && ConstA->isPowerOf2();
  bool IsBPow2 = ConstB && ConstB->isPowerOf2();
  unsigned MaskVal = 0;
  if (ConstC && ConstC->isZero()) {
    // if C is zero, then both A and B qualify as mask
    MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
                     : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
                       : (AMask_AllOnes | AMask_Mixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
                       : (BMask_AllOnes | BMask_Mixed));
    return MaskVal;
  }

  if (A == C) {
    MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
                     : (AMask_NotAllOnes | AMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
                       : (Mask_AllZeros | AMask_Mixed));
  } else if (ConstA && ConstC && ConstC->isSubsetOf(*ConstA)) {
    MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
  }

  if (B == C) {
    MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
                     : (BMask_NotAllOnes | BMask_NotMixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
                       : (Mask_AllZeros | BMask_Mixed));
  } else if (ConstB && ConstC && ConstC->isSubsetOf(*ConstB)) {
    MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
  }

  return MaskVal;
}

/// Convert an analysis of a masked ICmp into its equivalent if all boolean
/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
/// is adjacent to the corresponding normal flag (recording ==), this just
/// involves swapping those bits over.
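/// For example, AMask_AllOnes (bit value 1) swaps with AMask_NotAllOnes (bit
/// value 2), so conjugating a mask of 1 yields 2 and conjugating 2 yields 1.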
static unsigned conjugateICmpMask(unsigned Mask) {
  unsigned NewMask;
  NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
                     AMask_Mixed | BMask_Mixed))
            << 1;

  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
                      AMask_NotMixed | BMask_NotMixed))
             >> 1;

  return NewMask;
}

// Adapts the external decomposeBitTestICmp for local use.
static bool decomposeBitTestICmp(Value *Cond, CmpInst::Predicate &Pred,
                                 Value *&X, Value *&Y, Value *&Z) {
  auto Res = llvm::decomposeBitTest(Cond, /*LookThroughTrunc=*/true,
                                    /*AllowNonZeroC=*/true);
  if (!Res)
    return false;

  Pred = Res->Pred;
  X = Res->X;
  Y = ConstantInt::get(X->getType(), Res->Mask);
  Z = ConstantInt::get(X->getType(), Res->C);
  return true;
}

/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the pattern classes (from MaskedICmpType) for the left hand side and
/// the right hand side as a pair.
/// LHS and RHS are the left hand side and the right hand side ICmps and PredL
/// and PredR are their predicates, respectively.
static std::optional<std::pair<unsigned, unsigned>>
getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C, Value *&D, Value *&E,
                         Value *LHS, Value *RHS, ICmpInst::Predicate &PredL,
                         ICmpInst::Predicate &PredR) {

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // and L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R**, that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // form above.

  // Check whether the icmp can be decomposed into a bit test.
  Value *L1, *L11, *L12, *L2, *L21, *L22;
  if (decomposeBitTestICmp(LHS, PredL, L11, L12, L2)) {
    L21 = L22 = L1 = nullptr;
  } else {
    auto *LHSCMP = dyn_cast<ICmpInst>(LHS);
    if (!LHSCMP)
      return std::nullopt;

    // Don't allow pointers. Splat vectors are fine.
    if (!LHSCMP->getOperand(0)->getType()->isIntOrIntVectorTy())
      return std::nullopt;

    PredL = LHSCMP->getPredicate();
    L1 = LHSCMP->getOperand(0);
    L2 = LHSCMP->getOperand(1);
    // Look for ANDs in the LHS icmp.
    if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(L1->getType());
    }

    if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
      L21 = L2;
      L22 = Constant::getAllOnesValue(L2->getType());
    }
  }

  // Bail if LHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredL))
    return std::nullopt;

  Value *R11, *R12, *R2;
  if (decomposeBitTestICmp(RHS, PredR, R11, R12, R2)) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
    } else {
      return std::nullopt;
    }
    E = R2;
  } else {
    auto *RHSCMP = dyn_cast<ICmpInst>(RHS);
    if (!RHSCMP)
      return std::nullopt;
    // Don't allow pointers. Splat vectors are fine.
    if (!RHSCMP->getOperand(0)->getType()->isIntOrIntVectorTy())
      return std::nullopt;

    PredR = RHSCMP->getPredicate();

    Value *R1 = RHSCMP->getOperand(0);
    R2 = RHSCMP->getOperand(1);
    bool Ok = false;
    if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R2;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R2;
      Ok = true;
    }

    // Avoid matching against the -1 value we created for unmasked operand.
    if (Ok && match(A, m_AllOnes()))
      Ok = false;

    // Look for ANDs on the right side of the RHS icmp.
    if (!Ok) {
      if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
        R11 = R2;
        R12 = Constant::getAllOnesValue(R2->getType());
      }

      if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
        A = R11;
        D = R12;
        E = R1;
      } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
        A = R12;
        D = R11;
        E = R1;
      } else {
        return std::nullopt;
      }
    }
  }

  // Bail if RHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredR))
    return std::nullopt;

  if (L11 == A) {
    B = L12;
    C = L2;
  } else if (L12 == A) {
    B = L11;
    C = L2;
  } else if (L21 == A) {
    B = L22;
    C = L1;
  } else if (L22 == A) {
    B = L21;
    C = L1;
  }

  unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
  unsigned RightType = getMaskedICmpType(A, D, E, PredR);
  return std::optional<std::pair<unsigned, unsigned>>(
      std::make_pair(LeftType, RightType));
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
/// and the right hand side is of type BMask_Mixed. For example,
/// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
    Value *LHS, Value *RHS, bool IsAnd, Value *A, Value *B, Value *D, Value *E,
    ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    InstCombiner::BuilderTy &Builder) {
  // We are given the canonical form:
  //   (icmp ne (A & B), 0) & (icmp eq (A & D), E).
  // where D & E == E.
  //
  // If IsAnd is false, we get it in negated form:
  //   (icmp eq (A & B), 0) | (icmp ne (A & D), E) ->
  //     !((icmp ne (A & B), 0) & (icmp eq (A & D), E)).
  //
  // We currently handle the case where B, D, and E are constants.
  //
  const APInt *BCst, *DCst, *OrigECst;
  if (!match(B, m_APInt(BCst)) || !match(D, m_APInt(DCst)) ||
      !match(E, m_APInt(OrigECst)))
    return nullptr;

  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;

  // Update E to the canonical form when D is a power of two and RHS is
  // canonicalized as,
  // (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or
  // (icmp ne (A & D), D) -> (icmp eq (A & D), 0).
  APInt ECst = *OrigECst;
  if (PredR != NewCC)
    ECst ^= *DCst;

  // If B or D is zero, skip: LHS or RHS can then be trivially folded by other
  // rules, and this pattern won't apply anymore.
  if (*BCst == 0 || *DCst == 0)
    return nullptr;

  // If B and D don't intersect, i.e. (B & D) == 0, try to fold the isNaN
  // idiom:
  // (icmp ne (A & FractionBits), 0) & (icmp eq (A & ExpBits), ExpBits)
  //   -> isNaN(A)
  // Otherwise, we cannot deduce anything from it.
  if (!BCst->intersects(*DCst)) {
    Value *Src;
    if (*DCst == ECst && match(A, m_ElementWiseBitCast(m_Value(Src))) &&
        !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
            Attribute::StrictFP)) {
      Type *Ty = Src->getType()->getScalarType();
      if (!Ty->isIEEELikeFPTy())
        return nullptr;

      APInt ExpBits = APFloat::getInf(Ty->getFltSemantics()).bitcastToAPInt();
      if (ECst != ExpBits)
        return nullptr;
      APInt FractionBits = ~ExpBits;
      FractionBits.clearSignBit();
      if (*BCst != FractionBits)
        return nullptr;

      return Builder.CreateFCmp(IsAnd ? FCmpInst::FCMP_UNO : FCmpInst::FCMP_ORD,
                                Src, ConstantFP::getZero(Src->getType()));
    }
    return nullptr;
  }

  // If the following two conditions are met:
  //
  // 1. mask B covers only a single bit that's not covered by mask D, that is,
  // (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection of
  // B and D has only one bit set) and,
  //
  // 2. RHS (and E) indicates that the rest of B's bits are zero (in other
  // words, the intersection of B and D is zero), that is, ((B & D) & E) == 0
  //
  // then that single bit in B must be one and thus the whole expression can be
  // folded to
  //   (A & (B | D)) == (B & (B ^ D)) | E.
  //
  // For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9)
  // (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8)
  if ((((*BCst & *DCst) & ECst) == 0) &&
      (*BCst & (*BCst ^ *DCst)).isPowerOf2()) {
    APInt BorD = *BCst | *DCst;
    APInt BandBxorDorE = (*BCst & (*BCst ^ *DCst)) | ECst;
    Value *NewMask = ConstantInt::get(A->getType(), BorD);
    Value *NewMaskedValue = ConstantInt::get(A->getType(), BandBxorDorE);
    Value *NewAnd = Builder.CreateAnd(A, NewMask);
    return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
  }

  auto IsSubSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C1;
  };
  auto IsSuperSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C2;
  };

  // In the following, we consider only the cases where B is a superset of D, B
  // is a subset of D, or B == D because otherwise there's at least one bit
  // covered by B but not D, in which case we can't deduce much from it, so
  // no folding (aside from the single must-be-one bit case right above.)
  // For example,
  // (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding.
  if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
    return nullptr;

  // At this point, either B is a superset of D, B is a subset of D or B == D.

  // If E is zero, if B is a subset of (or equal to) D, LHS and RHS contradict
  // and the whole expression becomes false (or true if negated), otherwise, no
  // folding.
  // For example,
  // (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false.
  // (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding.
  if (ECst.isZero()) {
    if (IsSubSetOrEqual(BCst, DCst))
      return ConstantInt::get(LHS->getType(), !IsAnd);
    return nullptr;
  }

  // At this point, B, D, E aren't zero and (B & D) == B, (B & D) == D or B ==
  // D. If B is a superset of (or equal to) D, since E is not zero, LHS is
  // subsumed by RHS (RHS implies LHS.) So the whole expression becomes
  // RHS. For example,
  // (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  // (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  if (IsSuperSetOrEqual(BCst, DCst)) {
    // We can't guarantee that samesign holds after this fold.
    if (auto *ICmp = dyn_cast<ICmpInst>(RHS))
      ICmp->setSameSign(false);
    return RHS;
  }
  // Otherwise, B is a subset of D. If B and E have a common bit set,
  // i.e. (B & E) != 0, then LHS is subsumed by RHS. For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
  if ((*BCst & ECst) != 0) {
    // We can't guarantee that samesign holds after this fold.
    if (auto *ICmp = dyn_cast<ICmpInst>(RHS))
      ICmp->setSameSign(false);
    return RHS;
  }
  // Otherwise, LHS and RHS contradict and the whole expression becomes false
  // (or true if negated.) For example,
  // (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false.
  // (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false.
  return ConstantInt::get(LHS->getType(), !IsAnd);
}

/// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side and the right hand side
/// aren't of the common mask pattern type.
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmpsAsymmetric(
    Value *LHS, Value *RHS, bool IsAnd, Value *A, Value *B, Value *C, Value *D,
    Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  // Handle Mask_NotAllZeros-BMask_Mixed cases.
  // (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or
  // (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E)
  //    which gets swapped to
  // (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C).
  if (!IsAnd) {
    LHSMask = conjugateICmpMask(LHSMask);
    RHSMask = conjugateICmpMask(RHSMask);
  }
  if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            LHS, RHS, IsAnd, A, B, D, E, PredL, PredR, Builder)) {
      return V;
    }
  } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            RHS, LHS, IsAnd, A, D, B, C, PredR, PredL, Builder)) {
      return V;
    }
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
static Value *foldLogOpOfMaskedICmps(Value *LHS, Value *RHS, bool IsAnd,
                                     bool IsLogical,
                                     InstCombiner::BuilderTy &Builder,
                                     const SimplifyQuery &Q) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate PredL, PredR;
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
  if (!MaskPair)
    return nullptr;
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  unsigned LHSMask = MaskPair->first;
  unsigned RHSMask = MaskPair->second;
  unsigned Mask = LHSMask & RHSMask;
  if (Mask == 0) {
    // Even if the two sides don't share a common pattern, check if folding can
    // still happen.
    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
            Builder))
      return V;
    return nullptr;
  }

  // In full generality:
  //     (icmp (A & B) Op C) | (icmp (A & D) Op E)
  // ==  ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    Mask = conjugateICmpMask(Mask);
  }

  if (Mask & Mask_AllZeros) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    // We can't use C as zero because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D having a single bit set.
    Value *Zero = Constant::getNullValue(A->getType());
    return Builder.CreateICmp(NewCC, NewAnd, Zero);
  }
  if (Mask & BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr);
  }
  if (Mask & AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewAnd1 = Builder.CreateAnd(B, D);
    Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
    return Builder.CreateICmp(NewCC, NewAnd2, A);
  }

  const APInt *ConstB, *ConstD;
  if (match(B, m_APInt(ConstB)) && match(D, m_APInt(ConstD))) {
    if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
      // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
      // (icmp ne (A & B), B) & (icmp ne (A & D), D)
      // -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
      // Only valid if one of the masks is a superset of the other (check "B&D"
      // is the same as either B or D).
      APInt NewMask = *ConstB & *ConstD;
      if (NewMask == *ConstB)
        return LHS;
      if (NewMask == *ConstD) {
        if (IsLogical) {
          if (auto *RHSI = dyn_cast<Instruction>(RHS))
            RHSI->dropPoisonGeneratingFlags();
        }
        return RHS;
      }
    }

    if (Mask & AMask_NotAllOnes) {
      // (icmp ne (A & B), B) & (icmp ne (A & D), D)
      // -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
      // Only valid if one of the masks is a superset of the other (check "B|D"
      // is the same as either B or D).
      APInt NewMask = *ConstB | *ConstD;
      if (NewMask == *ConstB)
        return LHS;
      if (NewMask == *ConstD)
        return RHS;
    }

    if (Mask & (BMask_Mixed | BMask_NotMixed)) {
      // Mixed:
      // (icmp eq (A & B), C) & (icmp eq (A & D), E)
      // We already know that B & C == C && D & E == E.
      // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
      // C and E, which are shared by both the mask B and the mask D, don't
      // contradict, then we can transform to
      // -> (icmp eq (A & (B|D)), (C|E))
      // Currently, we only handle the case of B, C, D, and E being constant.
      // We can't simply use C and E because we might actually handle
      //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
      // with B and D having a single bit set.

      // NotMixed:
      // (icmp ne (A & B), C) & (icmp ne (A & D), E)
      // -> (icmp ne (A & (B & D)), (C & E))
      // Check the intersection (B & D) for inequality.
      // Assume that (B & D) == B || (B & D) == D, i.e. B/D is a subset of D/B,
      // and (B & D) & (C ^ E) == 0, i.e. the bits of C and E that are shared
      // by both B and D don't contradict. Note that we can assume
      // (~B & C) == 0 && (~D & E) == 0; an earlier fold should have removed
      // these icmps otherwise.

      const APInt *OldConstC, *OldConstE;
      if (!match(C, m_APInt(OldConstC)) || !match(E, m_APInt(OldConstE)))
        return nullptr;

      auto FoldBMixed = [&](ICmpInst::Predicate CC, bool IsNot) -> Value * {
        CC = IsNot ? CmpInst::getInversePredicate(CC) : CC;
        const APInt ConstC = PredL != CC ? *ConstB ^ *OldConstC : *OldConstC;
        const APInt ConstE = PredR != CC ? *ConstD ^ *OldConstE : *OldConstE;

        if (((*ConstB & *ConstD) & (ConstC ^ ConstE)).getBoolValue())
          return IsNot ? nullptr : ConstantInt::get(LHS->getType(), !IsAnd);

        if (IsNot && !ConstB->isSubsetOf(*ConstD) &&
            !ConstD->isSubsetOf(*ConstB))
          return nullptr;

        APInt BD, CE;
        if (IsNot) {
          BD = *ConstB & *ConstD;
          CE = ConstC & ConstE;
        } else {
          BD = *ConstB | *ConstD;
          CE = ConstC | ConstE;
        }
        Value *NewAnd = Builder.CreateAnd(A, BD);
        Value *CEVal = ConstantInt::get(A->getType(), CE);
        return Builder.CreateICmp(CC, NewAnd, CEVal);
      };

      if (Mask & BMask_Mixed)
        return FoldBMixed(NewCC, false);
      if (Mask & BMask_NotMixed) // can be else also
        return FoldBMixed(NewCC, true);
    }
  }

  // (icmp eq (A & B), 0) | (icmp eq (A & D), 0)
  // -> (icmp ne (A & (B|D)), (B|D))
  // (icmp ne (A & B), 0) & (icmp ne (A & D), 0)
  // -> (icmp eq (A & (B|D)), (B|D))
  // iff B and D are known to be powers of two
  if (Mask & Mask_NotAllZeros &&
      isKnownToBeAPowerOfTwo(B, /*OrZero=*/false, Q) &&
      isKnownToBeAPowerOfTwo(D, /*OrZero=*/false, Q)) {
    // If this is a logical and/or, then we must prevent propagation of a
    // poison value from the RHS by inserting freeze.
    if (IsLogical)
      D = Builder.CreateFreeze(D);
    Value *Mask = Builder.CreateOr(B, D);
    Value *Masked = Builder.CreateAnd(A, Mask);
    return Builder.CreateICmp(NewCC, Masked, Mask);
  }
  return nullptr;
}

/// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                            bool Inverted) {
  // Check the lower range comparison, e.g. x >= 0
  // InstCombine already ensured that if there is a constant it's on the RHS.
  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
  if (!RangeStart)
    return nullptr;

  ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
                               Cmp0->getPredicate());

  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
        (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
    return nullptr;

  ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
                               Cmp1->getPredicate());

  Value *Input = Cmp0->getOperand(0);
  Value *Cmp1Op0 = Cmp1->getOperand(0);
  Value *Cmp1Op1 = Cmp1->getOperand(1);
  Value *RangeEnd;
  if (match(Cmp1Op0, m_SExtOrSelf(m_Specific(Input)))) {
    // For the upper range compare we have: icmp x, n
    Input = Cmp1Op0;
    RangeEnd = Cmp1Op1;
  } else if (match(Cmp1Op1, m_SExtOrSelf(m_Specific(Input)))) {
    // For the upper range compare we have: icmp n, x
    Input = Cmp1Op1;
    RangeEnd = Cmp1Op0;
    Pred1 = ICmpInst::getSwappedPredicate(Pred1);
  } else {
    return nullptr;
  }

  // Check the upper range comparison, e.g. x < n
  ICmpInst::Predicate NewPred;
  switch (Pred1) {
    case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
    case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
    default: return nullptr;
  }

  // This simplification is only valid if the upper range is not negative.
  KnownBits Known = computeKnownBits(RangeEnd, Cmp1);
  if (!Known.isNonNegative())
    return nullptr;

  if (Inverted)
    NewPred = ICmpInst::getInversePredicate(NewPred);

  return Builder.CreateICmp(NewPred, Input, RangeEnd);
}

// (or (icmp eq X, 0), (icmp eq X, Pow2OrZero))
//    -> (icmp eq (and X, Pow2OrZero), X)
// (and (icmp ne X, 0), (icmp ne X, Pow2OrZero))
//    -> (icmp ne (and X, Pow2OrZero), X)
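// For example, if 8 is known to be a power of two or zero:
//   (X == 0) || (X == 8) -> ((X & 8) == X)
// since (X & 8) == X holds exactly when X is 0 or 8.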
static Value *
foldAndOrOfICmpsWithPow2AndWithZero(InstCombiner::BuilderTy &Builder,
                                    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                    const SimplifyQuery &Q) {
  CmpPredicate Pred = IsAnd ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  // Make sure we have right compares for our op.
  if (LHS->getPredicate() != Pred || RHS->getPredicate() != Pred)
    return nullptr;

  // Make it so we can match LHS against the (icmp eq/ne X, 0) just for
  // simplicity.
  if (match(RHS->getOperand(1), m_Zero()))
    std::swap(LHS, RHS);

  Value *Pow2, *Op;
  // Match the desired pattern:
  // LHS: (icmp eq/ne X, 0)
  // RHS: (icmp eq/ne X, Pow2OrZero)
  // Skip if Pow2OrZero is 1. Either way it gets folded to (icmp ugt X, 1) but
  // this form ends up slightly less canonical.
  // We could potentially be more sophisticated than requiring LHS/RHS
  // be one-use. We don't create additional instructions if only one
  // of them is one-use. So cases where one is one-use and the other
  // is two-use might be profitable.
  if (!match(LHS, m_OneUse(m_ICmp(Pred, m_Value(Op), m_Zero()))) ||
      !match(RHS, m_OneUse(m_c_ICmp(Pred, m_Specific(Op), m_Value(Pow2)))) ||
      match(Pow2, m_One()) ||
      !isKnownToBeAPowerOfTwo(Pow2, Q.DL, /*OrZero=*/true, Q.AC, Q.CxtI, Q.DT))
    return nullptr;

  Value *And = Builder.CreateAnd(Op, Pow2);
  return Builder.CreateICmp(Pred, And, Op);
}

/// General pattern:
///   X & Y
///
/// Where Y is checking that all the high bits (covered by a mask 4294967168)
/// are uniform, i.e. %arg & 4294967168 can be either 4294967168 or 0
/// Pattern can be one of:
///   %t = add i32 %arg, 128
///   %r = icmp ult i32 %t, 256
/// Or
///   %t0 = shl i32 %arg, 24
///   %t1 = ashr i32 %t0, 24
///   %r = icmp eq i32 %t1, %arg
/// Or
///   %t0 = trunc i32 %arg to i8
///   %t1 = sext i8 %t0 to i32
///   %r = icmp eq i32 %t1, %arg
/// This pattern is a signed truncation check.
///
/// And X is checking that some bit in that same mask is zero.
/// I.e. can be one of:
///   %r = icmp sgt i32 %arg, -1
/// Or
///   %t = and i32 %arg, 2147483648
///   %r = icmp eq i32 %t, 0
///
/// Since we are checking that all the bits in that mask are the same,
/// and a particular bit is zero, what we are really checking is that all the
/// masked bits are zero.
/// So this should be transformed to:
///   %r = icmp ult i32 %arg, 128
static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
                                        Instruction &CxtI,
                                        InstCombiner::BuilderTy &Builder) {
  assert(CxtI.getOpcode() == Instruction::And);

  // Match icmp ult (add %arg, C01), C1 (C1 == C01 << 1; powers of two)
  auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X,
                                            APInt &SignBitMask) -> bool {
    const APInt *I01, *I1; // powers of two; I1 == I01 << 1
    if (!(match(ICmp, m_SpecificICmp(ICmpInst::ICMP_ULT,
                                     m_Add(m_Value(X), m_Power2(I01)),
                                     m_Power2(I1))) &&
          I1->ugt(*I01) && I01->shl(1) == *I1))
      return false;
    // Which bit is the new sign bit as per the 'signed truncation' pattern?
    SignBitMask = *I01;
    return true;
  };

  // One icmp needs to be 'signed truncation check'.
  // We need to match this first, else we will mismatch commutative cases.
  Value *X1;
  APInt HighestBit;
  ICmpInst *OtherICmp;
  if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
    OtherICmp = ICmp0;
  else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
    OtherICmp = ICmp1;
  else
    return nullptr;

  assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)");

  // Try to match/decompose into: icmp eq (X & Mask), 0
  auto tryToDecompose = [](ICmpInst *ICmp, Value *&X,
                           APInt &UnsetBitsMask) -> bool {
    CmpPredicate Pred = ICmp->getPredicate();
    // Can it be decomposed into icmp eq (X & Mask), 0 ?
    auto Res = llvm::decomposeBitTestICmp(
        ICmp->getOperand(0), ICmp->getOperand(1), Pred,
        /*LookThroughTrunc=*/false, /*AllowNonZeroC=*/false,
        /*DecomposeAnd=*/true);
    if (Res && Res->Pred == ICmpInst::ICMP_EQ) {
      X = Res->X;
      UnsetBitsMask = Res->Mask;
      return true;
    }

    return false;
  };

  // And the other icmp needs to be decomposable into a bit test.
  Value *X0;
  APInt UnsetBitsMask;
  if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
    return nullptr;

  assert(!UnsetBitsMask.isZero() && "empty mask makes no sense.");

  // Are they working on the same value?
  Value *X;
  if (X1 == X0) {
    // Ok as is.
    X = X1;
  } else if (match(X0, m_Trunc(m_Specific(X1)))) {
    UnsetBitsMask = UnsetBitsMask.zext(X1->getType()->getScalarSizeInBits());
    X = X1;
  } else
    return nullptr;

  // So which bits should be uniform as per the 'signed truncation check'?
  // (all the bits starting with (i.e. including) HighestBit)
  APInt SignBitsMask = ~(HighestBit - 1U);

  // UnsetBitsMask must have some common bits with SignBitsMask,
  if (!UnsetBitsMask.intersects(SignBitsMask))
    return nullptr;

  // Does UnsetBitsMask contain any bits outside of SignBitsMask?
  if (!UnsetBitsMask.isSubsetOf(SignBitsMask)) {
    APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
    if (!OtherHighestBit.isPowerOf2())
      return nullptr;
    HighestBit = APIntOps::umin(HighestBit, OtherHighestBit);
  }
  // Else, if it does not, then all is ok as-is.

  // %r = icmp ult %X, SignBit
  return Builder.CreateICmpULT(X, ConstantInt::get(X->getType(), HighestBit),
                               CxtI.getName() + ".simplified");
}

/// Fold (icmp eq ctpop(X) 1) | (icmp eq X 0) into (icmp ult ctpop(X) 2) and
/// fold (icmp ne ctpop(X) 1) & (icmp ne X 0) into (icmp ugt ctpop(X) 1).
/// Also used for logical and/or, must be poison safe if range attributes are
/// dropped.
static Value *foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd,
                                   InstCombiner::BuilderTy &Builder,
                                   InstCombinerImpl &IC) {
  CmpPredicate Pred0, Pred1;
  Value *X;
  if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
                          m_SpecificInt(1))) ||
      !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())))
    return nullptr;

  auto *CtPop = cast<Instruction>(Cmp0->getOperand(0));
  if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_NE) {
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(CtPop);
    return Builder.CreateICmpUGT(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_EQ) {
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(CtPop);
    return Builder.CreateICmpULT(CtPop, ConstantInt::get(CtPop->getType(), 2));
  }

  return nullptr;
}

/// Reduce a pair of compares that check if a value has exactly 1 bit set.
/// Also used for logical and/or, must be poison safe if range attributes are
/// dropped.
static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd,
                             InstCombiner::BuilderTy &Builder,
                             InstCombinerImpl &IC) {
  // Handle 'and' / 'or' commutation: make the equality check the first operand.
  if (JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_NE)
    std::swap(Cmp0, Cmp1);
  else if (!JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_EQ)
    std::swap(Cmp0, Cmp1);

  // (X != 0) && (ctpop(X) u< 2) --> ctpop(X) == 1
  Value *X;
  if (JoinedByAnd &&
      match(Cmp0, m_SpecificICmp(ICmpInst::ICMP_NE, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_SpecificICmp(ICmpInst::ICMP_ULT,
                                 m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                                 m_SpecificInt(2)))) {
    auto *CtPop = cast<Instruction>(Cmp1->getOperand(0));
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(CtPop);
    return Builder.CreateICmpEQ(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  // (X == 0) || (ctpop(X) u> 1) --> ctpop(X) != 1
  if (!JoinedByAnd &&
      match(Cmp0, m_SpecificICmp(ICmpInst::ICMP_EQ, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_SpecificICmp(ICmpInst::ICMP_UGT,
                                 m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                                 m_SpecificInt(1)))) {
    auto *CtPop = cast<Instruction>(Cmp1->getOperand(0));
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(CtPop);
    return Builder.CreateICmpNE(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) == 0) & (icmp(A & D) != E) into (icmp A u< D) iff
/// B is a contiguous set of ones starting from the most significant bit
/// (negative power of 2), D and E are equal, and D is a contiguous set of ones
/// starting at the most significant zero bit in B. Parameter B supports masking
/// using undef/poison in either scalar or vector values.
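/// For example (i8): (icmp eq (A & 0xF8), 0) & (icmp ne (A & 0x06), 0x06)
/// -> icmp ult A, 6, since the left check means A u< 8 and the right check
/// excludes the values 6 and 7.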
static Value *foldNegativePower2AndShiftedMask(
    Value *A, Value *B, Value *D, Value *E, ICmpInst::Predicate PredL,
    ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  if (PredL != ICmpInst::ICMP_EQ || PredR != ICmpInst::ICMP_NE)
    return nullptr;

  if (!match(B, m_NegatedPower2()) || !match(D, m_ShiftedMask()) ||
      !match(E, m_ShiftedMask()))
    return nullptr;

  // Test scalar arguments for conversion. B has been validated earlier to be a
  // negative power of two and thus is guaranteed to have one or more contiguous
  // ones starting from the MSB followed by zero or more contiguous zeros. D has
  // been validated earlier to be a shifted set of one or more contiguous ones.
  // In order to match, B leading ones and D leading zeros should be equal. The
  // predicate that B be a negative power of 2 prevents the condition of there
  // ever being zero leading ones. Thus 0 == 0 cannot occur. The predicate that
  // D always be a shifted mask prevents the condition of D equaling 0. This
  // prevents matching the condition where B contains the maximum number of
  // leading one bits (-1) and D contains the maximum number of leading zero
  // bits (0).
  auto isReducible = [](const Value *B, const Value *D, const Value *E) {
    const APInt *BCst, *DCst, *ECst;
    return match(B, m_APIntAllowPoison(BCst)) && match(D, m_APInt(DCst)) &&
           match(E, m_APInt(ECst)) && *DCst == *ECst &&
           (isa<PoisonValue>(B) ||
            (BCst->countLeadingOnes() == DCst->countLeadingZeros()));
  };

  // Test vector type arguments for conversion.
  if (const auto *BVTy = dyn_cast<VectorType>(B->getType())) {
    const auto *BFVTy = dyn_cast<FixedVectorType>(BVTy);
    const auto *BConst = dyn_cast<Constant>(B);
    const auto *DConst = dyn_cast<Constant>(D);
    const auto *EConst = dyn_cast<Constant>(E);

    if (!BFVTy || !BConst || !DConst || !EConst)
      return nullptr;

    for (unsigned I = 0; I != BFVTy->getNumElements(); ++I) {
      const auto *BElt = BConst->getAggregateElement(I);
      const auto *DElt = DConst->getAggregateElement(I);
      const auto *EElt = EConst->getAggregateElement(I);

      if (!BElt || !DElt || !EElt)
        return nullptr;
      if (!isReducible(BElt, DElt, EElt))
        return nullptr;
    }
  } else {
    // Test scalar type arguments for conversion.
    if (!isReducible(B, D, E))
      return nullptr;
  }
  return Builder.CreateICmp(ICmpInst::ICMP_ULT, A, D);
}

/// Try to fold ((icmp X u< P) & (icmp(X & M) != M)) or ((icmp X s> -1) &
/// (icmp(X & M) != M)) into (icmp X u< M). Where P is a power of 2, M < P, and
/// M is a contiguous shifted mask starting at the right most significant zero
/// bit in P. SGT is supported as when P is the largest representable power of
/// 2, an earlier optimization converts the expression into (icmp X s> -1).
/// Parameter P supports masking using undef/poison in either scalar or vector
/// values.
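/// For example: ((icmp X u< 8) & (icmp (X & 6) != 6)) -> icmp X u< 6, since
/// X u< 8 together with (X & 6) != 6 excludes exactly the values 6 and 7.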
static Value *foldPowerOf2AndShiftedMask(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                         bool JoinedByAnd,
                                         InstCombiner::BuilderTy &Builder) {
  if (!JoinedByAnd)
    return nullptr;
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate CmpPred0, CmpPred1;
  // Assuming P is a 2^n, getMaskedTypeForICmpPair will normalize (icmp X u<
  // 2^n) into (icmp (X & ~(2^n-1)) == 0) and (icmp X s> -1) into (icmp (X &
  // SignMask) == 0).
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, Cmp0, Cmp1, CmpPred0, CmpPred1);
  if (!MaskPair)
    return nullptr;

  const auto compareBMask = BMask_NotMixed | BMask_NotAllOnes;
  unsigned CmpMask0 = MaskPair->first;
  unsigned CmpMask1 = MaskPair->second;
  if ((CmpMask0 & Mask_AllZeros) && (CmpMask1 == compareBMask)) {
    if (Value *V = foldNegativePower2AndShiftedMask(A, B, D, E, CmpPred0,
                                                    CmpPred1, Builder))
      return V;
  } else if ((CmpMask0 == compareBMask) && (CmpMask1 & Mask_AllZeros)) {
    if (Value *V = foldNegativePower2AndShiftedMask(A, D, B, C, CmpPred1,
                                                    CmpPred0, Builder))
      return V;
  }
  return nullptr;
}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q,
                                         InstCombiner::BuilderTy &Builder) {
  Value *ZeroCmpOp;
  CmpPredicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(ZeroCmpOp), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  CmpPredicate UnsignedPred;

  Value *A, *B;
  if (match(UnsignedICmp,
            m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Value(A))) &&
      match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) &&
      (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
    auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
      if (!isKnownNonZero(NonZero, Q))
        std::swap(NonZero, Other);
      return isKnownNonZero(NonZero, Q);
    };

    // Given ZeroCmpOp = (A + B)
    //   ZeroCmpOp <  A && ZeroCmpOp != 0 --> (0-X) <  Y
    //   ZeroCmpOp >= A || ZeroCmpOp == 0 --> (0-X) >= Y
    // where X is the value (A or B) that is known to be non-zero,
    // and Y is the remaining value.
    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
        IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
        !IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
  }

  return nullptr;
}

struct IntPart {
  Value *From;
  unsigned StartBit;
  unsigned NumBits;
};

/// Match an extraction of bits from an integer.
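/// For example, (trunc (lshr X, 8) to i8) extracts bits 8..15 of X and is
/// returned as {X, /*StartBit=*/8, /*NumBits=*/8}.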
static std::optional<IntPart> matchIntPart(Value *V) {
  Value *X;
  if (!match(V, m_OneUse(m_Trunc(m_Value(X)))))
    return std::nullopt;

  unsigned NumOriginalBits = X->getType()->getScalarSizeInBits();
  unsigned NumExtractedBits = V->getType()->getScalarSizeInBits();
  Value *Y;
  const APInt *Shift;
  // For a trunc(lshr Y, Shift) pattern, make sure we're only extracting bits
  // from Y, not any shifted-in zeroes.
  if (match(X, m_OneUse(m_LShr(m_Value(Y), m_APInt(Shift)))) &&
      Shift->ule(NumOriginalBits - NumExtractedBits))
    return {{Y, (unsigned)Shift->getZExtValue(), NumExtractedBits}};
  return {{X, 0, NumExtractedBits}};
}

/// Materialize an extraction of bits from an integer in IR.
static Value *extractIntPart(const IntPart &P, IRBuilderBase &Builder) {
  Value *V = P.From;
  if (P.StartBit)
    V = Builder.CreateLShr(V, P.StartBit);
  Type *TruncTy = V->getType()->getWithNewBitWidth(P.NumBits);
  if (TruncTy != V->getType())
    V = Builder.CreateTrunc(V, TruncTy);
  return V;
}

/// (icmp eq X0, Y0) & (icmp eq X1, Y1) -> icmp eq X01, Y01
/// (icmp ne X0, Y0) | (icmp ne X1, Y1) -> icmp ne X01, Y01
/// where X0, X1 and Y0, Y1 are adjacent parts extracted from an integer.
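/// For example:
///   (icmp eq (trunc X to i8), (trunc Y to i8)) &
///   (icmp eq (trunc (lshr X, 8) to i8), (trunc (lshr Y, 8) to i8))
/// -> icmp eq (trunc X to i16), (trunc Y to i16)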
Value *InstCombinerImpl::foldEqOfParts(Value *Cmp0, Value *Cmp1, bool IsAnd) {
  if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
  auto GetMatchPart = [&](Value *CmpV,
                          unsigned OpNo) -> std::optional<IntPart> {
    assert(CmpV->getType()->isIntOrIntVectorTy(1) && "Must be bool");

    Value *X, *Y;
    // icmp ne (and x, 1), (and y, 1) <=> trunc (xor x, y) to i1
    // icmp eq (and x, 1), (and y, 1) <=> not (trunc (xor x, y) to i1)
    if (Pred == CmpInst::ICMP_NE
            ? match(CmpV, m_Trunc(m_Xor(m_Value(X), m_Value(Y))))
            : match(CmpV, m_Not(m_Trunc(m_Xor(m_Value(X), m_Value(Y))))))
      return {{OpNo == 0 ? X : Y, 0, 1}};

    auto *Cmp = dyn_cast<ICmpInst>(CmpV);
    if (!Cmp)
      return std::nullopt;

    if (Pred == Cmp->getPredicate())
      return matchIntPart(Cmp->getOperand(OpNo));

    const APInt *C;
    // (icmp eq (lshr x, C), (lshr y, C)) gets optimized to:
    // (icmp ult (xor x, y), 1 << C) so also look for that.
    if (Pred == CmpInst::ICMP_EQ && Cmp->getPredicate() == CmpInst::ICMP_ULT) {
      if (!match(Cmp->getOperand(1), m_Power2(C)) ||
          !match(Cmp->getOperand(0), m_Xor(m_Value(), m_Value())))
        return std::nullopt;
    }

    // (icmp ne (lshr x, C), (lshr y, C)) gets optimized to:
    // (icmp ugt (xor x, y), (1 << C) - 1) so also look for that.
    else if (Pred == CmpInst::ICMP_NE &&
             Cmp->getPredicate() == CmpInst::ICMP_UGT) {
      if (!match(Cmp->getOperand(1), m_LowBitMask(C)) ||
          !match(Cmp->getOperand(0), m_Xor(m_Value(), m_Value())))
        return std::nullopt;
    } else {
      return std::nullopt;
    }

    unsigned From = Pred == CmpInst::ICMP_NE ? C->popcount() : C->countr_zero();
    Instruction *I = cast<Instruction>(Cmp->getOperand(0));
    return {{I->getOperand(OpNo), From, C->getBitWidth() - From}};
  };

  std::optional<IntPart> L0 = GetMatchPart(Cmp0, 0);
  std::optional<IntPart> R0 = GetMatchPart(Cmp0, 1);
  std::optional<IntPart> L1 = GetMatchPart(Cmp1, 0);
  std::optional<IntPart> R1 = GetMatchPart(Cmp1, 1);
  if (!L0 || !R0 || !L1 || !R1)
    return nullptr;

  // Make sure the LHS/RHS compare a part of the same value, possibly after
  // an operand swap.
  if (L0->From != L1->From || R0->From != R1->From) {
    if (L0->From != R1->From || R0->From != L1->From)
      return nullptr;
    std::swap(L1, R1);
  }

  // Make sure the extracted parts are adjacent, canonicalizing to L0/R0 being
  // the low part and L1/R1 being the high part.
  if (L0->StartBit + L0->NumBits != L1->StartBit ||
      R0->StartBit + R0->NumBits != R1->StartBit) {
    if (L1->StartBit + L1->NumBits != L0->StartBit ||
        R1->StartBit + R1->NumBits != R0->StartBit)
      return nullptr;
    std::swap(L0, L1);
    std::swap(R0, R1);
  }

  // We can simplify to a comparison of these larger parts of the integers.
  IntPart L = {L0->From, L0->StartBit, L0->NumBits + L1->NumBits};
  IntPart R = {R0->From, R0->StartBit, R0->NumBits + R1->NumBits};
  Value *LValue = extractIntPart(L, Builder);
  Value *RValue = extractIntPart(R, Builder);
  return Builder.CreateICmp(Pred, LValue, RValue);
}

/// Reduce logic-of-compares with equality to a constant by substituting a
/// common operand with the constant. Callers are expected to call this with
/// Cmp0/Cmp1 switched to handle logic op commutativity.
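/// For example: (X == 42) & (Y u< X) --> (X == 42) & (Y u< 42), which removes
/// one use of X and may allow further simplification.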
static Value *foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                          bool IsAnd, bool IsLogical,
                                          InstCombiner::BuilderTy &Builder,
                                          const SimplifyQuery &Q) {
  // Match an equality compare with a non-poison constant as Cmp0.
  // Also, give up if the compare can be constant-folded to avoid looping.
  CmpPredicate Pred0;
  Value *X;
  Constant *C;
  if (!match(Cmp0, m_ICmp(Pred0, m_Value(X), m_Constant(C))) ||
      !isGuaranteedNotToBeUndefOrPoison(C) || isa<Constant>(X))
    return nullptr;
  if ((IsAnd && Pred0 != ICmpInst::ICMP_EQ) ||
      (!IsAnd && Pred0 != ICmpInst::ICMP_NE))
    return nullptr;

  // The other compare must include a common operand (X). Canonicalize the
  // common operand as operand 1 (Pred1 is swapped if the common operand was
  // operand 0).
  Value *Y;
  CmpPredicate Pred1;
  if (!match(Cmp1, m_c_ICmp(Pred1, m_Value(Y), m_Specific(X))))
    return nullptr;

  // Replace variable with constant value equivalence to remove a variable use:
  // (X == C) && (Y Pred1 X) --> (X == C) && (Y Pred1 C)
  // (X != C) || (Y Pred1 X) --> (X != C) || (Y Pred1 C)
  // Can think of the 'or' substitution with the 'and' bool equivalent:
  // A || B --> A || (!A && B)
  Value *SubstituteCmp = simplifyICmpInst(Pred1, Y, C, Q);
  if (!SubstituteCmp) {
    // If we need to create a new instruction, require that the old compare can
    // be removed.
    if (!Cmp1->hasOneUse())
      return nullptr;
    SubstituteCmp = Builder.CreateICmp(Pred1, Y, C);
  }
  if (IsLogical)
    return IsAnd ? Builder.CreateLogicalAnd(Cmp0, SubstituteCmp)
                 : Builder.CreateLogicalOr(Cmp0, SubstituteCmp);
  return Builder.CreateBinOp(IsAnd ? Instruction::And : Instruction::Or, Cmp0,
                             SubstituteCmp);
}

/// Fold (icmp Pred1 V1, C1) & (icmp Pred2 V2, C2)
/// or (icmp Pred1 V1, C1) | (icmp Pred2 V2, C2)
/// into a single comparison using range-based reasoning.
/// NOTE: This is also used for logical and/or, must be poison-safe!
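/// For example: (icmp ugt X, 2) & (icmp ult X, 5) describes the range [3, 5),
/// which can be checked with the single compare (icmp ult (add X, -3), 2).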
Value *InstCombinerImpl::foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1,
                                                     ICmpInst *ICmp2,
                                                     bool IsAnd) {
  CmpPredicate Pred1, Pred2;
  Value *V1, *V2;
  const APInt *C1, *C2;
  if (!match(ICmp1, m_ICmp(Pred1, m_Value(V1), m_APInt(C1))) ||
      !match(ICmp2, m_ICmp(Pred2, m_Value(V2), m_APInt(C2))))
    return nullptr;

  // Look through add of a constant offset on V1, V2, or both operands. This
  // allows us to interpret the V + C' < C'' range idiom into a proper range.
  const APInt *Offset1 = nullptr, *Offset2 = nullptr;
  if (V1 != V2) {
    Value *X;
    if (match(V1, m_Add(m_Value(X), m_APInt(Offset1))))
      V1 = X;
    if (match(V2, m_Add(m_Value(X), m_APInt(Offset2))))
      V2 = X;
  }

  if (V1 != V2)
    return nullptr;

  ConstantRange CR1 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInverseCmpPredicate(Pred1) : Pred1, *C1);
  if (Offset1)
    CR1 = CR1.subtract(*Offset1);

  ConstantRange CR2 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInverseCmpPredicate(Pred2) : Pred2, *C2);
  if (Offset2)
    CR2 = CR2.subtract(*Offset2);

  Type *Ty = V1->getType();
  Value *NewV = V1;
  std::optional<ConstantRange> CR = CR1.exactUnionWith(CR2);
  if (!CR) {
    if (!(ICmp1->hasOneUse() && ICmp2->hasOneUse()) || CR1.isWrappedSet() ||
        CR2.isWrappedSet())
      return nullptr;

    // Check whether we have equal-size ranges that only differ by one bit.
    // In that case we can apply a mask to map one range onto the other.
    APInt LowerDiff = CR1.getLower() ^ CR2.getLower();
    APInt UpperDiff = (CR1.getUpper() - 1) ^ (CR2.getUpper() - 1);
    APInt CR1Size = CR1.getUpper() - CR1.getLower();
    if (!LowerDiff.isPowerOf2() || LowerDiff != UpperDiff ||
        CR1Size != CR2.getUpper() - CR2.getLower())
      return nullptr;

    CR = CR1.getLower().ult(CR2.getLower()) ? CR1 : CR2;
    NewV = Builder.CreateAnd(NewV, ConstantInt::get(Ty, ~LowerDiff));
  }

  if (IsAnd)
    CR = CR->inverse();

  CmpInst::Predicate NewPred;
  APInt NewC, Offset;
  CR->getEquivalentICmp(NewPred, NewC, Offset);

  if (Offset != 0)
    NewV = Builder.CreateAdd(NewV, ConstantInt::get(Ty, Offset));
  return Builder.CreateICmp(NewPred, NewV, ConstantInt::get(Ty, NewC));
}
1383
1384/// Ignore all operations which only change the sign of a value, returning the
1385/// underlying magnitude value.
1386static Value *stripSignOnlyFPOps(Value *Val) {
1387 match(V: Val, P: m_FNeg(X: m_Value(V&: Val)));
1388 match(V: Val, P: m_FAbs(Op0: m_Value(V&: Val)));
1389 match(V: Val, P: m_CopySign(Op0: m_Value(V&: Val), Op1: m_Value()));
1390 return Val;
1391}
1392
1393 /// Matches the canonical form of a not-NaN check: fcmp ord x, 0
1394static bool matchIsNotNaN(FCmpInst::Predicate P, Value *LHS, Value *RHS) {
1395 return P == FCmpInst::FCMP_ORD && match(V: RHS, P: m_AnyZeroFP());
1396}
1397
1398/// Matches fcmp u__ x, +/-inf
1399static bool matchUnorderedInfCompare(FCmpInst::Predicate P, Value *LHS,
1400 Value *RHS) {
1401 return FCmpInst::isUnordered(predicate: P) && match(V: RHS, P: m_Inf());
1402}
1403
1404/// and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
1405///
1406/// Clang emits this pattern for doing an isfinite check in __builtin_isnormal.
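/// An illustrative IR instance (with u* = ult and double operands; not taken
/// from the source or tests):
///   %ord = fcmp ord double %x, 0.0
///   %cmp = fcmp ult double %x, 0x7FF0000000000000
///   %and = and i1 %ord, %cmp
/// folds to:
///   %res = fcmp olt double %x, 0x7FF0000000000000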
1407static Value *matchIsFiniteTest(InstCombiner::BuilderTy &Builder, FCmpInst *LHS,
1408 FCmpInst *RHS) {
1409 Value *LHS0 = LHS->getOperand(i_nocapture: 0), *LHS1 = LHS->getOperand(i_nocapture: 1);
1410 Value *RHS0 = RHS->getOperand(i_nocapture: 0), *RHS1 = RHS->getOperand(i_nocapture: 1);
1411 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1412
1413 if (!matchIsNotNaN(P: PredL, LHS: LHS0, RHS: LHS1) ||
1414 !matchUnorderedInfCompare(P: PredR, LHS: RHS0, RHS: RHS1))
1415 return nullptr;
1416
1417 return Builder.CreateFCmpFMF(P: FCmpInst::getOrderedPredicate(Pred: PredR), LHS: RHS0, RHS: RHS1,
1418 FMFSource: FMFSource::intersect(A: LHS, B: RHS));
1419}
1420
1421Value *InstCombinerImpl::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
1422 bool IsAnd, bool IsLogicalSelect) {
1423 Value *LHS0 = LHS->getOperand(i_nocapture: 0), *LHS1 = LHS->getOperand(i_nocapture: 1);
1424 Value *RHS0 = RHS->getOperand(i_nocapture: 0), *RHS1 = RHS->getOperand(i_nocapture: 1);
1425 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1426
1427 if (LHS0 == RHS1 && RHS0 == LHS1) {
1428 // Swap RHS operands to match LHS.
1429 PredR = FCmpInst::getSwappedPredicate(pred: PredR);
1430 std::swap(a&: RHS0, b&: RHS1);
1431 }
1432
1433 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
1434 // Suppose the relation between x and y is R, where R is one of
1435 // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
1436 // testing the desired relations.
1437 //
1438 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
1439 // bool(R & CC0) && bool(R & CC1)
1440 // = bool((R & CC0) & (R & CC1))
1441 // = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
1442 //
1443 // Similarly, since (R & CC0) and (R & CC1) are either R or 0:
1444 // bool(R & CC0) || bool(R & CC1)
1445 // = bool((R & CC0) | (R & CC1))
1446 // = bool(R & (CC0 | CC1)) <= by distribution in reverse
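// For example, using the encoding above:
//   (fcmp olt x, y) & (fcmp ole x, y) --> fcmp olt x, y  (0100 & 0101 = 0100)
//   (fcmp ole x, y) & (fcmp oge x, y) --> fcmp oeq x, y  (0101 & 0011 = 0001)
//   (fcmp olt x, y) | (fcmp ogt x, y) --> fcmp one x, y  (0100 | 0010 = 0110)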
1447 if (LHS0 == RHS0 && LHS1 == RHS1) {
1448 unsigned FCmpCodeL = getFCmpCode(CC: PredL);
1449 unsigned FCmpCodeR = getFCmpCode(CC: PredR);
1450 unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
1451
1452 // Intersect the fast math flags.
1453 // TODO: We can union the fast math flags unless this is a logical select.
1454 return getFCmpValue(Code: NewPred, LHS: LHS0, RHS: LHS1, Builder,
1455 FMF: FMFSource::intersect(A: LHS, B: RHS));
1456 }
1457
1458 // This transform is not valid for a logical select.
1459 if (!IsLogicalSelect &&
1460 ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1461 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO &&
1462 !IsAnd))) {
1463 if (LHS0->getType() != RHS0->getType())
1464 return nullptr;
1465
1466 // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
1467 // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0).
1468 if (match(V: LHS1, P: m_PosZeroFP()) && match(V: RHS1, P: m_PosZeroFP())) {
1469 // Ignore the constants because they are obviously not NANs:
1470 // (fcmp ord x, 0.0) & (fcmp ord y, 0.0) -> (fcmp ord x, y)
1471 // (fcmp uno x, 0.0) | (fcmp uno y, 0.0) -> (fcmp uno x, y)
1472 return Builder.CreateFCmpFMF(P: PredL, LHS: LHS0, RHS: RHS0,
1473 FMFSource: FMFSource::intersect(A: LHS, B: RHS));
1474 }
1475 }
1476
1477 // This transform is not valid for a logical select.
1478 if (!IsLogicalSelect && IsAnd &&
1479 stripSignOnlyFPOps(Val: LHS0) == stripSignOnlyFPOps(Val: RHS0)) {
1480 // and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
1481 // and (fcmp ord x, 0), (fcmp u* fabs(x), inf) -> fcmp o* x, inf
1482 if (Value *Left = matchIsFiniteTest(Builder, LHS, RHS))
1483 return Left;
1484 if (Value *Right = matchIsFiniteTest(Builder, LHS: RHS, RHS: LHS))
1485 return Right;
1486 }
1487
1488 // Turn at least two fcmps with constants into llvm.is.fpclass.
1489 //
1490 // If we can represent a combined value test with one class call, we can
1491 // potentially eliminate 4-6 instructions. If we can represent a test with a
1492 // single fcmp with fneg and fabs, that's likely a better canonical form.
1493 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1494 auto [ClassValRHS, ClassMaskRHS] =
1495 fcmpToClassTest(Pred: PredR, F: *RHS->getFunction(), LHS: RHS0, RHS: RHS1);
1496 if (ClassValRHS) {
1497 auto [ClassValLHS, ClassMaskLHS] =
1498 fcmpToClassTest(Pred: PredL, F: *LHS->getFunction(), LHS: LHS0, RHS: LHS1);
1499 if (ClassValLHS == ClassValRHS) {
1500 unsigned CombinedMask = IsAnd ? (ClassMaskLHS & ClassMaskRHS)
1501 : (ClassMaskLHS | ClassMaskRHS);
1502 return Builder.CreateIntrinsic(
1503 ID: Intrinsic::is_fpclass, Types: {ClassValLHS->getType()},
1504 Args: {ClassValLHS, Builder.getInt32(C: CombinedMask)});
1505 }
1506 }
1507 }
1508
1509 // Canonicalize the range check idiom:
1510 // and (fcmp olt/ole/ult/ule x, C), (fcmp ogt/oge/ugt/uge x, -C)
1511 // --> fabs(x) olt/ole/ult/ule C
1512 // or (fcmp ogt/oge/ugt/uge x, C), (fcmp olt/ole/ult/ule x, -C)
1513 // --> fabs(x) ogt/oge/ugt/uge C
1514 // TODO: Generalize to handle a negated variable operand?
1515 const APFloat *LHSC, *RHSC;
1516 if (LHS0 == RHS0 && LHS->hasOneUse() && RHS->hasOneUse() &&
1517 FCmpInst::getSwappedPredicate(pred: PredL) == PredR &&
1518 match(V: LHS1, P: m_APFloatAllowPoison(Res&: LHSC)) &&
1519 match(V: RHS1, P: m_APFloatAllowPoison(Res&: RHSC)) &&
1520 LHSC->bitwiseIsEqual(RHS: neg(X: *RHSC))) {
1521 auto IsLessThanOrLessEqual = [](FCmpInst::Predicate Pred) {
1522 switch (Pred) {
1523 case FCmpInst::FCMP_OLT:
1524 case FCmpInst::FCMP_OLE:
1525 case FCmpInst::FCMP_ULT:
1526 case FCmpInst::FCMP_ULE:
1527 return true;
1528 default:
1529 return false;
1530 }
1531 };
1532 if (IsLessThanOrLessEqual(IsAnd ? PredR : PredL)) {
1533 std::swap(a&: LHSC, b&: RHSC);
1534 std::swap(a&: PredL, b&: PredR);
1535 }
1536 if (IsLessThanOrLessEqual(IsAnd ? PredL : PredR)) {
1537 FastMathFlags NewFlag = LHS->getFastMathFlags();
1538 if (!IsLogicalSelect)
1539 NewFlag |= RHS->getFastMathFlags();
1540
1541 Value *FAbs =
1542 Builder.CreateUnaryIntrinsic(ID: Intrinsic::fabs, V: LHS0, FMFSource: NewFlag);
1543 return Builder.CreateFCmpFMF(
1544 P: PredL, LHS: FAbs, RHS: ConstantFP::get(Ty: LHS0->getType(), V: *LHSC), FMFSource: NewFlag);
1545 }
1546 }
1547
1548 return nullptr;
1549}
1550
1551 /// Match an fcmp against a special value, where the comparison performs a test
1552 /// that could also be expressed with llvm.is.fpclass.
1553static bool matchIsFPClassLikeFCmp(Value *Op, Value *&ClassVal,
1554 uint64_t &ClassMask) {
1555 auto *FCmp = dyn_cast<FCmpInst>(Val: Op);
1556 if (!FCmp || !FCmp->hasOneUse())
1557 return false;
1558
1559 std::tie(args&: ClassVal, args&: ClassMask) =
1560 fcmpToClassTest(Pred: FCmp->getPredicate(), F: *FCmp->getParent()->getParent(),
1561 LHS: FCmp->getOperand(i_nocapture: 0), RHS: FCmp->getOperand(i_nocapture: 1));
1562 return ClassVal != nullptr;
1563}
1564
1565/// or (is_fpclass x, mask0), (is_fpclass x, mask1)
1566/// -> is_fpclass x, (mask0 | mask1)
1567/// and (is_fpclass x, mask0), (is_fpclass x, mask1)
1568/// -> is_fpclass x, (mask0 & mask1)
1569/// xor (is_fpclass x, mask0), (is_fpclass x, mask1)
1570/// -> is_fpclass x, (mask0 ^ mask1)
1571Instruction *InstCombinerImpl::foldLogicOfIsFPClass(BinaryOperator &BO,
1572 Value *Op0, Value *Op1) {
1573 Value *ClassVal0 = nullptr;
1574 Value *ClassVal1 = nullptr;
1575 uint64_t ClassMask0, ClassMask1;
1576
1577 // Restrict to folding one fcmp into one is.fpclass for now; don't introduce a
1578 // new class.
1579 //
1580 // TODO: Support forming is.fpclass out of 2 separate fcmps when codegen is
1581 // better.
1582
1583 bool IsLHSClass =
1584 match(V: Op0, P: m_OneUse(SubPattern: m_Intrinsic<Intrinsic::is_fpclass>(
1585 Op0: m_Value(V&: ClassVal0), Op1: m_ConstantInt(V&: ClassMask0))));
1586 bool IsRHSClass =
1587 match(V: Op1, P: m_OneUse(SubPattern: m_Intrinsic<Intrinsic::is_fpclass>(
1588 Op0: m_Value(V&: ClassVal1), Op1: m_ConstantInt(V&: ClassMask1))));
1589 if ((((IsLHSClass || matchIsFPClassLikeFCmp(Op: Op0, ClassVal&: ClassVal0, ClassMask&: ClassMask0)) &&
1590 (IsRHSClass || matchIsFPClassLikeFCmp(Op: Op1, ClassVal&: ClassVal1, ClassMask&: ClassMask1)))) &&
1591 ClassVal0 == ClassVal1) {
1592 unsigned NewClassMask;
1593 switch (BO.getOpcode()) {
1594 case Instruction::And:
1595 NewClassMask = ClassMask0 & ClassMask1;
1596 break;
1597 case Instruction::Or:
1598 NewClassMask = ClassMask0 | ClassMask1;
1599 break;
1600 case Instruction::Xor:
1601 NewClassMask = ClassMask0 ^ ClassMask1;
1602 break;
1603 default:
1604 llvm_unreachable("not a binary logic operator");
1605 }
1606
1607 if (IsLHSClass) {
1608 auto *II = cast<IntrinsicInst>(Val: Op0);
1609 II->setArgOperand(
1610 i: 1, v: ConstantInt::get(Ty: II->getArgOperand(i: 1)->getType(), V: NewClassMask));
1611 return replaceInstUsesWith(I&: BO, V: II);
1612 }
1613
1614 if (IsRHSClass) {
1615 auto *II = cast<IntrinsicInst>(Val: Op1);
1616 II->setArgOperand(
1617 i: 1, v: ConstantInt::get(Ty: II->getArgOperand(i: 1)->getType(), V: NewClassMask));
1618 return replaceInstUsesWith(I&: BO, V: II);
1619 }
1620
1621 CallInst *NewClass =
1622 Builder.CreateIntrinsic(ID: Intrinsic::is_fpclass, Types: {ClassVal0->getType()},
1623 Args: {ClassVal0, Builder.getInt32(C: NewClassMask)});
1624 return replaceInstUsesWith(I&: BO, V: NewClass);
1625 }
1626
1627 return nullptr;
1628}
1629
1630/// Look for the pattern that conditionally negates a value via math operations:
1631/// cond.splat = sext i1 cond
1632/// sub = add cond.splat, x
1633/// xor = xor sub, cond.splat
1634/// and rewrite it to do the same, but via logical operations:
1635/// value.neg = sub 0, value
1636 /// result = select i1 cond, value.neg, value
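/// This is equivalent because when cond is true, cond.splat is all-ones, so the
/// math computes (x + -1) ^ -1 == ~(x - 1) == -x; when cond is false, it
/// computes (x + 0) ^ 0 == x.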
1637Instruction *InstCombinerImpl::canonicalizeConditionalNegationViaMathToSelect(
1638 BinaryOperator &I) {
1639 assert(I.getOpcode() == BinaryOperator::Xor && "Only for xor!");
1640 Value *Cond, *X;
1641 // As per complexity ordering, `xor` is not commutative here.
1642 if (!match(V: &I, P: m_c_BinOp(L: m_OneUse(SubPattern: m_Value()), R: m_Value())) ||
1643 !match(V: I.getOperand(i_nocapture: 1), P: m_SExt(Op: m_Value(V&: Cond))) ||
1644 !Cond->getType()->isIntOrIntVectorTy(BitWidth: 1) ||
1645 !match(V: I.getOperand(i_nocapture: 0), P: m_c_Add(L: m_SExt(Op: m_Specific(V: Cond)), R: m_Value(V&: X))))
1646 return nullptr;
1647 return SelectInst::Create(C: Cond, S1: Builder.CreateNeg(V: X, Name: X->getName() + ".neg"),
1648 S2: X);
1649}
1650
1651 /// This is a limited reassociation for a special case (see above) where we are
1652/// checking if two values are either both NAN (unordered) or not-NAN (ordered).
1653 /// This could be handled more generally in '-reassociate', but it seems like
1654/// an unlikely pattern for a large number of logic ops and fcmps.
1655static Instruction *reassociateFCmps(BinaryOperator &BO,
1656 InstCombiner::BuilderTy &Builder) {
1657 Instruction::BinaryOps Opcode = BO.getOpcode();
1658 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1659 "Expecting and/or op for fcmp transform");
1660
1661 // There are 4 commuted variants of the pattern. Canonicalize operands of this
1662 // logic op so an fcmp is operand 0 and a matching logic op is operand 1.
1663 Value *Op0 = BO.getOperand(i_nocapture: 0), *Op1 = BO.getOperand(i_nocapture: 1), *X;
1664 if (match(V: Op1, P: m_FCmp(L: m_Value(), R: m_AnyZeroFP())))
1665 std::swap(a&: Op0, b&: Op1);
1666
1667 // Match inner binop and the predicate for combining 2 NAN checks into 1.
1668 Value *BO10, *BO11;
1669 FCmpInst::Predicate NanPred = Opcode == Instruction::And ? FCmpInst::FCMP_ORD
1670 : FCmpInst::FCMP_UNO;
1671 if (!match(V: Op0, P: m_SpecificFCmp(MatchPred: NanPred, L: m_Value(V&: X), R: m_AnyZeroFP())) ||
1672 !match(V: Op1, P: m_BinOp(Opcode, L: m_Value(V&: BO10), R: m_Value(V&: BO11))))
1673 return nullptr;
1674
1675 // The inner logic op must have a matching fcmp operand.
1676 Value *Y;
1677 if (!match(V: BO10, P: m_SpecificFCmp(MatchPred: NanPred, L: m_Value(V&: Y), R: m_AnyZeroFP())) ||
1678 X->getType() != Y->getType())
1679 std::swap(a&: BO10, b&: BO11);
1680
1681 if (!match(V: BO10, P: m_SpecificFCmp(MatchPred: NanPred, L: m_Value(V&: Y), R: m_AnyZeroFP())) ||
1682 X->getType() != Y->getType())
1683 return nullptr;
1684
1685 // and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z
1686 // or (fcmp uno X, 0), (or (fcmp uno Y, 0), Z) --> or (fcmp uno X, Y), Z
1687 // Intersect FMF from the 2 source fcmps.
1688 Value *NewFCmp =
1689 Builder.CreateFCmpFMF(P: NanPred, LHS: X, RHS: Y, FMFSource: FMFSource::intersect(A: Op0, B: BO10));
1690 return BinaryOperator::Create(Op: Opcode, S1: NewFCmp, S2: BO11);
1691}
1692
1693/// Match variations of De Morgan's Laws:
1694/// (~A & ~B) == (~(A | B))
1695/// (~A | ~B) == (~(A & B))
1696static Instruction *matchDeMorgansLaws(BinaryOperator &I,
1697 InstCombiner &IC) {
1698 const Instruction::BinaryOps Opcode = I.getOpcode();
1699 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1700 "Trying to match De Morgan's Laws with something other than and/or");
1701
1702 // Flip the logic operation.
1703 const Instruction::BinaryOps FlippedOpcode =
1704 (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1705
1706 Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);
1707 Value *A, *B;
1708 if (match(V: Op0, P: m_OneUse(SubPattern: m_Not(V: m_Value(V&: A)))) &&
1709 match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_Value(V&: B)))) &&
1710 !IC.isFreeToInvert(V: A, WillInvertAllUses: A->hasOneUse()) &&
1711 !IC.isFreeToInvert(V: B, WillInvertAllUses: B->hasOneUse())) {
1712 Value *AndOr =
1713 IC.Builder.CreateBinOp(Opc: FlippedOpcode, LHS: A, RHS: B, Name: I.getName() + ".demorgan");
1714 return BinaryOperator::CreateNot(Op: AndOr);
1715 }
1716
1717 // The 'not' ops may require reassociation.
1718 // (A & ~B) & ~C --> A & ~(B | C)
1719 // (~B & A) & ~C --> A & ~(B | C)
1720 // (A | ~B) | ~C --> A | ~(B & C)
1721 // (~B | A) | ~C --> A | ~(B & C)
1722 Value *C;
1723 if (match(V: Op0, P: m_OneUse(SubPattern: m_c_BinOp(Opcode, L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B))))) &&
1724 match(V: Op1, P: m_Not(V: m_Value(V&: C)))) {
1725 Value *FlippedBO = IC.Builder.CreateBinOp(Opc: FlippedOpcode, LHS: B, RHS: C);
1726 return BinaryOperator::Create(Op: Opcode, S1: A, S2: IC.Builder.CreateNot(V: FlippedBO));
1727 }
1728
1729 return nullptr;
1730}
1731
1732bool InstCombinerImpl::shouldOptimizeCast(CastInst *CI) {
1733 Value *CastSrc = CI->getOperand(i_nocapture: 0);
1734
1735 // Noop casts and casts of constants should be eliminated trivially.
1736 if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(Val: CastSrc))
1737 return false;
1738
1739 // If this cast is paired with another cast that can be eliminated, we prefer
1740 // to have it eliminated.
1741 if (const auto *PrecedingCI = dyn_cast<CastInst>(Val: CastSrc))
1742 if (isEliminableCastPair(CI1: PrecedingCI, CI2: CI))
1743 return false;
1744
1745 return true;
1746}
1747
1748/// Fold {and,or,xor} (cast X), C.
1749static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
1750 InstCombinerImpl &IC) {
1751 Constant *C = dyn_cast<Constant>(Val: Logic.getOperand(i_nocapture: 1));
1752 if (!C)
1753 return nullptr;
1754
1755 auto LogicOpc = Logic.getOpcode();
1756 Type *DestTy = Logic.getType();
1757 Type *SrcTy = Cast->getSrcTy();
1758
1759 // Move the logic operation ahead of a zext or sext if the constant is
1760 // unchanged in the smaller source type. Performing the logic in a smaller
1761 // type may provide more information to later folds, and the smaller logic
1762 // instruction may be cheaper (particularly in the case of vectors).
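// For example (illustrative, with an i8 source):
//   or (zext i8 X to i32), 5 --> zext (or i8 X, 5) to i32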
1763 Value *X;
1764 if (match(V: Cast, P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X))))) {
1765 if (Constant *TruncC = IC.getLosslessUnsignedTrunc(C, TruncTy: SrcTy)) {
1766 // LogicOpc (zext X), C --> zext (LogicOpc X, C)
1767 Value *NewOp = IC.Builder.CreateBinOp(Opc: LogicOpc, LHS: X, RHS: TruncC);
1768 return new ZExtInst(NewOp, DestTy);
1769 }
1770 }
1771
1772 if (match(V: Cast, P: m_OneUse(SubPattern: m_SExtLike(Op: m_Value(V&: X))))) {
1773 if (Constant *TruncC = IC.getLosslessSignedTrunc(C, TruncTy: SrcTy)) {
1774 // LogicOpc (sext X), C --> sext (LogicOpc X, C)
1775 Value *NewOp = IC.Builder.CreateBinOp(Opc: LogicOpc, LHS: X, RHS: TruncC);
1776 return new SExtInst(NewOp, DestTy);
1777 }
1778 }
1779
1780 return nullptr;
1781}
1782
1783/// Fold {and,or,xor} (cast X), Y.
1784Instruction *InstCombinerImpl::foldCastedBitwiseLogic(BinaryOperator &I) {
1785 auto LogicOpc = I.getOpcode();
1786 assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");
1787
1788 Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);
1789
1790 // fold bitwise(A >> (BW - 1), zext(icmp)), where BW is the scalar bit width
1791 // of A's type:
1792 // -> bitwise(zext(A < 0), zext(icmp))
1793 // -> zext(bitwise(A < 0, icmp))
1794 auto FoldBitwiseICmpZeroWithICmp = [&](Value *Op0,
1795 Value *Op1) -> Instruction * {
1796 Value *A;
1797 bool IsMatched =
1798 match(V: Op0,
1799 P: m_OneUse(SubPattern: m_LShr(
1800 L: m_Value(V&: A),
1801 R: m_SpecificInt(V: Op0->getType()->getScalarSizeInBits() - 1)))) &&
1802 match(V: Op1, P: m_OneUse(SubPattern: m_ZExt(Op: m_ICmp(L: m_Value(), R: m_Value()))));
1803
1804 if (!IsMatched)
1805 return nullptr;
1806
1807 auto *ICmpL =
1808 Builder.CreateICmpSLT(LHS: A, RHS: Constant::getNullValue(Ty: A->getType()));
1809 auto *ICmpR = cast<ZExtInst>(Val: Op1)->getOperand(i_nocapture: 0);
1810 auto *BitwiseOp = Builder.CreateBinOp(Opc: LogicOpc, LHS: ICmpL, RHS: ICmpR);
1811
1812 return new ZExtInst(BitwiseOp, Op0->getType());
1813 };
1814
1815 if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op0, Op1))
1816 return Ret;
1817
1818 if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op1, Op0))
1819 return Ret;
1820
1821 CastInst *Cast0 = dyn_cast<CastInst>(Val: Op0);
1822 if (!Cast0)
1823 return nullptr;
1824
1825 // This must be a cast from an integer or integer vector source type to allow
1826 // transformation of the logic operation to the source type.
1827 Type *DestTy = I.getType();
1828 Type *SrcTy = Cast0->getSrcTy();
1829 if (!SrcTy->isIntOrIntVectorTy())
1830 return nullptr;
1831
1832 if (Instruction *Ret = foldLogicCastConstant(Logic&: I, Cast: Cast0, IC&: *this))
1833 return Ret;
1834
1835 CastInst *Cast1 = dyn_cast<CastInst>(Val: Op1);
1836 if (!Cast1)
1837 return nullptr;
1838
1839 // Both operands of the logic operation are casts. The casts must be the
1840 // same kind for reduction.
1841 Instruction::CastOps CastOpcode = Cast0->getOpcode();
1842 if (CastOpcode != Cast1->getOpcode())
1843 return nullptr;
1844
1845 // Folding is not profitable unless at least one of the casts has a single use.
1846 if (!Cast0->hasOneUse() && !Cast1->hasOneUse())
1847 return nullptr;
1848
1849 Value *X, *Y;
1850 if (match(V: Cast0, P: m_ZExtOrSExt(Op: m_Value(V&: X))) &&
1851 match(V: Cast1, P: m_ZExtOrSExt(Op: m_Value(V&: Y)))) {
1852 // Cast the narrower source to the wider source type.
1853 unsigned XNumBits = X->getType()->getScalarSizeInBits();
1854 unsigned YNumBits = Y->getType()->getScalarSizeInBits();
1855 if (XNumBits != YNumBits) {
1856 // Only do this if both casts have a single use, to avoid creating an extra
1857 // instruction.
1858 if (!Cast0->hasOneUse() || !Cast1->hasOneUse())
1859 return nullptr;
1860
1861 // If the source types do not match, but the casts are matching extends,
1862 // we can still narrow the logic op.
1863 if (XNumBits < YNumBits) {
1864 X = Builder.CreateCast(Op: CastOpcode, V: X, DestTy: Y->getType());
1865 } else if (YNumBits < XNumBits) {
1866 Y = Builder.CreateCast(Op: CastOpcode, V: Y, DestTy: X->getType());
1867 }
1868 }
1869
1870 // Do the logic op in the intermediate width, then widen more.
1871 Value *NarrowLogic = Builder.CreateBinOp(Opc: LogicOpc, LHS: X, RHS: Y, Name: I.getName());
1872 auto *Disjoint = dyn_cast<PossiblyDisjointInst>(Val: &I);
1873 auto *NewDisjoint = dyn_cast<PossiblyDisjointInst>(Val: NarrowLogic);
1874 if (Disjoint && NewDisjoint)
1875 NewDisjoint->setIsDisjoint(Disjoint->isDisjoint());
1876 return CastInst::Create(CastOpcode, S: NarrowLogic, Ty: DestTy);
1877 }
1878
1879 // If the source types of the casts differ, give up for other cast opcodes.
1880 if (SrcTy != Cast1->getSrcTy())
1881 return nullptr;
1882
1883 Value *Cast0Src = Cast0->getOperand(i_nocapture: 0);
1884 Value *Cast1Src = Cast1->getOperand(i_nocapture: 0);
1885
1886 // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
1887 if (shouldOptimizeCast(CI: Cast0) && shouldOptimizeCast(CI: Cast1)) {
1888 Value *NewOp = Builder.CreateBinOp(Opc: LogicOpc, LHS: Cast0Src, RHS: Cast1Src,
1889 Name: I.getName());
1890 return CastInst::Create(CastOpcode, S: NewOp, Ty: DestTy);
1891 }
1892
1893 return nullptr;
1894}
1895
1896static Instruction *foldAndToXor(BinaryOperator &I,
1897 InstCombiner::BuilderTy &Builder) {
1898 assert(I.getOpcode() == Instruction::And);
1899 Value *Op0 = I.getOperand(i_nocapture: 0);
1900 Value *Op1 = I.getOperand(i_nocapture: 1);
1901 Value *A, *B;
1902
1903 // Operand complexity canonicalization guarantees that the 'or' is Op0.
1904 // (A | B) & ~(A & B) --> A ^ B
1905 // (A | B) & ~(B & A) --> A ^ B
1906 if (match(V: &I, P: m_BinOp(L: m_Or(L: m_Value(V&: A), R: m_Value(V&: B)),
1907 R: m_Not(V: m_c_And(L: m_Deferred(V: A), R: m_Deferred(V: B))))))
1908 return BinaryOperator::CreateXor(V1: A, V2: B);
1909
1910 // (A | ~B) & (~A | B) --> ~(A ^ B)
1911 // (A | ~B) & (B | ~A) --> ~(A ^ B)
1912 // (~B | A) & (~A | B) --> ~(A ^ B)
1913 // (~B | A) & (B | ~A) --> ~(A ^ B)
1914 if (Op0->hasOneUse() || Op1->hasOneUse())
1915 if (match(V: &I, P: m_BinOp(L: m_c_Or(L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B))),
1916 R: m_c_Or(L: m_Not(V: m_Deferred(V: A)), R: m_Deferred(V: B)))))
1917 return BinaryOperator::CreateNot(Op: Builder.CreateXor(LHS: A, RHS: B));
1918
1919 return nullptr;
1920}
1921
1922static Instruction *foldOrToXor(BinaryOperator &I,
1923 InstCombiner::BuilderTy &Builder) {
1924 assert(I.getOpcode() == Instruction::Or);
1925 Value *Op0 = I.getOperand(i_nocapture: 0);
1926 Value *Op1 = I.getOperand(i_nocapture: 1);
1927 Value *A, *B;
1928
1929 // Operand complexity canonicalization guarantees that the 'and' is Op0.
1930 // (A & B) | ~(A | B) --> ~(A ^ B)
1931 // (A & B) | ~(B | A) --> ~(A ^ B)
1932 if (Op0->hasOneUse() || Op1->hasOneUse())
1933 if (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) &&
1934 match(V: Op1, P: m_Not(V: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B)))))
1935 return BinaryOperator::CreateNot(Op: Builder.CreateXor(LHS: A, RHS: B));
1936
1937 // Operand complexity canonicalization guarantees that the 'xor' is Op0.
1938 // (A ^ B) | ~(A | B) --> ~(A & B)
1939 // (A ^ B) | ~(B | A) --> ~(A & B)
1940 if (Op0->hasOneUse() || Op1->hasOneUse())
1941 if (match(V: Op0, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))) &&
1942 match(V: Op1, P: m_Not(V: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B)))))
1943 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: A, RHS: B));
1944
1945 // (A & ~B) | (~A & B) --> A ^ B
1946 // (A & ~B) | (B & ~A) --> A ^ B
1947 // (~B & A) | (~A & B) --> A ^ B
1948 // (~B & A) | (B & ~A) --> A ^ B
1949 if (match(V: Op0, P: m_c_And(L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B)))) &&
1950 match(V: Op1, P: m_c_And(L: m_Not(V: m_Specific(V: A)), R: m_Specific(V: B))))
1951 return BinaryOperator::CreateXor(V1: A, V2: B);
1952
1953 return nullptr;
1954}
1955
1956/// Return true if a constant shift amount is always less than the specified
1957/// bit-width. If not, the shift could create poison in the narrower type.
1958static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) {
1959 APInt Threshold(C->getType()->getScalarSizeInBits(), BitWidth);
1960 return match(V: C, P: m_SpecificInt_ICMP(Predicate: ICmpInst::ICMP_ULT, Threshold));
1961}
1962
1963/// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and
1964/// a common zext operand: and (binop (zext X), C), (zext X).
1965Instruction *InstCombinerImpl::narrowMaskedBinOp(BinaryOperator &And) {
1966 // This transform could also apply to {or, and, xor}, but there are better
1967 // folds for those cases, so we don't expect those patterns here. AShr is not
1968 // handled because it should always be transformed to LShr in this sequence.
1969 // The subtract transform is different because it has a constant on the left.
1970 // Add/mul commute the constant to RHS; sub with constant RHS becomes add.
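// For example (illustrative, with X of type i8 zero-extended to i32):
//   and (add (zext X), 12), (zext X) --> zext (and (add X, 12), X)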
1971 Value *Op0 = And.getOperand(i_nocapture: 0), *Op1 = And.getOperand(i_nocapture: 1);
1972 Constant *C;
1973 if (!match(V: Op0, P: m_OneUse(SubPattern: m_Add(L: m_Specific(V: Op1), R: m_Constant(C)))) &&
1974 !match(V: Op0, P: m_OneUse(SubPattern: m_Mul(L: m_Specific(V: Op1), R: m_Constant(C)))) &&
1975 !match(V: Op0, P: m_OneUse(SubPattern: m_LShr(L: m_Specific(V: Op1), R: m_Constant(C)))) &&
1976 !match(V: Op0, P: m_OneUse(SubPattern: m_Shl(L: m_Specific(V: Op1), R: m_Constant(C)))) &&
1977 !match(V: Op0, P: m_OneUse(SubPattern: m_Sub(L: m_Constant(C), R: m_Specific(V: Op1)))))
1978 return nullptr;
1979
1980 Value *X;
1981 if (!match(V: Op1, P: m_ZExt(Op: m_Value(V&: X))) || Op1->hasNUsesOrMore(N: 3))
1982 return nullptr;
1983
1984 Type *Ty = And.getType();
1985 if (!isa<VectorType>(Val: Ty) && !shouldChangeType(From: Ty, To: X->getType()))
1986 return nullptr;
1987
1988 // If we're narrowing a shift, the shift amount must be safe (less than the
1989 // width) in the narrower type. If the shift amount is greater, instsimplify
1990 // usually handles that case, but we can't guarantee/assert it.
1991 Instruction::BinaryOps Opc = cast<BinaryOperator>(Val: Op0)->getOpcode();
1992 if (Opc == Instruction::LShr || Opc == Instruction::Shl)
1993 if (!canNarrowShiftAmt(C, BitWidth: X->getType()->getScalarSizeInBits()))
1994 return nullptr;
1995
1996 // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X)
1997 // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X)
1998 Value *NewC = ConstantExpr::getTrunc(C, Ty: X->getType());
1999 Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, LHS: NewC, RHS: X)
2000 : Builder.CreateBinOp(Opc, LHS: X, RHS: NewC);
2001 return new ZExtInst(Builder.CreateAnd(LHS: NewBO, RHS: X), Ty);
2002}
2003
2004/// Try folding relatively complex patterns for both And and Or operations
2005/// with all And and Or swapped.
2006static Instruction *foldComplexAndOrPatterns(BinaryOperator &I,
2007 InstCombiner::BuilderTy &Builder) {
2008 const Instruction::BinaryOps Opcode = I.getOpcode();
2009 assert(Opcode == Instruction::And || Opcode == Instruction::Or);
2010
2011 // Flip the logic operation.
2012 const Instruction::BinaryOps FlippedOpcode =
2013 (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
2014
2015 Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);
2016 Value *A, *B, *C, *X, *Y, *Dummy;
2017
2018 // Match following expressions:
2019 // (~(A | B) & C)
2020 // (~(A & B) | C)
2021 // Captures X = ~(A | B) or ~(A & B)
2022 const auto matchNotOrAnd =
2023 [Opcode, FlippedOpcode](Value *Op, auto m_A, auto m_B, auto m_C,
2024 Value *&X, bool CountUses = false) -> bool {
2025 if (CountUses && !Op->hasOneUse())
2026 return false;
2027
2028 if (match(Op, m_c_BinOp(FlippedOpcode,
2029 m_CombineAnd(m_Value(V&: X),
2030 m_Not(m_c_BinOp(Opcode, m_A, m_B))),
2031 m_C)))
2032 return !CountUses || X->hasOneUse();
2033
2034 return false;
2035 };
2036
2037 // (~(A | B) & C) | ... --> ...
2038 // (~(A & B) | C) & ... --> ...
2039 // TODO: The one-use checks are conservative. We only need to check that the
2040 // total number of multiply-used values does not exceed the reduction in
2041 // operations.
2042 if (matchNotOrAnd(Op0, m_Value(V&: A), m_Value(V&: B), m_Value(V&: C), X)) {
2043 // (~(A | B) & C) | (~(A | C) & B) --> (B ^ C) & ~A
2044 // (~(A & B) | C) & (~(A & C) | B) --> ~((B ^ C) & A)
2045 if (matchNotOrAnd(Op1, m_Specific(V: A), m_Specific(V: C), m_Specific(V: B), Dummy,
2046 true)) {
2047 Value *Xor = Builder.CreateXor(LHS: B, RHS: C);
2048 return (Opcode == Instruction::Or)
2049 ? BinaryOperator::CreateAnd(V1: Xor, V2: Builder.CreateNot(V: A))
2050 : BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: Xor, RHS: A));
2051 }
2052
2053 // (~(A | B) & C) | (~(B | C) & A) --> (A ^ C) & ~B
2054 // (~(A & B) | C) & (~(B & C) | A) --> ~((A ^ C) & B)
2055 if (matchNotOrAnd(Op1, m_Specific(V: B), m_Specific(V: C), m_Specific(V: A), Dummy,
2056 true)) {
2057 Value *Xor = Builder.CreateXor(LHS: A, RHS: C);
2058 return (Opcode == Instruction::Or)
2059 ? BinaryOperator::CreateAnd(V1: Xor, V2: Builder.CreateNot(V: B))
2060 : BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: Xor, RHS: B));
2061 }
2062
2063 // (~(A | B) & C) | ~(A | C) --> ~((B & C) | A)
2064 // (~(A & B) | C) & ~(A & C) --> ~((B | C) & A)
2065 if (match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_OneUse(
2066 SubPattern: m_c_BinOp(Opcode, L: m_Specific(V: A), R: m_Specific(V: C)))))))
2067 return BinaryOperator::CreateNot(Op: Builder.CreateBinOp(
2068 Opc: Opcode, LHS: Builder.CreateBinOp(Opc: FlippedOpcode, LHS: B, RHS: C), RHS: A));
2069
2070 // (~(A | B) & C) | ~(B | C) --> ~((A & C) | B)
2071 // (~(A & B) | C) & ~(B & C) --> ~((A | C) & B)
2072 if (match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_OneUse(
2073 SubPattern: m_c_BinOp(Opcode, L: m_Specific(V: B), R: m_Specific(V: C)))))))
2074 return BinaryOperator::CreateNot(Op: Builder.CreateBinOp(
2075 Opc: Opcode, LHS: Builder.CreateBinOp(Opc: FlippedOpcode, LHS: A, RHS: C), RHS: B));
2076
2077 // (~(A | B) & C) | ~(C | (A ^ B)) --> ~((A | B) & (C | (A ^ B)))
2078 // Note that the pattern with swapped and/or is not handled because the
2079 // result would be more undefined than the source:
2080 // (~(A & B) | C) & ~(C & (A ^ B)) --> (A ^ B ^ C) | ~(A | C) is invalid.
2081 if (Opcode == Instruction::Or && Op0->hasOneUse() &&
2082 match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_CombineAnd(
2083 L: m_Value(V&: Y),
2084 R: m_c_BinOp(Opcode, L: m_Specific(V: C),
2085 R: m_c_Xor(L: m_Specific(V: A), R: m_Specific(V: B)))))))) {
2086 // X = ~(A | B)
2087 // Y = (C | (A ^ B))
2088 Value *Or = cast<BinaryOperator>(Val: X)->getOperand(i_nocapture: 0);
2089 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: Or, RHS: Y));
2090 }
2091 }
2092
2093 // (~A & B & C) | ... --> ...
2094 // (~A | B | C) | ... --> ...
2095 // TODO: The one-use checks are conservative. We only need to check that the
2096 // total number of multiply-used values does not exceed the reduction in
2097 // operations.
2098 if (match(V: Op0,
2099 P: m_OneUse(SubPattern: m_c_BinOp(Opcode: FlippedOpcode,
2100 L: m_BinOp(Opcode: FlippedOpcode, L: m_Value(V&: B), R: m_Value(V&: C)),
2101 R: m_CombineAnd(L: m_Value(V&: X), R: m_Not(V: m_Value(V&: A)))))) ||
2102 match(V: Op0, P: m_OneUse(SubPattern: m_c_BinOp(
2103 Opcode: FlippedOpcode,
2104 L: m_c_BinOp(Opcode: FlippedOpcode, L: m_Value(V&: C),
2105 R: m_CombineAnd(L: m_Value(V&: X), R: m_Not(V: m_Value(V&: A)))),
2106 R: m_Value(V&: B))))) {
2107 // X = ~A
2108 // (~A & B & C) | ~(A | B | C) --> ~(A | (B ^ C))
2109 // (~A | B | C) & ~(A & B & C) --> (~A | (B ^ C))
2110 if (match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_c_BinOp(
2111 Opcode, L: m_c_BinOp(Opcode, L: m_Specific(V: A), R: m_Specific(V: B)),
2112 R: m_Specific(V: C))))) ||
2113 match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_c_BinOp(
2114 Opcode, L: m_c_BinOp(Opcode, L: m_Specific(V: B), R: m_Specific(V: C)),
2115 R: m_Specific(V: A))))) ||
2116 match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_c_BinOp(
2117 Opcode, L: m_c_BinOp(Opcode, L: m_Specific(V: A), R: m_Specific(V: C)),
2118 R: m_Specific(V: B)))))) {
2119 Value *Xor = Builder.CreateXor(LHS: B, RHS: C);
2120 return (Opcode == Instruction::Or)
2121 ? BinaryOperator::CreateNot(Op: Builder.CreateOr(LHS: Xor, RHS: A))
2122 : BinaryOperator::CreateOr(V1: Xor, V2: X);
2123 }
2124
2125 // (~A & B & C) | ~(A | B) --> (C | ~B) & ~A
2126 // (~A | B | C) & ~(A & B) --> (C & ~B) | ~A
2127 if (match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_OneUse(
2128 SubPattern: m_c_BinOp(Opcode, L: m_Specific(V: A), R: m_Specific(V: B)))))))
2129 return BinaryOperator::Create(
2130 Op: FlippedOpcode, S1: Builder.CreateBinOp(Opc: Opcode, LHS: C, RHS: Builder.CreateNot(V: B)),
2131 S2: X);
2132
2133 // (~A & B & C) | ~(A | C) --> (B | ~C) & ~A
2134 // (~A | B | C) & ~(A & C) --> (B & ~C) | ~A
2135 if (match(V: Op1, P: m_OneUse(SubPattern: m_Not(V: m_OneUse(
2136 SubPattern: m_c_BinOp(Opcode, L: m_Specific(V: A), R: m_Specific(V: C)))))))
2137 return BinaryOperator::Create(
2138 Op: FlippedOpcode, S1: Builder.CreateBinOp(Opc: Opcode, LHS: B, RHS: Builder.CreateNot(V: C)),
2139 S2: X);
2140 }
2141
2142 return nullptr;
2143}
2144
2145/// Try to reassociate a pair of binops so that values with one use only are
2146 /// part of the same instruction. This may enable folds that are limited by
2147/// multi-use restrictions and makes it more likely to match other patterns that
2148/// are looking for a common operand.
2149static Instruction *reassociateForUses(BinaryOperator &BO,
2150 InstCombinerImpl::BuilderTy &Builder) {
2151 Instruction::BinaryOps Opcode = BO.getOpcode();
2152 Value *X, *Y, *Z;
2153 if (match(V: &BO,
2154 P: m_c_BinOp(Opcode, L: m_OneUse(SubPattern: m_BinOp(Opcode, L: m_Value(V&: X), R: m_Value(V&: Y))),
2155 R: m_OneUse(SubPattern: m_Value(V&: Z))))) {
2156 if (!isa<Constant>(Val: X) && !isa<Constant>(Val: Y) && !isa<Constant>(Val: Z)) {
2157 // (X op Y) op Z --> (Y op Z) op X
2158 if (!X->hasOneUse()) {
2159 Value *YZ = Builder.CreateBinOp(Opc: Opcode, LHS: Y, RHS: Z);
2160 return BinaryOperator::Create(Op: Opcode, S1: YZ, S2: X);
2161 }
2162 // (X op Y) op Z --> (X op Z) op Y
2163 if (!Y->hasOneUse()) {
2164 Value *XZ = Builder.CreateBinOp(Opc: Opcode, LHS: X, RHS: Z);
2165 return BinaryOperator::Create(Op: Opcode, S1: XZ, S2: Y);
2166 }
2167 }
2168 }
2169
2170 return nullptr;
2171}
2172
2173// Match
2174// (X + C2) | C
2175// (X + C2) ^ C
2176// (X + C2) & C
2177// and convert to do the bitwise logic first:
2178// (X | C) + C2
2179// (X ^ C) + C2
2180// (X & C) + C2
2181 // iff the bits affected by the logic op are below the lowest bit affected by the math op
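// For example (illustrative, i8): (X + 16) | 7 --> (X | 7) + 16, since the 'or'
// only touches bits 0..2 while the add only affects bit 4 and above.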
2182static Instruction *canonicalizeLogicFirst(BinaryOperator &I,
2183 InstCombiner::BuilderTy &Builder) {
2184 Type *Ty = I.getType();
2185 Instruction::BinaryOps OpC = I.getOpcode();
2186 Value *Op0 = I.getOperand(i_nocapture: 0);
2187 Value *Op1 = I.getOperand(i_nocapture: 1);
2188 Value *X;
2189 const APInt *C, *C2;
2190
2191 if (!(match(V: Op0, P: m_OneUse(SubPattern: m_Add(L: m_Value(V&: X), R: m_APInt(Res&: C2)))) &&
2192 match(V: Op1, P: m_APInt(Res&: C))))
2193 return nullptr;
2194
2195 unsigned Width = Ty->getScalarSizeInBits();
2196 unsigned LastOneMath = Width - C2->countr_zero();
2197
2198 switch (OpC) {
2199 case Instruction::And:
2200 if (C->countl_one() < LastOneMath)
2201 return nullptr;
2202 break;
2203 case Instruction::Xor:
2204 case Instruction::Or:
2205 if (C->countl_zero() < LastOneMath)
2206 return nullptr;
2207 break;
2208 default:
2209 llvm_unreachable("Unexpected BinaryOp!");
2210 }
2211
2212 Value *NewBinOp = Builder.CreateBinOp(Opc: OpC, LHS: X, RHS: ConstantInt::get(Ty, V: *C));
2213 return BinaryOperator::CreateWithCopiedFlags(Opc: Instruction::Add, V1: NewBinOp,
2214 V2: ConstantInt::get(Ty, V: *C2), CopyO: Op0);
2215}
2216
2217// binop(shift(ShiftedC1, ShAmt), shift(ShiftedC2, add(ShAmt, AddC))) ->
2218// shift(binop(ShiftedC1, shift(ShiftedC2, AddC)), ShAmt)
2219// where both shifts are the same and AddC is a valid shift amount.
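// For example (illustrative constants):
//   (1 << ShAmt) | (2 << (ShAmt + 3)) --> (1 | (2 << 3)) << ShAmt = 17 << ShAmt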
2220Instruction *InstCombinerImpl::foldBinOpOfDisplacedShifts(BinaryOperator &I) {
2221 assert((I.isBitwiseLogicOp() || I.getOpcode() == Instruction::Add) &&
2222 "Unexpected opcode");
2223
2224 Value *ShAmt;
2225 Constant *ShiftedC1, *ShiftedC2, *AddC;
2226 Type *Ty = I.getType();
2227 unsigned BitWidth = Ty->getScalarSizeInBits();
2228 if (!match(V: &I, P: m_c_BinOp(L: m_Shift(L: m_ImmConstant(C&: ShiftedC1), R: m_Value(V&: ShAmt)),
2229 R: m_Shift(L: m_ImmConstant(C&: ShiftedC2),
2230 R: m_AddLike(L: m_Deferred(V: ShAmt),
2231 R: m_ImmConstant(C&: AddC))))))
2232 return nullptr;
2233
2234 // Make sure the add constant is a valid shift amount.
2235 if (!match(V: AddC,
2236 P: m_SpecificInt_ICMP(Predicate: ICmpInst::ICMP_ULT, Threshold: APInt(BitWidth, BitWidth))))
2237 return nullptr;
2238
2239 // Avoid constant expressions.
2240 auto *Op0Inst = dyn_cast<Instruction>(Val: I.getOperand(i_nocapture: 0));
2241 auto *Op1Inst = dyn_cast<Instruction>(Val: I.getOperand(i_nocapture: 1));
2242 if (!Op0Inst || !Op1Inst)
2243 return nullptr;
2244
2245 // Both shifts must be the same.
2246 Instruction::BinaryOps ShiftOp =
2247 static_cast<Instruction::BinaryOps>(Op0Inst->getOpcode());
2248 if (ShiftOp != Op1Inst->getOpcode())
2249 return nullptr;
2250
2251 // For adds, only left shifts are supported.
2252 if (I.getOpcode() == Instruction::Add && ShiftOp != Instruction::Shl)
2253 return nullptr;
2254
2255 Value *NewC = Builder.CreateBinOp(
2256 Opc: I.getOpcode(), LHS: ShiftedC1, RHS: Builder.CreateBinOp(Opc: ShiftOp, LHS: ShiftedC2, RHS: AddC));
2257 return BinaryOperator::Create(Op: ShiftOp, S1: NewC, S2: ShAmt);
2258}
2259
2260// Fold and/or/xor with two equal intrinsic IDs:
2261// bitwise(fshl (A, B, ShAmt), fshl(C, D, ShAmt))
2262// -> fshl(bitwise(A, C), bitwise(B, D), ShAmt)
2263// bitwise(fshr (A, B, ShAmt), fshr(C, D, ShAmt))
2264// -> fshr(bitwise(A, C), bitwise(B, D), ShAmt)
2265// bitwise(bswap(A), bswap(B)) -> bswap(bitwise(A, B))
2266// bitwise(bswap(A), C) -> bswap(bitwise(A, bswap(C)))
2267// bitwise(bitreverse(A), bitreverse(B)) -> bitreverse(bitwise(A, B))
2268// bitwise(bitreverse(A), C) -> bitreverse(bitwise(A, bitreverse(C)))
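// For example (illustrative, i16):
//   and (bswap X), 0xFF00 --> bswap (and X, 0x00FF)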
2269static Instruction *
2270foldBitwiseLogicWithIntrinsics(BinaryOperator &I,
2271 InstCombiner::BuilderTy &Builder) {
2272 assert(I.isBitwiseLogicOp() && "Should be and/or/xor");
2273 if (!I.getOperand(i_nocapture: 0)->hasOneUse())
2274 return nullptr;
2275 IntrinsicInst *X = dyn_cast<IntrinsicInst>(Val: I.getOperand(i_nocapture: 0));
2276 if (!X)
2277 return nullptr;
2278
2279 IntrinsicInst *Y = dyn_cast<IntrinsicInst>(Val: I.getOperand(i_nocapture: 1));
2280 if (Y && (!Y->hasOneUse() || X->getIntrinsicID() != Y->getIntrinsicID()))
2281 return nullptr;
2282
2283 Intrinsic::ID IID = X->getIntrinsicID();
2284 const APInt *RHSC;
2285 // Try to match constant RHS.
2286 if (!Y && (!(IID == Intrinsic::bswap || IID == Intrinsic::bitreverse) ||
2287 !match(V: I.getOperand(i_nocapture: 1), P: m_APInt(Res&: RHSC))))
2288 return nullptr;
2289
2290 switch (IID) {
2291 case Intrinsic::fshl:
2292 case Intrinsic::fshr: {
2293 if (X->getOperand(i_nocapture: 2) != Y->getOperand(i_nocapture: 2))
2294 return nullptr;
2295 Value *NewOp0 =
2296 Builder.CreateBinOp(Opc: I.getOpcode(), LHS: X->getOperand(i_nocapture: 0), RHS: Y->getOperand(i_nocapture: 0));
2297 Value *NewOp1 =
2298 Builder.CreateBinOp(Opc: I.getOpcode(), LHS: X->getOperand(i_nocapture: 1), RHS: Y->getOperand(i_nocapture: 1));
2299 Function *F =
2300 Intrinsic::getOrInsertDeclaration(M: I.getModule(), id: IID, Tys: I.getType());
2301 return CallInst::Create(Func: F, Args: {NewOp0, NewOp1, X->getOperand(i_nocapture: 2)});
2302 }
2303 case Intrinsic::bswap:
2304 case Intrinsic::bitreverse: {
2305 Value *NewOp0 = Builder.CreateBinOp(
2306 Opc: I.getOpcode(), LHS: X->getOperand(i_nocapture: 0),
2307 RHS: Y ? Y->getOperand(i_nocapture: 0)
2308 : ConstantInt::get(Ty: I.getType(), V: IID == Intrinsic::bswap
2309 ? RHSC->byteSwap()
2310 : RHSC->reverseBits()));
2311 Function *F =
2312 Intrinsic::getOrInsertDeclaration(M: I.getModule(), id: IID, Tys: I.getType());
2313 return CallInst::Create(Func: F, Args: {NewOp0});
2314 }
2315 default:
2316 return nullptr;
2317 }
2318}
2319
2320// Try to simplify V by replacing occurrences of Op with RepOp, but only look
2321// through bitwise operations. In particular, for X | Y we try to replace Y with
2322// 0 inside X and for X & Y we try to replace Y with -1 inside X.
2323// Return the simplified result of X if successful, and nullptr otherwise.
2324// If SimplifyOnly is true, no new instructions will be created.
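// For example, in ((A ^ Y) | Y), replacing Y with 0 inside the first operand
// simplifies it to A, so the whole expression becomes (A | Y).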
2325static Value *simplifyAndOrWithOpReplaced(Value *V, Value *Op, Value *RepOp,
2326 bool SimplifyOnly,
2327 InstCombinerImpl &IC,
2328 unsigned Depth = 0) {
2329 if (Op == RepOp)
2330 return nullptr;
2331
2332 if (V == Op)
2333 return RepOp;
2334
2335 auto *I = dyn_cast<BinaryOperator>(Val: V);
2336 if (!I || !I->isBitwiseLogicOp() || Depth >= 3)
2337 return nullptr;
2338
2339 if (!I->hasOneUse())
2340 SimplifyOnly = true;
2341
2342 Value *NewOp0 = simplifyAndOrWithOpReplaced(V: I->getOperand(i_nocapture: 0), Op, RepOp,
2343 SimplifyOnly, IC, Depth: Depth + 1);
2344 Value *NewOp1 = simplifyAndOrWithOpReplaced(V: I->getOperand(i_nocapture: 1), Op, RepOp,
2345 SimplifyOnly, IC, Depth: Depth + 1);
2346 if (!NewOp0 && !NewOp1)
2347 return nullptr;
2348
2349 if (!NewOp0)
2350 NewOp0 = I->getOperand(i_nocapture: 0);
2351 if (!NewOp1)
2352 NewOp1 = I->getOperand(i_nocapture: 1);
2353
2354 if (Value *Res = simplifyBinOp(Opcode: I->getOpcode(), LHS: NewOp0, RHS: NewOp1,
2355 Q: IC.getSimplifyQuery().getWithInstruction(I)))
2356 return Res;
2357
2358 if (SimplifyOnly)
2359 return nullptr;
2360 return IC.Builder.CreateBinOp(Opc: I->getOpcode(), LHS: NewOp0, RHS: NewOp1);
2361}
2362
2363/// Reassociate and/or expressions to see if we can fold the inner and/or ops.
2364/// TODO: Make this recursive; it's a little tricky because an arbitrary
2365/// number of and/or instructions might have to be created.
2366Value *InstCombinerImpl::reassociateBooleanAndOr(Value *LHS, Value *X, Value *Y,
2367 Instruction &I, bool IsAnd,
2368 bool RHSIsLogical) {
2369 Instruction::BinaryOps Opcode = IsAnd ? Instruction::And : Instruction::Or;
2370 // LHS bop (X lop Y) --> (LHS bop X) lop Y
2371 // LHS bop (X bop Y) --> (LHS bop X) bop Y
2372 if (Value *Res = foldBooleanAndOr(LHS, RHS: X, I, IsAnd, /*IsLogical=*/false))
2373 return RHSIsLogical ? Builder.CreateLogicalOp(Opc: Opcode, Cond1: Res, Cond2: Y)
2374 : Builder.CreateBinOp(Opc: Opcode, LHS: Res, RHS: Y);
2375 // LHS bop (X bop Y) --> X bop (LHS bop Y)
2376 // LHS bop (X lop Y) --> X lop (LHS bop Y)
2377 if (Value *Res = foldBooleanAndOr(LHS, RHS: Y, I, IsAnd, /*IsLogical=*/false))
2378 return RHSIsLogical ? Builder.CreateLogicalOp(Opc: Opcode, Cond1: X, Cond2: Res)
2379 : Builder.CreateBinOp(Opc: Opcode, LHS: X, RHS: Res);
2380 return nullptr;
2381}
2382
2383// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
2384// here. We should standardize that construct where it is needed or choose some
2385// other way to ensure that commutated variants of patterns are not missed.
2386Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
2387 Type *Ty = I.getType();
2388
2389 if (Value *V = simplifyAndInst(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1),
2390 Q: SQ.getWithInstruction(I: &I)))
2391 return replaceInstUsesWith(I, V);
2392
2393 if (SimplifyAssociativeOrCommutative(I))
2394 return &I;
2395
2396 if (Instruction *X = foldVectorBinop(Inst&: I))
2397 return X;
2398
2399 if (Instruction *Phi = foldBinopWithPhiOperands(BO&: I))
2400 return Phi;
2401
2402 // See if we can simplify any instructions used by the instruction whose sole
2403 // purpose is to compute bits we don't care about.
2404 if (SimplifyDemandedInstructionBits(Inst&: I))
2405 return &I;
2406
2407 // Do this before using distributive laws to catch simple and/or/not patterns.
2408 if (Instruction *Xor = foldAndToXor(I, Builder))
2409 return Xor;
2410
2411 if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
2412 return X;
2413
2414 // (A|B)&(A|C) -> A|(B&C) etc
2415 if (Value *V = foldUsingDistributiveLaws(I))
2416 return replaceInstUsesWith(I, V);
2417
2418 if (Instruction *R = foldBinOpShiftWithShift(I))
2419 return R;
2420
2421 Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);
2422
2423 Value *X, *Y;
2424 const APInt *C;
2425 if ((match(V: Op0, P: m_OneUse(SubPattern: m_LogicalShift(L: m_One(), R: m_Value(V&: X)))) ||
2426 (match(V: Op0, P: m_OneUse(SubPattern: m_Shl(L: m_APInt(Res&: C), R: m_Value(V&: X)))) && (*C)[0])) &&
2427 match(V: Op1, P: m_One())) {
2428 // (1 >> X) & 1 --> zext(X == 0)
2429 // (C << X) & 1 --> zext(X == 0), when C is odd
2430 Value *IsZero = Builder.CreateICmpEQ(LHS: X, RHS: ConstantInt::get(Ty, V: 0));
2431 return new ZExtInst(IsZero, Ty);
2432 }
2433
2434 // (-(X & 1)) & Y --> (X & 1) == 0 ? 0 : Y
2435 Value *Neg;
2436 if (match(V: &I,
2437 P: m_c_And(L: m_CombineAnd(L: m_Value(V&: Neg),
2438 R: m_OneUse(SubPattern: m_Neg(V: m_And(L: m_Value(), R: m_One())))),
2439 R: m_Value(V&: Y)))) {
2440 Value *Cmp = Builder.CreateIsNull(Arg: Neg);
2441 return SelectInst::Create(C: Cmp, S1: ConstantInt::getNullValue(Ty), S2: Y);
2442 }
2443
2444 // Canonicalize:
2445 // (X +/- Y) & Y --> ~X & Y when Y is a power of 2.
2446 if (match(V: &I, P: m_c_And(L: m_Value(V&: Y), R: m_OneUse(SubPattern: m_CombineOr(
2447 L: m_c_Add(L: m_Value(V&: X), R: m_Deferred(V: Y)),
2448 R: m_Sub(L: m_Value(V&: X), R: m_Deferred(V: Y)))))) &&
2449 isKnownToBeAPowerOfTwo(V: Y, /*OrZero*/ true, CxtI: &I))
2450 return BinaryOperator::CreateAnd(V1: Builder.CreateNot(V: X), V2: Y);
2451
2452 if (match(V: Op1, P: m_APInt(Res&: C))) {
2453 const APInt *XorC;
2454 if (match(V: Op0, P: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: X), R: m_APInt(Res&: XorC))))) {
2455 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
2456 Constant *NewC = ConstantInt::get(Ty, V: *C & *XorC);
2457 Value *And = Builder.CreateAnd(LHS: X, RHS: Op1);
2458 And->takeName(V: Op0);
2459 return BinaryOperator::CreateXor(V1: And, V2: NewC);
2460 }
2461
2462 const APInt *OrC;
2463 if (match(V: Op0, P: m_OneUse(SubPattern: m_Or(L: m_Value(V&: X), R: m_APInt(Res&: OrC))))) {
2464 // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
2465 // NOTE: This reduces the number of bits set in the & mask, which
2466 // can expose opportunities for store narrowing for scalars.
2467 // NOTE: SimplifyDemandedBits should have already removed bits from C1
2468 // that aren't set in C2. Meaning we can replace (C1&C2) with C1 in
2469 // above, but this feels safer.
2470 APInt Together = *C & *OrC;
2471 Value *And = Builder.CreateAnd(LHS: X, RHS: ConstantInt::get(Ty, V: Together ^ *C));
2472 And->takeName(V: Op0);
2473 return BinaryOperator::CreateOr(V1: And, V2: ConstantInt::get(Ty, V: Together));
2474 }
2475
2476 unsigned Width = Ty->getScalarSizeInBits();
2477 const APInt *ShiftC;
2478 if (match(V: Op0, P: m_OneUse(SubPattern: m_SExt(Op: m_AShr(L: m_Value(V&: X), R: m_APInt(Res&: ShiftC))))) &&
2479 ShiftC->ult(RHS: Width)) {
2480 if (*C == APInt::getLowBitsSet(numBits: Width, loBitsSet: Width - ShiftC->getZExtValue())) {
2481 // We are clearing high bits that were potentially set by sext+ashr:
2482 // and (sext (ashr X, ShiftC)), C --> lshr (sext X), ShiftC
2483 Value *Sext = Builder.CreateSExt(V: X, DestTy: Ty);
2484 Constant *ShAmtC = ConstantInt::get(Ty, V: ShiftC->zext(width: Width));
2485 return BinaryOperator::CreateLShr(V1: Sext, V2: ShAmtC);
2486 }
2487 }
2488
2489 // If this 'and' clears the sign-bits added by ashr, replace with lshr:
2490 // and (ashr X, ShiftC), C --> lshr X, ShiftC
2491 if (match(V: Op0, P: m_AShr(L: m_Value(V&: X), R: m_APInt(Res&: ShiftC))) && ShiftC->ult(RHS: Width) &&
2492 C->isMask(numBits: Width - ShiftC->getZExtValue()))
2493 return BinaryOperator::CreateLShr(V1: X, V2: ConstantInt::get(Ty, V: *ShiftC));
2494
2495 const APInt *AddC;
2496 if (match(V: Op0, P: m_Add(L: m_Value(V&: X), R: m_APInt(Res&: AddC)))) {
2497 // If we are masking the result of the add down to exactly one bit and
2498 // the constant we are adding has no bits set below that bit, then the
2499 // add is flipping a single bit. Example:
2500 // (X + 4) & 4 --> (X & 4) ^ 4
2501 if (Op0->hasOneUse() && C->isPowerOf2() && (*AddC & (*C - 1)) == 0) {
2502 assert((*C & *AddC) != 0 && "Expected common bit");
2503 Value *NewAnd = Builder.CreateAnd(LHS: X, RHS: Op1);
2504 return BinaryOperator::CreateXor(V1: NewAnd, V2: Op1);
2505 }
2506 }
2507
2508 // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the
2509 // bitwidth of X and OP behaves well when given trunc(C1) and X.
2510 auto isNarrowableBinOpcode = [](BinaryOperator *B) {
2511 switch (B->getOpcode()) {
2512 case Instruction::Xor:
2513 case Instruction::Or:
2514 case Instruction::Mul:
2515 case Instruction::Add:
2516 case Instruction::Sub:
2517 return true;
2518 default:
2519 return false;
2520 }
2521 };
2522 BinaryOperator *BO;
2523 if (match(V: Op0, P: m_OneUse(SubPattern: m_BinOp(I&: BO))) && isNarrowableBinOpcode(BO)) {
2524 Instruction::BinaryOps BOpcode = BO->getOpcode();
2525 Value *X;
2526 const APInt *C1;
2527 // TODO: The one-use restrictions could be relaxed a little if the AND
2528 // is going to be removed.
2529 // Try to narrow the 'and' and a binop with constant operand:
2530 // and (bo (zext X), C1), C --> zext (and (bo X, TruncC1), TruncC)
2531 if (match(V: BO, P: m_c_BinOp(L: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X))), R: m_APInt(Res&: C1))) &&
2532 C->isIntN(N: X->getType()->getScalarSizeInBits())) {
2533 unsigned XWidth = X->getType()->getScalarSizeInBits();
2534 Constant *TruncC1 = ConstantInt::get(Ty: X->getType(), V: C1->trunc(width: XWidth));
2535 Value *BinOp = isa<ZExtInst>(Val: BO->getOperand(i_nocapture: 0))
2536 ? Builder.CreateBinOp(Opc: BOpcode, LHS: X, RHS: TruncC1)
2537 : Builder.CreateBinOp(Opc: BOpcode, LHS: TruncC1, RHS: X);
2538 Constant *TruncC = ConstantInt::get(Ty: X->getType(), V: C->trunc(width: XWidth));
2539 Value *And = Builder.CreateAnd(LHS: BinOp, RHS: TruncC);
2540 return new ZExtInst(And, Ty);
2541 }
2542
2543 // Similar to above: if the mask matches the zext input width, then the
2544 // 'and' can be eliminated, so we can truncate the other variable op:
2545 // and (bo (zext X), Y), C --> zext (bo X, (trunc Y))
2546 if (isa<Instruction>(Val: BO->getOperand(i_nocapture: 0)) &&
2547 match(V: BO->getOperand(i_nocapture: 0), P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X)))) &&
2548 C->isMask(numBits: X->getType()->getScalarSizeInBits())) {
2549 Y = BO->getOperand(i_nocapture: 1);
2550 Value *TrY = Builder.CreateTrunc(V: Y, DestTy: X->getType(), Name: Y->getName() + ".tr");
2551 Value *NewBO =
2552 Builder.CreateBinOp(Opc: BOpcode, LHS: X, RHS: TrY, Name: BO->getName() + ".narrow");
2553 return new ZExtInst(NewBO, Ty);
2554 }
2555 // and (bo Y, (zext X)), C --> zext (bo (trunc Y), X)
2556 if (isa<Instruction>(Val: BO->getOperand(i_nocapture: 1)) &&
2557 match(V: BO->getOperand(i_nocapture: 1), P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X)))) &&
2558 C->isMask(numBits: X->getType()->getScalarSizeInBits())) {
2559 Y = BO->getOperand(i_nocapture: 0);
2560 Value *TrY = Builder.CreateTrunc(V: Y, DestTy: X->getType(), Name: Y->getName() + ".tr");
2561 Value *NewBO =
2562 Builder.CreateBinOp(Opc: BOpcode, LHS: TrY, RHS: X, Name: BO->getName() + ".narrow");
2563 return new ZExtInst(NewBO, Ty);
2564 }
2565 }
2566
2567 // This is intentionally placed after the narrowing transforms for
2568 // efficiency (transform directly to the narrow logic op if possible).
2569 // If the mask is only needed on one incoming arm, push the 'and' op up.
2570 if (match(V: Op0, P: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: X), R: m_Value(V&: Y)))) ||
2571 match(V: Op0, P: m_OneUse(SubPattern: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
2572 APInt NotAndMask(~(*C));
2573 BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Val: Op0)->getOpcode();
2574 if (MaskedValueIsZero(V: X, Mask: NotAndMask, CxtI: &I)) {
2575 // Not masking anything out for the LHS, move mask to RHS.
2576 // and ({x}or X, Y), C --> {x}or X, (and Y, C)
2577 Value *NewRHS = Builder.CreateAnd(LHS: Y, RHS: Op1, Name: Y->getName() + ".masked");
2578 return BinaryOperator::Create(Op: BinOp, S1: X, S2: NewRHS);
2579 }
2580 if (!isa<Constant>(Val: Y) && MaskedValueIsZero(V: Y, Mask: NotAndMask, CxtI: &I)) {
2581 // Not masking anything out for the RHS, move mask to LHS.
2582 // and ({x}or X, Y), C --> {x}or (and X, C), Y
2583 Value *NewLHS = Builder.CreateAnd(LHS: X, RHS: Op1, Name: X->getName() + ".masked");
2584 return BinaryOperator::Create(Op: BinOp, S1: NewLHS, S2: Y);
2585 }
2586 }
2587
2588 // When the mask is a power-of-2 constant and op0 is a shifted-power-of-2
2589 // constant, test if the shift amount equals the offset bit index:
2590 // (ShiftC << X) & C --> X == (log2(C) - log2(ShiftC)) ? C : 0
2591 // (ShiftC >> X) & C --> X == (log2(ShiftC) - log2(C)) ? C : 0
2592 if (C->isPowerOf2() &&
2593 match(V: Op0, P: m_OneUse(SubPattern: m_LogicalShift(L: m_Power2(V&: ShiftC), R: m_Value(V&: X))))) {
2594 int Log2ShiftC = ShiftC->exactLogBase2();
2595 int Log2C = C->exactLogBase2();
2596 bool IsShiftLeft =
2597 cast<BinaryOperator>(Val: Op0)->getOpcode() == Instruction::Shl;
2598 int BitNum = IsShiftLeft ? Log2C - Log2ShiftC : Log2ShiftC - Log2C;
2599 assert(BitNum >= 0 && "Expected demanded bits to handle impossible mask");
2600 Value *Cmp = Builder.CreateICmpEQ(LHS: X, RHS: ConstantInt::get(Ty, V: BitNum));
2601 return SelectInst::Create(C: Cmp, S1: ConstantInt::get(Ty, V: *C),
2602 S2: ConstantInt::getNullValue(Ty));
2603 }
2604
2605 Constant *C1, *C2;
2606 const APInt *C3 = C;
2607 Value *X;
2608 if (C3->isPowerOf2()) {
2609 Constant *Log2C3 = ConstantInt::get(Ty, V: C3->countr_zero());
2610 if (match(V: Op0, P: m_OneUse(SubPattern: m_LShr(L: m_Shl(L: m_ImmConstant(C&: C1), R: m_Value(V&: X)),
2611 R: m_ImmConstant(C&: C2)))) &&
2612 match(V: C1, P: m_Power2())) {
2613 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C: C1);
2614 Constant *LshrC = ConstantExpr::getAdd(C1: C2, C2: Log2C3);
2615 KnownBits KnownLShrc = computeKnownBits(V: LshrC, CxtI: nullptr);
2616 if (KnownLShrc.getMaxValue().ult(RHS: Width)) {
2617 // iff C1,C3 is pow2 and C2 + cttz(C3) < BitWidth:
2618 // ((C1 << X) >> C2) & C3 -> X == (cttz(C3)+C2-cttz(C1)) ? C3 : 0
2619 Constant *CmpC = ConstantExpr::getSub(C1: LshrC, C2: Log2C1);
2620 Value *Cmp = Builder.CreateICmpEQ(LHS: X, RHS: CmpC);
2621 return SelectInst::Create(C: Cmp, S1: ConstantInt::get(Ty, V: *C3),
2622 S2: ConstantInt::getNullValue(Ty));
2623 }
2624 }
2625
2626 if (match(V: Op0, P: m_OneUse(SubPattern: m_Shl(L: m_LShr(L: m_ImmConstant(C&: C1), R: m_Value(V&: X)),
2627 R: m_ImmConstant(C&: C2)))) &&
2628 match(V: C1, P: m_Power2())) {
2629 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C: C1);
2630 Constant *Cmp =
2631 ConstantFoldCompareInstOperands(Predicate: ICmpInst::ICMP_ULT, LHS: Log2C3, RHS: C2, DL);
2632 if (Cmp && Cmp->isZeroValue()) {
2633 // iff C1,C3 is pow2 and Log2(C3) >= C2:
2634 // ((C1 >> X) << C2) & C3 -> X == (cttz(C1)+C2-cttz(C3)) ? C3 : 0
2635 Constant *ShlC = ConstantExpr::getAdd(C1: C2, C2: Log2C1);
2636 Constant *CmpC = ConstantExpr::getSub(C1: ShlC, C2: Log2C3);
2637 Value *Cmp = Builder.CreateICmpEQ(LHS: X, RHS: CmpC);
2638 return SelectInst::Create(C: Cmp, S1: ConstantInt::get(Ty, V: *C3),
2639 S2: ConstantInt::getNullValue(Ty));
2640 }
2641 }
2642 }
2643 }
2644
2645 // If we are clearing the sign bit of a floating-point value, convert this to
2646 // fabs, then cast back to integer.
2647 //
2648 // This is a generous interpretation of noimplicitfloat, since this is not a
2649 // true floating-point operation.
2650 //
2651 // Assumes any IEEE-represented type has the sign bit in the high bit.
2652 // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt
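  // Illustrative instance: (bitcast float %f to i32) & 0x7fffffff
  //   --> bitcast (call float @llvm.fabs.f32(float %f)) to i32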
2653 Value *CastOp;
2654 if (match(V: Op0, P: m_ElementWiseBitCast(Op: m_Value(V&: CastOp))) &&
2655 match(V: Op1, P: m_MaxSignedValue()) &&
2656 !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
2657 Kind: Attribute::NoImplicitFloat)) {
2658 Type *EltTy = CastOp->getType()->getScalarType();
2659 if (EltTy->isFloatingPointTy() &&
2660 APFloat::hasSignBitInMSB(EltTy->getFltSemantics())) {
2661 Value *FAbs = Builder.CreateUnaryIntrinsic(ID: Intrinsic::fabs, V: CastOp);
2662 return new BitCastInst(FAbs, I.getType());
2663 }
2664 }
2665
2666 // and(shl(zext(X), Y), SignMask) -> and(sext(X), SignMask)
2667 // where Y is a valid shift amount.
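  // Illustrative instance: (shl (zext i8 %x to i32), 24) & 0x80000000
  //   --> (sext i8 %x to i32) & 0x80000000; only the sign bit of %x can
  //   reach the mask either way.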
2668 if (match(V: &I, P: m_And(L: m_OneUse(SubPattern: m_Shl(L: m_ZExt(Op: m_Value(V&: X)), R: m_Value(V&: Y))),
2669 R: m_SignMask())) &&
2670 match(V: Y, P: m_SpecificInt_ICMP(
2671 Predicate: ICmpInst::Predicate::ICMP_EQ,
2672 Threshold: APInt(Ty->getScalarSizeInBits(),
2673 Ty->getScalarSizeInBits() -
2674 X->getType()->getScalarSizeInBits())))) {
2675 auto *SExt = Builder.CreateSExt(V: X, DestTy: Ty, Name: X->getName() + ".signext");
2676 return BinaryOperator::CreateAnd(V1: SExt, V2: Op1);
2677 }
2678
2679 if (Instruction *Z = narrowMaskedBinOp(And&: I))
2680 return Z;
2681
2682 if (I.getType()->isIntOrIntVectorTy(BitWidth: 1)) {
2683 if (auto *SI0 = dyn_cast<SelectInst>(Val: Op0)) {
2684 if (auto *R =
2685 foldAndOrOfSelectUsingImpliedCond(Op: Op1, SI&: *SI0, /* IsAnd */ true))
2686 return R;
2687 }
2688 if (auto *SI1 = dyn_cast<SelectInst>(Val: Op1)) {
2689 if (auto *R =
2690 foldAndOrOfSelectUsingImpliedCond(Op: Op0, SI&: *SI1, /* IsAnd */ true))
2691 return R;
2692 }
2693 }
2694
2695 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
2696 return FoldedLogic;
2697
2698 if (Instruction *DeMorgan = matchDeMorgansLaws(I, IC&: *this))
2699 return DeMorgan;
2700
2701 {
2702 Value *A, *B, *C;
2703 // A & ~(A ^ B) --> A & B
2704 if (match(V: Op1, P: m_Not(V: m_c_Xor(L: m_Specific(V: Op0), R: m_Value(V&: B)))))
2705 return BinaryOperator::CreateAnd(V1: Op0, V2: B);
2706 // ~(A ^ B) & A --> A & B
2707 if (match(V: Op0, P: m_Not(V: m_c_Xor(L: m_Specific(V: Op1), R: m_Value(V&: B)))))
2708 return BinaryOperator::CreateAnd(V1: Op1, V2: B);
2709
2710 // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
2711 if (match(V: Op0, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))) &&
2712 match(V: Op1, P: m_Xor(L: m_Xor(L: m_Specific(V: B), R: m_Value(V&: C)), R: m_Specific(V: A)))) {
2713 Value *NotC = Op1->hasOneUse()
2714 ? Builder.CreateNot(V: C)
2715 : getFreelyInverted(V: C, WillInvertAllUses: C->hasOneUse(), Builder: &Builder);
2716 if (NotC != nullptr)
2717 return BinaryOperator::CreateAnd(V1: Op0, V2: NotC);
2718 }
2719
2720 // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
2721 if (match(V: Op0, P: m_Xor(L: m_Xor(L: m_Value(V&: A), R: m_Value(V&: C)), R: m_Value(V&: B))) &&
2722 match(V: Op1, P: m_Xor(L: m_Specific(V: B), R: m_Specific(V: A)))) {
2723 Value *NotC = Op0->hasOneUse()
2724 ? Builder.CreateNot(V: C)
2725 : getFreelyInverted(V: C, WillInvertAllUses: C->hasOneUse(), Builder: &Builder);
2726 if (NotC != nullptr)
2727 return BinaryOperator::CreateAnd(V1: Op1, V2: NotC);
2728 }
2729
2730 // (A | B) & (~A ^ B) -> A & B
2731 // (A | B) & (B ^ ~A) -> A & B
2732 // (B | A) & (~A ^ B) -> A & B
2733 // (B | A) & (B ^ ~A) -> A & B
2734 if (match(V: Op1, P: m_c_Xor(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B))) &&
2735 match(V: Op0, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B))))
2736 return BinaryOperator::CreateAnd(V1: A, V2: B);
2737
2738 // (~A ^ B) & (A | B) -> A & B
2739 // (~A ^ B) & (B | A) -> A & B
2740 // (B ^ ~A) & (A | B) -> A & B
2741 // (B ^ ~A) & (B | A) -> A & B
2742 if (match(V: Op0, P: m_c_Xor(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B))) &&
2743 match(V: Op1, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B))))
2744 return BinaryOperator::CreateAnd(V1: A, V2: B);
2745
2746 // (~A | B) & (A ^ B) -> ~A & B
2747 // (~A | B) & (B ^ A) -> ~A & B
2748 // (B | ~A) & (A ^ B) -> ~A & B
2749 // (B | ~A) & (B ^ A) -> ~A & B
2750 if (match(V: Op0, P: m_c_Or(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B))) &&
2751 match(V: Op1, P: m_c_Xor(L: m_Specific(V: A), R: m_Specific(V: B))))
2752 return BinaryOperator::CreateAnd(V1: Builder.CreateNot(V: A), V2: B);
2753
2754 // (A ^ B) & (~A | B) -> ~A & B
2755 // (B ^ A) & (~A | B) -> ~A & B
2756 // (A ^ B) & (B | ~A) -> ~A & B
2757 // (B ^ A) & (B | ~A) -> ~A & B
2758 if (match(V: Op1, P: m_c_Or(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B))) &&
2759 match(V: Op0, P: m_c_Xor(L: m_Specific(V: A), R: m_Specific(V: B))))
2760 return BinaryOperator::CreateAnd(V1: Builder.CreateNot(V: A), V2: B);
2761 }
2762
2763 if (Value *Res =
2764 foldBooleanAndOr(LHS: Op0, RHS: Op1, I, /*IsAnd=*/true, /*IsLogical=*/false))
2765 return replaceInstUsesWith(I, V: Res);
2766
2767 if (match(V: Op1, P: m_OneUse(SubPattern: m_LogicalAnd(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
2768 bool IsLogical = isa<SelectInst>(Val: Op1);
2769 if (auto *V = reassociateBooleanAndOr(LHS: Op0, X, Y, I, /*IsAnd=*/true,
2770 /*RHSIsLogical=*/IsLogical))
2771 return replaceInstUsesWith(I, V);
2772 }
2773 if (match(V: Op0, P: m_OneUse(SubPattern: m_LogicalAnd(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
2774 bool IsLogical = isa<SelectInst>(Val: Op0);
2775 if (auto *V = reassociateBooleanAndOr(LHS: Op1, X, Y, I, /*IsAnd=*/true,
2776 /*RHSIsLogical=*/IsLogical))
2777 return replaceInstUsesWith(I, V);
2778 }
2779
2780 if (Instruction *FoldedFCmps = reassociateFCmps(BO&: I, Builder))
2781 return FoldedFCmps;
2782
2783 if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
2784 return CastedAnd;
2785
2786 if (Instruction *Sel = foldBinopOfSextBoolToSelect(I))
2787 return Sel;
2788
2789 // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>.
2790 // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
2791 // with binop identity constant. But creating a select with non-constant
2792 // arm may not be reversible due to poison semantics. Is that a good
2793 // canonicalization?
2794 Value *A, *B;
2795 if (match(V: &I, P: m_c_And(L: m_SExt(Op: m_Value(V&: A)), R: m_Value(V&: B))) &&
2796 A->getType()->isIntOrIntVectorTy(BitWidth: 1))
2797 return SelectInst::Create(C: A, S1: B, S2: Constant::getNullValue(Ty));
2798
2799 // Similarly, a 'not' of the bool translates to a swap of the select arms:
2800 // ~sext(A) & B / B & ~sext(A) --> A ? 0 : B
2801 if (match(V: &I, P: m_c_And(L: m_Not(V: m_SExt(Op: m_Value(V&: A))), R: m_Value(V&: B))) &&
2802 A->getType()->isIntOrIntVectorTy(BitWidth: 1))
2803 return SelectInst::Create(C: A, S1: Constant::getNullValue(Ty), S2: B);
2804
2805 // and(zext(A), B) -> A ? (B & 1) : 0
2806 if (match(V: &I, P: m_c_And(L: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: A))), R: m_Value(V&: B))) &&
2807 A->getType()->isIntOrIntVectorTy(BitWidth: 1))
2808 return SelectInst::Create(C: A, S1: Builder.CreateAnd(LHS: B, RHS: ConstantInt::get(Ty, V: 1)),
2809 S2: Constant::getNullValue(Ty));
2810
2811 // (-1 + A) & B --> A ? 0 : B where A is 0/1.
2812 if (match(V: &I, P: m_c_And(L: m_OneUse(SubPattern: m_Add(L: m_ZExtOrSelf(Op: m_Value(V&: A)), R: m_AllOnes())),
2813 R: m_Value(V&: B)))) {
2814 if (A->getType()->isIntOrIntVectorTy(BitWidth: 1))
2815 return SelectInst::Create(C: A, S1: Constant::getNullValue(Ty), S2: B);
2816 if (computeKnownBits(V: A, CxtI: &I).countMaxActiveBits() <= 1) {
2817 return SelectInst::Create(
2818 C: Builder.CreateICmpEQ(LHS: A, RHS: Constant::getNullValue(Ty: A->getType())), S1: B,
2819 S2: Constant::getNullValue(Ty));
2820 }
2821 }
2822
2823 // (iN X s>> (N-1)) & Y --> (X s< 0) ? Y : 0 -- with optional sext
2824 if (match(V: &I, P: m_c_And(L: m_OneUse(SubPattern: m_SExtOrSelf(
2825 Op: m_AShr(L: m_Value(V&: X), R: m_APIntAllowPoison(Res&: C)))),
2826 R: m_Value(V&: Y))) &&
2827 *C == X->getType()->getScalarSizeInBits() - 1) {
2828 Value *IsNeg = Builder.CreateIsNeg(Arg: X, Name: "isneg");
2829 return SelectInst::Create(C: IsNeg, S1: Y, S2: ConstantInt::getNullValue(Ty));
2830 }
2831 // If there's a 'not' of the shifted value, swap the select operands:
2832 // ~(iN X s>> (N-1)) & Y --> (X s< 0) ? 0 : Y -- with optional sext
2833 if (match(V: &I, P: m_c_And(L: m_OneUse(SubPattern: m_SExtOrSelf(
2834 Op: m_Not(V: m_AShr(L: m_Value(V&: X), R: m_APIntAllowPoison(Res&: C))))),
2835 R: m_Value(V&: Y))) &&
2836 *C == X->getType()->getScalarSizeInBits() - 1) {
2837 Value *IsNeg = Builder.CreateIsNeg(Arg: X, Name: "isneg");
2838 return SelectInst::Create(C: IsNeg, S1: ConstantInt::getNullValue(Ty), S2: Y);
2839 }
2840
2841 // (~x) & y --> ~(x | (~y)) iff that gets rid of inversions
2842 if (sinkNotIntoOtherHandOfLogicalOp(I))
2843 return &I;
2844
2845 // An and recurrence w/loop invariant step is equivalent to (and start, step)
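  // (The first iteration produces (and start, step); and'ing the invariant
  // step in again is idempotent, so every later value is the same.)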
2846 PHINode *PN = nullptr;
2847 Value *Start = nullptr, *Step = nullptr;
2848 if (matchSimpleRecurrence(I: &I, P&: PN, Start, Step) && DT.dominates(Def: Step, User: PN))
2849 return replaceInstUsesWith(I, V: Builder.CreateAnd(LHS: Start, RHS: Step));
2850
2851 if (Instruction *R = reassociateForUses(BO&: I, Builder))
2852 return R;
2853
2854 if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
2855 return Canonicalized;
2856
2857 if (Instruction *Folded = foldLogicOfIsFPClass(BO&: I, Op0, Op1))
2858 return Folded;
2859
2860 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
2861 return Res;
2862
2863 if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
2864 return Res;
2865
2866 if (Value *V =
2867 simplifyAndOrWithOpReplaced(V: Op0, Op: Op1, RepOp: Constant::getAllOnesValue(Ty),
2868 /*SimplifyOnly*/ false, IC&: *this))
2869 return BinaryOperator::CreateAnd(V1: V, V2: Op1);
2870 if (Value *V =
2871 simplifyAndOrWithOpReplaced(V: Op1, Op: Op0, RepOp: Constant::getAllOnesValue(Ty),
2872 /*SimplifyOnly*/ false, IC&: *this))
2873 return BinaryOperator::CreateAnd(V1: Op0, V2: V);
2874
2875 return nullptr;
2876}
2877
2878Instruction *InstCombinerImpl::matchBSwapOrBitReverse(Instruction &I,
2879 bool MatchBSwaps,
2880 bool MatchBitReversals) {
2881 SmallVector<Instruction *, 4> Insts;
2882 if (!recognizeBSwapOrBitReverseIdiom(I: &I, MatchBSwaps, MatchBitReversals,
2883 InsertedInsts&: Insts))
2884 return nullptr;
2885 Instruction *LastInst = Insts.pop_back_val();
2886 LastInst->removeFromParent();
2887
2888 for (auto *Inst : Insts) {
2889 Inst->setDebugLoc(I.getDebugLoc());
2890 Worklist.push(I: Inst);
2891 }
2892 return LastInst;
2893}
2894
2895std::optional<std::pair<Intrinsic::ID, SmallVector<Value *, 3>>>
2896InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) {
2897 // TODO: Can we reduce the code duplication between this and the related
2898 // rotate matching code under visitSelect and visitTrunc?
2899 assert(Or.getOpcode() == BinaryOperator::Or && "Expecting or instruction");
2900
2901 unsigned Width = Or.getType()->getScalarSizeInBits();
2902
2903 Instruction *Or0, *Or1;
2904 if (!match(V: Or.getOperand(i: 0), P: m_Instruction(I&: Or0)) ||
2905 !match(V: Or.getOperand(i: 1), P: m_Instruction(I&: Or1)))
2906 return std::nullopt;
2907
2908 bool IsFshl = true; // Sub on LSHR.
2909 SmallVector<Value *, 3> FShiftArgs;
2910
2911 // First, find an or'd pair of opposite shifts:
2912 // or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1)
2913 if (isa<BinaryOperator>(Val: Or0) && isa<BinaryOperator>(Val: Or1)) {
2914 Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
2915 if (!match(V: Or0,
2916 P: m_OneUse(SubPattern: m_LogicalShift(L: m_Value(V&: ShVal0), R: m_Value(V&: ShAmt0)))) ||
2917 !match(V: Or1,
2918 P: m_OneUse(SubPattern: m_LogicalShift(L: m_Value(V&: ShVal1), R: m_Value(V&: ShAmt1)))) ||
2919 Or0->getOpcode() == Or1->getOpcode())
2920 return std::nullopt;
2921
2922 // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
2923 if (Or0->getOpcode() == BinaryOperator::LShr) {
2924 std::swap(a&: Or0, b&: Or1);
2925 std::swap(a&: ShVal0, b&: ShVal1);
2926 std::swap(a&: ShAmt0, b&: ShAmt1);
2927 }
2928 assert(Or0->getOpcode() == BinaryOperator::Shl &&
2929 Or1->getOpcode() == BinaryOperator::LShr &&
2930 "Illegal or(shift,shift) pair");
2931
2932 // Match the shift amount operands for a funnel shift pattern. This always
2933 // matches a subtraction on the R operand.
2934 auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
2935 // Check for constant shift amounts that sum to the bitwidth.
2936 const APInt *LI, *RI;
2937 if (match(V: L, P: m_APIntAllowPoison(Res&: LI)) && match(V: R, P: m_APIntAllowPoison(Res&: RI)))
2938 if (LI->ult(RHS: Width) && RI->ult(RHS: Width) && (*LI + *RI) == Width)
2939 return ConstantInt::get(Ty: L->getType(), V: *LI);
2940
2941 Constant *LC, *RC;
2942 if (match(V: L, P: m_Constant(C&: LC)) && match(V: R, P: m_Constant(C&: RC)) &&
2943 match(V: L,
2944 P: m_SpecificInt_ICMP(Predicate: ICmpInst::ICMP_ULT, Threshold: APInt(Width, Width))) &&
2945 match(V: R,
2946 P: m_SpecificInt_ICMP(Predicate: ICmpInst::ICMP_ULT, Threshold: APInt(Width, Width))) &&
2947 match(V: ConstantExpr::getAdd(C1: LC, C2: RC), P: m_SpecificIntAllowPoison(V: Width)))
2948 return ConstantExpr::mergeUndefsWith(C: LC, Other: RC);
2949
2950 // (shl ShVal, X) | (lshr ShVal, (Width - X)) iff X < Width.
2951 // We limit this to X < Width in case the backend re-expands the
2952 // intrinsic, and has to reintroduce a shift modulo operation (InstCombine
2953 // might remove it after this fold). This still doesn't guarantee that the
2954 // final codegen will match this original pattern.
2955 if (match(V: R, P: m_OneUse(SubPattern: m_Sub(L: m_SpecificInt(V: Width), R: m_Specific(V: L))))) {
2956 KnownBits KnownL = computeKnownBits(V: L, CxtI: &Or);
2957 return KnownL.getMaxValue().ult(RHS: Width) ? L : nullptr;
2958 }
2959
2960 // For non-constant cases, the following patterns currently only work for
2961 // rotation patterns.
2962 // TODO: Add general funnel-shift compatible patterns.
2963 if (ShVal0 != ShVal1)
2964 return nullptr;
2965
2966 // For non-constant cases we don't support non-pow2 shift masks.
2967 // TODO: Is it worth matching urem as well?
2968 if (!isPowerOf2_32(Value: Width))
2969 return nullptr;
2970
2971 // The shift amount may be masked with negation:
2972 // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
2973 Value *X;
2974 unsigned Mask = Width - 1;
2975 if (match(V: L, P: m_And(L: m_Value(V&: X), R: m_SpecificInt(V: Mask))) &&
2976 match(V: R, P: m_And(L: m_Neg(V: m_Specific(V: X)), R: m_SpecificInt(V: Mask))))
2977 return X;
2978
2979 // (shl ShVal, X) | (lshr ShVal, ((-X) & (Width - 1)))
2980 if (match(V: R, P: m_And(L: m_Neg(V: m_Specific(V: L)), R: m_SpecificInt(V: Mask))))
2981 return L;
2982
2983 // Similar to above, but the shift amount may be extended after masking,
2984 // so return the extended value as the parameter for the intrinsic.
2985 if (match(V: L, P: m_ZExt(Op: m_And(L: m_Value(V&: X), R: m_SpecificInt(V: Mask)))) &&
2986 match(V: R,
2987 P: m_And(L: m_Neg(V: m_ZExt(Op: m_And(L: m_Specific(V: X), R: m_SpecificInt(V: Mask)))),
2988 R: m_SpecificInt(V: Mask))))
2989 return L;
2990
2991 if (match(V: L, P: m_ZExt(Op: m_And(L: m_Value(V&: X), R: m_SpecificInt(V: Mask)))) &&
2992 match(V: R, P: m_ZExt(Op: m_And(L: m_Neg(V: m_Specific(V: X)), R: m_SpecificInt(V: Mask)))))
2993 return L;
2994
2995 return nullptr;
2996 };
2997
2998 Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
2999 if (!ShAmt) {
3000 ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
3001 IsFshl = false; // Sub on SHL.
3002 }
3003 if (!ShAmt)
3004 return std::nullopt;
3005
3006 FShiftArgs = {ShVal0, ShVal1, ShAmt};
3007 } else if (isa<ZExtInst>(Val: Or0) || isa<ZExtInst>(Val: Or1)) {
3008 // If there are two 'or' instructions that concatenate variables in opposite order:
3009 //
3010 // Slot1 and Slot2 are all zero bits.
3011 // | Slot1 | Low | Slot2 | High |
3012 // LowHigh = or (shl (zext Low), ZextLowShlAmt), (zext High)
3013 // | Slot2 | High | Slot1 | Low |
3014 // HighLow = or (shl (zext High), ZextHighShlAmt), (zext Low)
3015 //
3016 // the latter 'or' can be safely converted to
3017 // -> HighLow = fshl LowHigh, LowHigh, ZextHighShlAmt
3018 // if ZextLowShlAmt + ZextHighShlAmt == Width.
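    // Illustrative instance with i16 halves packed into i32 (no slack bits):
    //   LowHigh = (zext Low << 16) | zext High
    //   HighLow = (zext High << 16) | zext Low == fshl LowHigh, LowHigh, 16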
3019 if (!isa<ZExtInst>(Val: Or1))
3020 std::swap(a&: Or0, b&: Or1);
3021
3022 Value *High, *ZextHigh, *Low;
3023 const APInt *ZextHighShlAmt;
3024 if (!match(V: Or0,
3025 P: m_OneUse(SubPattern: m_Shl(L: m_Value(V&: ZextHigh), R: m_APInt(Res&: ZextHighShlAmt)))))
3026 return std::nullopt;
3027
3028 if (!match(V: Or1, P: m_ZExt(Op: m_Value(V&: Low))) ||
3029 !match(V: ZextHigh, P: m_ZExt(Op: m_Value(V&: High))))
3030 return std::nullopt;
3031
3032 unsigned HighSize = High->getType()->getScalarSizeInBits();
3033 unsigned LowSize = Low->getType()->getScalarSizeInBits();
3034 // Make sure High does not overlap with Low and most significant bits of
3035 // High aren't shifted out.
3036 if (ZextHighShlAmt->ult(RHS: LowSize) || ZextHighShlAmt->ugt(RHS: Width - HighSize))
3037 return std::nullopt;
3038
3039 for (User *U : ZextHigh->users()) {
3040 Value *X, *Y;
3041 if (!match(V: U, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y))))
3042 continue;
3043
3044 if (!isa<ZExtInst>(Val: Y))
3045 std::swap(a&: X, b&: Y);
3046
3047 const APInt *ZextLowShlAmt;
3048 if (!match(V: X, P: m_Shl(L: m_Specific(V: Or1), R: m_APInt(Res&: ZextLowShlAmt))) ||
3049 !match(V: Y, P: m_Specific(V: ZextHigh)) || !DT.dominates(Def: U, User: &Or))
3050 continue;
3051
3052 // HighLow is a good concat. If the sum of the two shift amounts equals
3053 // Width, LowHigh must also be a good concat.
3054 if (*ZextLowShlAmt + *ZextHighShlAmt != Width)
3055 continue;
3056
3057 // Low must not overlap with High and most significant bits of Low must
3058 // not be shifted out.
3059 assert(ZextLowShlAmt->uge(HighSize) &&
3060 ZextLowShlAmt->ule(Width - LowSize) && "Invalid concat");
3061
3062 FShiftArgs = {U, U, ConstantInt::get(Ty: Or0->getType(), V: *ZextHighShlAmt)};
3063 break;
3064 }
3065 }
3066
3067 if (FShiftArgs.empty())
3068 return std::nullopt;
3069
3070 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
3071 return std::make_pair(x&: IID, y&: FShiftArgs);
3072}
3073
3074/// Match UB-safe variants of the funnel shift intrinsic.
3075static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) {
3076 if (auto Opt = IC.convertOrOfShiftsToFunnelShift(Or)) {
3077 auto [IID, FShiftArgs] = *Opt;
3078 Function *F =
3079 Intrinsic::getOrInsertDeclaration(M: Or.getModule(), id: IID, Tys: Or.getType());
3080 return CallInst::Create(Func: F, Args: FShiftArgs);
3081 }
3082
3083 return nullptr;
3084}
3085
3086/// Attempt to combine or(zext(x),shl(zext(y),bw/2)) concat packing patterns.
3087static Value *matchOrConcat(Instruction &Or, InstCombiner::BuilderTy &Builder) {
3088 assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'");
3089 Value *Op0 = Or.getOperand(i: 0), *Op1 = Or.getOperand(i: 1);
3090 Type *Ty = Or.getType();
3091
3092 unsigned Width = Ty->getScalarSizeInBits();
3093 if ((Width & 1) != 0)
3094 return nullptr;
3095 unsigned HalfWidth = Width / 2;
3096
3097 // Canonicalize zext (lower half) to LHS.
3098 if (!isa<ZExtInst>(Val: Op0))
3099 std::swap(a&: Op0, b&: Op1);
3100
3101 // Find lower/upper half.
3102 Value *LowerSrc, *ShlVal, *UpperSrc;
3103 const APInt *C;
3104 if (!match(V: Op0, P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: LowerSrc)))) ||
3105 !match(V: Op1, P: m_OneUse(SubPattern: m_Shl(L: m_Value(V&: ShlVal), R: m_APInt(Res&: C)))) ||
3106 !match(V: ShlVal, P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: UpperSrc)))))
3107 return nullptr;
3108 if (*C != HalfWidth || LowerSrc->getType() != UpperSrc->getType() ||
3109 LowerSrc->getType()->getScalarSizeInBits() != HalfWidth)
3110 return nullptr;
3111
3112 auto ConcatIntrinsicCalls = [&](Intrinsic::ID id, Value *Lo, Value *Hi) {
3113 Value *NewLower = Builder.CreateZExt(V: Lo, DestTy: Ty);
3114 Value *NewUpper = Builder.CreateZExt(V: Hi, DestTy: Ty);
3115 NewUpper = Builder.CreateShl(LHS: NewUpper, RHS: HalfWidth);
3116 Value *BinOp = Builder.CreateOr(LHS: NewLower, RHS: NewUpper);
3117 return Builder.CreateIntrinsic(ID: id, Types: Ty, Args: BinOp);
3118 };
3119
3120 // BSWAP: Push the concat down, swapping the lower/upper sources.
3121 // concat(bswap(x),bswap(y)) -> bswap(concat(x,y))
3122 Value *LowerBSwap, *UpperBSwap;
3123 if (match(V: LowerSrc, P: m_BSwap(Op0: m_Value(V&: LowerBSwap))) &&
3124 match(V: UpperSrc, P: m_BSwap(Op0: m_Value(V&: UpperBSwap))))
3125 return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap);
3126
3127 // BITREVERSE: Push the concat down, swapping the lower/upper sources.
3128 // concat(bitreverse(x),bitreverse(y)) -> bitreverse(concat(x,y))
3129 Value *LowerBRev, *UpperBRev;
3130 if (match(V: LowerSrc, P: m_BitReverse(Op0: m_Value(V&: LowerBRev))) &&
3131 match(V: UpperSrc, P: m_BitReverse(Op0: m_Value(V&: UpperBRev))))
3132 return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev);
3133
3134 // iX ext split: extending the or(zext(x),shl(zext(y),bw/2)) pattern
3135 // to consume sext/ashr:
3136 // or(zext(sext(x)),shl(zext(sext(ashr(x,xbw-1))),bw/2))
3137 // or(zext(x),shl(zext(ashr(x,xbw-1)),bw/2))
3138 Value *X;
3139 if (match(V: LowerSrc, P: m_SExtOrSelf(Op: m_Value(V&: X))) &&
3140 match(V: UpperSrc,
3141 P: m_SExtOrSelf(Op: m_AShr(
3142 L: m_Specific(V: X),
3143 R: m_SpecificInt(V: X->getType()->getScalarSizeInBits() - 1)))))
3144 return Builder.CreateSExt(V: X, DestTy: Ty);
3145
3146 return nullptr;
3147}
3148
3149/// If all elements of two constant vectors are 0/-1 and inverses, return true.
3150static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
3151 unsigned NumElts = cast<FixedVectorType>(Val: C1->getType())->getNumElements();
3152 for (unsigned i = 0; i != NumElts; ++i) {
3153 Constant *EltC1 = C1->getAggregateElement(Elt: i);
3154 Constant *EltC2 = C2->getAggregateElement(Elt: i);
3155 if (!EltC1 || !EltC2)
3156 return false;
3157
3158 // One element must be all ones, and the other must be all zeros.
3159 if (!((match(V: EltC1, P: m_Zero()) && match(V: EltC2, P: m_AllOnes())) ||
3160 (match(V: EltC2, P: m_Zero()) && match(V: EltC1, P: m_AllOnes()))))
3161 return false;
3162 }
3163 return true;
3164}
3165
3166/// We have an expression of the form (A & C) | (B & D). If A is a scalar or
3167/// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
3168/// B, it can be used as the condition operand of a select instruction.
3169/// We will detect (A & C) | ~(B | D) when the flag ABIsTheSame is enabled.
3170Value *InstCombinerImpl::getSelectCondition(Value *A, Value *B,
3171 bool ABIsTheSame) {
3172 // We may have peeked through bitcasts in the caller.
3173 // Exit immediately if we don't have (vector) integer types.
3174 Type *Ty = A->getType();
3175 if (!Ty->isIntOrIntVectorTy() || !B->getType()->isIntOrIntVectorTy())
3176 return nullptr;
3177
3178 // If A is the 'not' operand of B and has enough sign bits, we have our answer.
3179 if (ABIsTheSame ? (A == B) : match(V: B, P: m_Not(V: m_Specific(V: A)))) {
3180 // If these are scalars or vectors of i1, A can be used directly.
3181 if (Ty->isIntOrIntVectorTy(BitWidth: 1))
3182 return A;
3183
3184 // If we look through a vector bitcast, the caller will bitcast the operands
3185 // to match the condition's number of bits (N x i1).
3186 // To make this poison-safe, disallow bitcast from wide element to narrow
3187 // element. That could allow poison in lanes where it was not present in the
3188 // original code.
3189 A = peekThroughBitcast(V: A);
3190 if (A->getType()->isIntOrIntVectorTy()) {
3191 unsigned NumSignBits = ComputeNumSignBits(Op: A);
3192 if (NumSignBits == A->getType()->getScalarSizeInBits() &&
3193 NumSignBits <= Ty->getScalarSizeInBits())
3194 return Builder.CreateTrunc(V: A, DestTy: CmpInst::makeCmpResultType(opnd_type: A->getType()));
3195 }
3196 return nullptr;
3197 }
3198
3199 // TODO: add support for sext and constant case
3200 if (ABIsTheSame)
3201 return nullptr;
3202
3203 // If both operands are constants, see if the constants are inverse bitmasks.
3204 Constant *AConst, *BConst;
3205 if (match(V: A, P: m_Constant(C&: AConst)) && match(V: B, P: m_Constant(C&: BConst)))
3206 if (AConst == ConstantExpr::getNot(C: BConst) &&
3207 ComputeNumSignBits(Op: A) == Ty->getScalarSizeInBits())
3208 return Builder.CreateZExtOrTrunc(V: A, DestTy: CmpInst::makeCmpResultType(opnd_type: Ty));
3209
3210 // Look for more complex patterns. The 'not' op may be hidden behind various
3211 // casts. Look through sexts and bitcasts to find the booleans.
3212 Value *Cond;
3213 Value *NotB;
3214 if (match(V: A, P: m_SExt(Op: m_Value(V&: Cond))) &&
3215 Cond->getType()->isIntOrIntVectorTy(BitWidth: 1)) {
3216 // A = sext i1 Cond; B = sext (not (i1 Cond))
3217 if (match(V: B, P: m_SExt(Op: m_Not(V: m_Specific(V: Cond)))))
3218 return Cond;
3219
3220 // A = sext i1 Cond; B = not ({bitcast} (sext (i1 Cond)))
3221 // TODO: The one-use checks are unnecessary or misplaced. If the caller
3222 // checked for uses on logic ops/casts, that should be enough to
3223 // make this transform worthwhile.
3224 if (match(V: B, P: m_OneUse(SubPattern: m_Not(V: m_Value(V&: NotB))))) {
3225 NotB = peekThroughBitcast(V: NotB, OneUseOnly: true);
3226 if (match(V: NotB, P: m_SExt(Op: m_Specific(V: Cond))))
3227 return Cond;
3228 }
3229 }
3230
3231 // All scalar (and most vector) possibilities should be handled now.
3232 // Try more matches that only apply to non-splat constant vectors.
3233 if (!Ty->isVectorTy())
3234 return nullptr;
3235
3236 // If both operands are xor'd with constants using the same sexted boolean
3237 // operand, see if the constants are inverse bitmasks.
3238 // TODO: Use ConstantExpr::getNot()?
3239 if (match(V: A, P: (m_Xor(L: m_SExt(Op: m_Value(V&: Cond)), R: m_Constant(C&: AConst)))) &&
3240 match(V: B, P: (m_Xor(L: m_SExt(Op: m_Specific(V: Cond)), R: m_Constant(C&: BConst)))) &&
3241 Cond->getType()->isIntOrIntVectorTy(BitWidth: 1) &&
3242 areInverseVectorBitmasks(C1: AConst, C2: BConst)) {
3243 AConst = ConstantExpr::getTrunc(C: AConst, Ty: CmpInst::makeCmpResultType(opnd_type: Ty));
3244 return Builder.CreateXor(LHS: Cond, RHS: AConst);
3245 }
3246 return nullptr;
3247}
3248
3249/// We have an expression of the form (A & B) | (C & D). Try to simplify this
3250/// to "A' ? B : D", where A' is a boolean or vector of booleans.
3251/// When InvertFalseVal is set to true, we try to match the pattern
3252/// where we have peeked through a 'not' op and A and C are the same:
3253/// (A & B) | ~(A | D) --> (A & B) | (~A & ~D) --> A' ? B : ~D
3254Value *InstCombinerImpl::matchSelectFromAndOr(Value *A, Value *B, Value *C,
3255 Value *D, bool InvertFalseVal) {
3256 // The potential condition of the select may be bitcasted. In that case, look
3257 // through its bitcast and the corresponding bitcast of the 'not' condition.
3258 Type *OrigType = A->getType();
3259 A = peekThroughBitcast(V: A, OneUseOnly: true);
3260 C = peekThroughBitcast(V: C, OneUseOnly: true);
3261 if (Value *Cond = getSelectCondition(A, B: C, ABIsTheSame: InvertFalseVal)) {
3262 // ((bc Cond) & B) | ((bc ~Cond) & D) --> bc (select Cond, (bc B), (bc D))
3263 // If this is a vector, we may need to cast to match the condition's length.
3264 // The bitcasts will either all exist or all not exist. The builder will
3265 // not create unnecessary casts if the types already match.
3266 Type *SelTy = A->getType();
3267 if (auto *VecTy = dyn_cast<VectorType>(Val: Cond->getType())) {
3268 // For a fixed or scalable vector, get N from <{vscale x} N x iM>.
3269 unsigned Elts = VecTy->getElementCount().getKnownMinValue();
3270 // For a fixed or scalable vector, get the size in bits of N x iM; for a
3271 // scalar this is just M.
3272 unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinValue();
3273 Type *EltTy = Builder.getIntNTy(N: SelEltSize / Elts);
3274 SelTy = VectorType::get(ElementType: EltTy, EC: VecTy->getElementCount());
3275 }
3276 Value *BitcastB = Builder.CreateBitCast(V: B, DestTy: SelTy);
3277 if (InvertFalseVal)
3278 D = Builder.CreateNot(V: D);
3279 Value *BitcastD = Builder.CreateBitCast(V: D, DestTy: SelTy);
3280 Value *Select = Builder.CreateSelect(C: Cond, True: BitcastB, False: BitcastD);
3281 return Builder.CreateBitCast(V: Select, DestTy: OrigType);
3282 }
3283
3284 return nullptr;
3285}
3286
3287// (icmp eq X, C) | (icmp ult Other, (X - C)) -> (icmp ule Other, (X - (C + 1)))
3288// (icmp ne X, C) & (icmp uge Other, (X - C)) -> (icmp ugt Other, (X - (C + 1)))
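// Illustrative instance: (icmp eq X, 5) | (icmp ult Other, (X - 5))
//   --> (icmp ule Other, (X - 6)); when X == 5, X - 6 wraps to all-ones, so
//   the unsigned 'ule' is trivially true, covering the 'eq' case.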
3289static Value *foldAndOrOfICmpEqConstantAndICmp(ICmpInst *LHS, ICmpInst *RHS,
3290 bool IsAnd, bool IsLogical,
3291 IRBuilderBase &Builder) {
3292 Value *LHS0 = LHS->getOperand(i_nocapture: 0);
3293 Value *RHS0 = RHS->getOperand(i_nocapture: 0);
3294 Value *RHS1 = RHS->getOperand(i_nocapture: 1);
3295
3296 ICmpInst::Predicate LPred =
3297 IsAnd ? LHS->getInversePredicate() : LHS->getPredicate();
3298 ICmpInst::Predicate RPred =
3299 IsAnd ? RHS->getInversePredicate() : RHS->getPredicate();
3300
3301 const APInt *CInt;
3302 if (LPred != ICmpInst::ICMP_EQ ||
3303 !match(V: LHS->getOperand(i_nocapture: 1), P: m_APIntAllowPoison(Res&: CInt)) ||
3304 !LHS0->getType()->isIntOrIntVectorTy() ||
3305 !(LHS->hasOneUse() || RHS->hasOneUse()))
3306 return nullptr;
3307
3308 auto MatchRHSOp = [LHS0, CInt](const Value *RHSOp) {
3309 return match(V: RHSOp,
3310 P: m_Add(L: m_Specific(V: LHS0), R: m_SpecificIntAllowPoison(V: -*CInt))) ||
3311 (CInt->isZero() && RHSOp == LHS0);
3312 };
3313
3314 Value *Other;
3315 if (RPred == ICmpInst::ICMP_ULT && MatchRHSOp(RHS1))
3316 Other = RHS0;
3317 else if (RPred == ICmpInst::ICMP_UGT && MatchRHSOp(RHS0))
3318 Other = RHS1;
3319 else
3320 return nullptr;
3321
3322 if (IsLogical)
3323 Other = Builder.CreateFreeze(V: Other);
3324
3325 return Builder.CreateICmp(
3326 P: IsAnd ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE,
3327 LHS: Builder.CreateSub(LHS: LHS0, RHS: ConstantInt::get(Ty: LHS0->getType(), V: *CInt + 1)),
3328 RHS: Other);
3329}
3330
3331/// Fold (icmp)&(icmp) or (icmp)|(icmp) if possible.
3332/// If IsLogical is true, then the and/or is in select form and the transform
3333/// must be poison-safe.
3334Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
3335 Instruction &I, bool IsAnd,
3336 bool IsLogical) {
3337 const SimplifyQuery Q = SQ.getWithInstruction(I: &I);
3338
3339 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
3340 Value *LHS0 = LHS->getOperand(i_nocapture: 0), *RHS0 = RHS->getOperand(i_nocapture: 0);
3341 Value *LHS1 = LHS->getOperand(i_nocapture: 1), *RHS1 = RHS->getOperand(i_nocapture: 1);
3342
3343 const APInt *LHSC = nullptr, *RHSC = nullptr;
3344 match(V: LHS1, P: m_APInt(Res&: LHSC));
3345 match(V: RHS1, P: m_APInt(Res&: RHSC));
3346
3347 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
3348 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
3349 if (predicatesFoldable(P1: PredL, P2: PredR)) {
3350 if (LHS0 == RHS1 && LHS1 == RHS0) {
3351 PredL = ICmpInst::getSwappedPredicate(pred: PredL);
3352 std::swap(a&: LHS0, b&: LHS1);
3353 }
3354 if (LHS0 == RHS0 && LHS1 == RHS1) {
3355 unsigned Code = IsAnd ? getICmpCode(Pred: PredL) & getICmpCode(Pred: PredR)
3356 : getICmpCode(Pred: PredL) | getICmpCode(Pred: PredR);
3357 bool IsSigned = LHS->isSigned() || RHS->isSigned();
3358 return getNewICmpValue(Code, Sign: IsSigned, LHS: LHS0, RHS: LHS1, Builder);
3359 }
3360 }
3361
3362 if (Value *V =
3363 foldAndOrOfICmpEqConstantAndICmp(LHS, RHS, IsAnd, IsLogical, Builder))
3364 return V;
3365 // We can treat logical like bitwise here, because both operands are used on
3366 // the LHS, and as such poison from both will propagate.
3367 if (Value *V = foldAndOrOfICmpEqConstantAndICmp(LHS: RHS, RHS: LHS, IsAnd,
3368 /*IsLogical*/ false, Builder))
3369 return V;
3370
3371 if (Value *V =
3372 foldAndOrOfICmpsWithConstEq(Cmp0: LHS, Cmp1: RHS, IsAnd, IsLogical, Builder, Q))
3373 return V;
3374 // We can convert this case to bitwise and, because both operands are used
3375 // on the LHS, and as such poison from both will propagate.
3376 if (Value *V = foldAndOrOfICmpsWithConstEq(Cmp0: RHS, Cmp1: LHS, IsAnd,
3377 /*IsLogical=*/false, Builder, Q)) {
3378 // If RHS is still used, we should drop samesign flag.
3379 if (IsLogical && RHS->hasSameSign() && !RHS->use_empty()) {
3380 RHS->setSameSign(false);
3381 addToWorklist(I: RHS);
3382 }
3383 return V;
3384 }
3385
3386 if (Value *V = foldIsPowerOf2OrZero(Cmp0: LHS, Cmp1: RHS, IsAnd, Builder, IC&: *this))
3387 return V;
3388 if (Value *V = foldIsPowerOf2OrZero(Cmp0: RHS, Cmp1: LHS, IsAnd, Builder, IC&: *this))
3389 return V;
3390
3391 // TODO: One of these directions is fine with logical and/or, the other could
3392 // be supported by inserting freeze.
3393 if (!IsLogical) {
3394 // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
3395 // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
3396 if (Value *V = simplifyRangeCheck(Cmp0: LHS, Cmp1: RHS, /*Inverted=*/!IsAnd))
3397 return V;
3398
3399 // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
3400 // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
3401 if (Value *V = simplifyRangeCheck(Cmp0: RHS, Cmp1: LHS, /*Inverted=*/!IsAnd))
3402 return V;
3403 }
3404
3405 // TODO: Add conjugated or fold, check whether it is safe for logical and/or.
3406 if (IsAnd && !IsLogical)
3407 if (Value *V = foldSignedTruncationCheck(ICmp0: LHS, ICmp1: RHS, CxtI&: I, Builder))
3408 return V;
3409
3410 if (Value *V = foldIsPowerOf2(Cmp0: LHS, Cmp1: RHS, JoinedByAnd: IsAnd, Builder, IC&: *this))
3411 return V;
3412
3413 if (Value *V = foldPowerOf2AndShiftedMask(Cmp0: LHS, Cmp1: RHS, JoinedByAnd: IsAnd, Builder))
3414 return V;
3415
3416 // TODO: Verify whether this is safe for logical and/or.
3417 if (!IsLogical) {
3418 if (Value *X = foldUnsignedUnderflowCheck(ZeroICmp: LHS, UnsignedICmp: RHS, IsAnd, Q, Builder))
3419 return X;
3420 if (Value *X = foldUnsignedUnderflowCheck(ZeroICmp: RHS, UnsignedICmp: LHS, IsAnd, Q, Builder))
3421 return X;
3422 }
3423
3424 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
3425 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
3426 // TODO: Remove this and below when foldLogOpOfMaskedICmps can handle undefs.
3427 if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
3428 PredL == PredR && match(V: LHS1, P: m_ZeroInt()) && match(V: RHS1, P: m_ZeroInt()) &&
3429 LHS0->getType() == RHS0->getType() &&
3430 (!IsLogical || isGuaranteedNotToBePoison(V: RHS0))) {
3431 Value *NewOr = Builder.CreateOr(LHS: LHS0, RHS: RHS0);
3432 return Builder.CreateICmp(P: PredL, LHS: NewOr,
3433 RHS: Constant::getNullValue(Ty: NewOr->getType()));
3434 }
3435
3436 // (icmp ne A, -1) | (icmp ne B, -1) --> (icmp ne (A&B), -1)
3437 // (icmp eq A, -1) & (icmp eq B, -1) --> (icmp eq (A&B), -1)
3438 if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
3439 PredL == PredR && match(V: LHS1, P: m_AllOnes()) && match(V: RHS1, P: m_AllOnes()) &&
3440 LHS0->getType() == RHS0->getType() &&
3441 (!IsLogical || isGuaranteedNotToBePoison(V: RHS0))) {
3442 Value *NewAnd = Builder.CreateAnd(LHS: LHS0, RHS: RHS0);
3443 return Builder.CreateICmp(P: PredL, LHS: NewAnd,
3444 RHS: Constant::getAllOnesValue(Ty: LHS0->getType()));
3445 }
3446
3447 if (!IsLogical)
3448 if (Value *V =
3449 foldAndOrOfICmpsWithPow2AndWithZero(Builder, LHS, RHS, IsAnd, Q))
3450 return V;
3451
3452 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
3453 if (!LHSC || !RHSC)
3454 return nullptr;
3455
3456 // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
3457 // (trunc x) != C1 | (and x, CA) != C2 -> (and x, CA|CMAX) != C1|C2
3458 // where CMAX is the all ones value for the truncated type,
3459 // iff the lower bits of C2 and CA are zero.
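  // Illustrative instance (i32 x, i8 trunc):
  //   (trunc x) == 5 & (x & 0xFF00) == 0x1200 -> (x & 0xFFFF) == 0x1205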
3460 if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
3461 PredL == PredR && LHS->hasOneUse() && RHS->hasOneUse()) {
3462 Value *V;
3463 const APInt *AndC, *SmallC = nullptr, *BigC = nullptr;
3464
3465 // (trunc x) == C1 & (and x, CA) == C2
3466 // (and x, CA) == C2 & (trunc x) == C1
3467 if (match(V: RHS0, P: m_Trunc(Op: m_Value(V))) &&
3468 match(V: LHS0, P: m_And(L: m_Specific(V), R: m_APInt(Res&: AndC)))) {
3469 SmallC = RHSC;
3470 BigC = LHSC;
3471 } else if (match(V: LHS0, P: m_Trunc(Op: m_Value(V))) &&
3472 match(V: RHS0, P: m_And(L: m_Specific(V), R: m_APInt(Res&: AndC)))) {
3473 SmallC = LHSC;
3474 BigC = RHSC;
3475 }
3476
3477 if (SmallC && BigC) {
3478 unsigned BigBitSize = BigC->getBitWidth();
3479 unsigned SmallBitSize = SmallC->getBitWidth();
3480
3481 // Check that the low bits are zero.
3482 APInt Low = APInt::getLowBitsSet(numBits: BigBitSize, loBitsSet: SmallBitSize);
3483 if ((Low & *AndC).isZero() && (Low & *BigC).isZero()) {
3484 Value *NewAnd = Builder.CreateAnd(LHS: V, RHS: Low | *AndC);
3485 APInt N = SmallC->zext(width: BigBitSize) | *BigC;
3486 Value *NewVal = ConstantInt::get(Ty: NewAnd->getType(), V: N);
3487 return Builder.CreateICmp(P: PredL, LHS: NewAnd, RHS: NewVal);
3488 }
3489 }
3490 }
3491
3492 // Match naive pattern (and its inverted form) for checking if two values
3493 // share same sign. An example of the pattern:
3494 // (icmp slt (X & Y), 0) | (icmp sgt (X | Y), -1) -> (icmp sgt (X ^ Y), -1)
3495 // Inverted form (example):
3496 // (icmp slt (X | Y), 0) & (icmp sgt (X & Y), -1) -> (icmp slt (X ^ Y), 0)
3497 bool TrueIfSignedL, TrueIfSignedR;
3498 if (isSignBitCheck(Pred: PredL, RHS: *LHSC, TrueIfSigned&: TrueIfSignedL) &&
3499 isSignBitCheck(Pred: PredR, RHS: *RHSC, TrueIfSigned&: TrueIfSignedR) &&
3500 (RHS->hasOneUse() || LHS->hasOneUse())) {
3501 Value *X, *Y;
3502 if (IsAnd) {
3503 if ((TrueIfSignedL && !TrueIfSignedR &&
3504 match(V: LHS0, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y))) &&
3505 match(V: RHS0, P: m_c_And(L: m_Specific(V: X), R: m_Specific(V: Y)))) ||
3506 (!TrueIfSignedL && TrueIfSignedR &&
3507 match(V: LHS0, P: m_And(L: m_Value(V&: X), R: m_Value(V&: Y))) &&
3508 match(V: RHS0, P: m_c_Or(L: m_Specific(V: X), R: m_Specific(V: Y))))) {
3509 Value *NewXor = Builder.CreateXor(LHS: X, RHS: Y);
3510 return Builder.CreateIsNeg(Arg: NewXor);
3511 }
3512 } else {
3513 if ((TrueIfSignedL && !TrueIfSignedR &&
3514 match(V: LHS0, P: m_And(L: m_Value(V&: X), R: m_Value(V&: Y))) &&
3515 match(V: RHS0, P: m_c_Or(L: m_Specific(V: X), R: m_Specific(V: Y)))) ||
3516 (!TrueIfSignedL && TrueIfSignedR &&
3517 match(V: LHS0, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y))) &&
3518 match(V: RHS0, P: m_c_And(L: m_Specific(V: X), R: m_Specific(V: Y))))) {
3519 Value *NewXor = Builder.CreateXor(LHS: X, RHS: Y);
3520 return Builder.CreateIsNotNeg(Arg: NewXor);
3521 }
3522 }
3523 }
3524
3525 // (X & ExpMask) != 0 && (X & ExpMask) != ExpMask -> isnormal(X)
3526 // (X & ExpMask) == 0 || (X & ExpMask) == ExpMask -> !isnormal(X)
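  // Illustrative instance: for float, ExpMask == 0x7f800000; the exponent
  // field being neither all-zero nor all-one is exactly "normal".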
3527 Value *X;
3528 const APInt *MaskC;
3529 if (LHS0 == RHS0 && PredL == PredR &&
3530 PredL == (IsAnd ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ) &&
3531 !I.getFunction()->hasFnAttribute(Kind: Attribute::NoImplicitFloat) &&
3532 LHS->hasOneUse() && RHS->hasOneUse() &&
3533 match(V: LHS0, P: m_And(L: m_ElementWiseBitCast(Op: m_Value(V&: X)), R: m_APInt(Res&: MaskC))) &&
3534 X->getType()->getScalarType()->isIEEELikeFPTy() &&
3535 APFloat(X->getType()->getScalarType()->getFltSemantics(), *MaskC)
3536 .isPosInfinity() &&
3537 ((LHSC->isZero() && *RHSC == *MaskC) ||
3538 (RHSC->isZero() && *LHSC == *MaskC)))
3539 return Builder.createIsFPClass(FPNum: X, Test: IsAnd ? FPClassTest::fcNormal
3540 : ~FPClassTest::fcNormal);
3541
3542 return foldAndOrOfICmpsUsingRanges(ICmp1: LHS, ICmp2: RHS, IsAnd);
3543}
3544
3545/// If IsLogical is true, then the and/or is in select form and the transform
3546/// must be poison-safe.
3547Value *InstCombinerImpl::foldBooleanAndOr(Value *LHS, Value *RHS,
3548 Instruction &I, bool IsAnd,
3549 bool IsLogical) {
3550 if (!LHS->getType()->isIntOrIntVectorTy(BitWidth: 1))
3551 return nullptr;
3552
3553 // handle (roughly):
3554 // (icmp ne (A & B), C) | (icmp ne (A & D), E)
3555 // (icmp eq (A & B), C) & (icmp eq (A & D), E)
3556 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, IsAnd, IsLogical, Builder,
3557 Q: SQ.getWithInstruction(I: &I)))
3558 return V;
3559
3560 if (auto *LHSCmp = dyn_cast<ICmpInst>(Val: LHS))
3561 if (auto *RHSCmp = dyn_cast<ICmpInst>(Val: RHS))
3562 if (Value *Res = foldAndOrOfICmps(LHS: LHSCmp, RHS: RHSCmp, I, IsAnd, IsLogical))
3563 return Res;
3564
3565 if (auto *LHSCmp = dyn_cast<FCmpInst>(Val: LHS))
3566 if (auto *RHSCmp = dyn_cast<FCmpInst>(Val: RHS))
3567 if (Value *Res = foldLogicOfFCmps(LHS: LHSCmp, RHS: RHSCmp, IsAnd, IsLogicalSelect: IsLogical))
3568 return Res;
3569
3570 if (Value *Res = foldEqOfParts(Cmp0: LHS, Cmp1: RHS, IsAnd))
3571 return Res;
3572
3573 return nullptr;
3574}
3575
3576static Value *foldOrOfInversions(BinaryOperator &I,
3577 InstCombiner::BuilderTy &Builder) {
3578 assert(I.getOpcode() == Instruction::Or &&
3579 "Simplification only supports or at the moment.");
3580
3581 Value *Cmp1, *Cmp2, *Cmp3, *Cmp4;
3582 if (!match(V: I.getOperand(i_nocapture: 0), P: m_And(L: m_Value(V&: Cmp1), R: m_Value(V&: Cmp2))) ||
3583 !match(V: I.getOperand(i_nocapture: 1), P: m_And(L: m_Value(V&: Cmp3), R: m_Value(V&: Cmp4))))
3584 return nullptr;
3585
3586 // Check if any two pairs of the and operations are inversions of each other.
3587 if (isKnownInversion(X: Cmp1, Y: Cmp3) && isKnownInversion(X: Cmp2, Y: Cmp4))
3588 return Builder.CreateXor(LHS: Cmp1, RHS: Cmp4);
3589 if (isKnownInversion(X: Cmp1, Y: Cmp4) && isKnownInversion(X: Cmp2, Y: Cmp3))
3590 return Builder.CreateXor(LHS: Cmp1, RHS: Cmp3);
3591
3592 return nullptr;
3593}
3594
3595// A decomposition of ((X & Mask) * Factor). The NUW / NSW bools
3596 // track these properties for preservation. Note that we can decompose the
3597// equivalent select form of this expression (e.g. (!(X & Mask) ? 0 : Mask *
3598// Factor))
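// Illustrative instances: (mul (and X, 12), 5) decomposes to
// {X, Factor=5, Mask=12}; the select form ((X & 4) == 0 ? 0 : 20)
// decomposes to {X, Factor=5, Mask=4}.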
3599struct DecomposedBitMaskMul {
3600 Value *X;
3601 APInt Factor;
3602 APInt Mask;
3603 bool NUW;
3604 bool NSW;
3605};
3606
3607static std::optional<DecomposedBitMaskMul> matchBitmaskMul(Value *V) {
3608 Instruction *Op = dyn_cast<Instruction>(Val: V);
3609 if (!Op)
3610 return std::nullopt;
3611
3612 // Decompose ((A & N) * C) into BitMaskMul
3613 Value *Original = nullptr;
3614 const APInt *Mask = nullptr;
3615 const APInt *MulConst = nullptr;
3616 if (match(V: Op, P: m_Mul(L: m_And(L: m_Value(V&: Original), R: m_APInt(Res&: Mask)),
3617 R: m_APInt(Res&: MulConst)))) {
3618 if (MulConst->isZero() || Mask->isZero())
3619 return std::nullopt;
3620
3621 return std::optional<DecomposedBitMaskMul>(
3622 {.X: Original, .Factor: *MulConst, .Mask: *Mask,
3623 .NUW: cast<BinaryOperator>(Val: Op)->hasNoUnsignedWrap(),
3624 .NSW: cast<BinaryOperator>(Val: Op)->hasNoSignedWrap()});
3625 }
3626
3627 Value *Cond = nullptr;
3628 const APInt *EqZero = nullptr, *NeZero = nullptr;
3629
3630 // Decompose ((A & N) ? 0 : N * C) into BitMaskMul
3631 if (match(V: Op, P: m_Select(C: m_Value(V&: Cond), L: m_APInt(Res&: EqZero), R: m_APInt(Res&: NeZero)))) {
3632 auto ICmpDecompose =
3633 decomposeBitTest(Cond, /*LookThruTrunc=*/LookThroughTrunc: true,
3634 /*AllowNonZeroC=*/false, /*DecomposeBitMask=*/DecomposeAnd: true);
3635 if (!ICmpDecompose.has_value())
3636 return std::nullopt;
3637
3638 assert(ICmpInst::isEquality(ICmpDecompose->Pred) &&
3639 ICmpDecompose->C.isZero());
3640
3641 if (ICmpDecompose->Pred == ICmpInst::ICMP_NE)
3642 std::swap(a&: EqZero, b&: NeZero);
3643
3644 if (!EqZero->isZero() || NeZero->isZero())
3645 return std::nullopt;
3646
3647 if (!ICmpDecompose->Mask.isPowerOf2() || ICmpDecompose->Mask.isZero() ||
3648 NeZero->getBitWidth() != ICmpDecompose->Mask.getBitWidth())
3649 return std::nullopt;
3650
3651 if (!NeZero->urem(RHS: ICmpDecompose->Mask).isZero())
3652 return std::nullopt;
3653
3654 return std::optional<DecomposedBitMaskMul>(
3655 {.X: ICmpDecompose->X, .Factor: NeZero->udiv(RHS: ICmpDecompose->Mask),
3656 .Mask: ICmpDecompose->Mask, /*NUW=*/false, /*NSW=*/false});
3657 }
3658
3659 return std::nullopt;
3660}
3661
3662// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
3663// here. We should standardize that construct where it is needed or choose some
3664// other way to ensure that commutated variants of patterns are not missed.
3665Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
3666 if (Value *V = simplifyOrInst(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1),
3667 Q: SQ.getWithInstruction(I: &I)))
3668 return replaceInstUsesWith(I, V);
3669
3670 if (SimplifyAssociativeOrCommutative(I))
3671 return &I;
3672
3673 if (Instruction *X = foldVectorBinop(Inst&: I))
3674 return X;
3675
3676 if (Instruction *Phi = foldBinopWithPhiOperands(BO&: I))
3677 return Phi;
3678
3679 // See if we can simplify any instructions used by the instruction whose sole
3680 // purpose is to compute bits we don't care about.
3681 if (SimplifyDemandedInstructionBits(Inst&: I))
3682 return &I;
3683
3684 // Do this before using distributive laws to catch simple and/or/not patterns.
3685 if (Instruction *Xor = foldOrToXor(I, Builder))
3686 return Xor;
3687
3688 if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
3689 return X;
3690
3691 // (A & B) | (C & D) -> A ^ D where A == ~C && B == ~D
3692 // (A & B) | (C & D) -> A ^ C where A == ~D && B == ~C
3693 if (Value *V = foldOrOfInversions(I, Builder))
3694 return replaceInstUsesWith(I, V);
3695
3696 // (A&B)|(A&C) -> A&(B|C) etc
3697 if (Value *V = foldUsingDistributiveLaws(I))
3698 return replaceInstUsesWith(I, V);
3699
3700 Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);
3701 Type *Ty = I.getType();
3702 if (Ty->isIntOrIntVectorTy(BitWidth: 1)) {
3703 if (auto *SI0 = dyn_cast<SelectInst>(Val: Op0)) {
3704 if (auto *R =
3705 foldAndOrOfSelectUsingImpliedCond(Op: Op1, SI&: *SI0, /* IsAnd */ false))
3706 return R;
3707 }
3708 if (auto *SI1 = dyn_cast<SelectInst>(Val: Op1)) {
3709 if (auto *R =
3710 foldAndOrOfSelectUsingImpliedCond(Op: Op0, SI&: *SI1, /* IsAnd */ false))
3711 return R;
3712 }
3713 }
3714
3715 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
3716 return FoldedLogic;
3717
3718 if (Instruction *BitOp = matchBSwapOrBitReverse(I, /*MatchBSwaps*/ true,
3719 /*MatchBitReversals*/ true))
3720 return BitOp;
3721
3722 if (Instruction *Funnel = matchFunnelShift(Or&: I, IC&: *this))
3723 return Funnel;
3724
3725 if (Value *Concat = matchOrConcat(Or&: I, Builder))
3726 return replaceInstUsesWith(I, V: Concat);
3727
3728 if (Instruction *R = foldBinOpShiftWithShift(I))
3729 return R;
3730
3731 if (Instruction *R = tryFoldInstWithCtpopWithNot(I: &I))
3732 return R;
3733
3734 if (cast<PossiblyDisjointInst>(Val&: I).isDisjoint()) {
3735 if (Instruction *R =
3736 foldAddLikeCommutative(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1),
3737 /*NSW=*/true, /*NUW=*/true))
3738 return R;
3739 if (Instruction *R =
3740 foldAddLikeCommutative(LHS: I.getOperand(i_nocapture: 1), RHS: I.getOperand(i_nocapture: 0),
3741 /*NSW=*/true, /*NUW=*/true))
3742 return R;
3743
3744 // (A & N) * C + (A & M) * C -> (A & (N + M)) * C
3745 // This also accepts the equivalent select form of (A & N) * C
3746 // expressions, i.e. (!(A & N) ? 0 : N * C)
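    // Illustrative instance: or disjoint ((X & 3) * 5), ((X & 12) * 5)
    //   --> (X & 15) * 5, since the masks do not overlap and the factors match.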
3747 auto Decomp1 = matchBitmaskMul(V: I.getOperand(i_nocapture: 1));
3748 if (Decomp1) {
3749 auto Decomp0 = matchBitmaskMul(V: I.getOperand(i_nocapture: 0));
3750 if (Decomp0 && Decomp0->X == Decomp1->X &&
3751 (Decomp0->Mask & Decomp1->Mask).isZero() &&
3752 Decomp0->Factor == Decomp1->Factor) {
3753
3754 Value *NewAnd = Builder.CreateAnd(
3755 LHS: Decomp0->X, RHS: ConstantInt::get(Ty: Decomp0->X->getType(),
3756 V: (Decomp0->Mask + Decomp1->Mask)));
3757
3758 auto *Combined = BinaryOperator::CreateMul(
3759 V1: NewAnd, V2: ConstantInt::get(Ty: NewAnd->getType(), V: Decomp1->Factor));
3760
3761 Combined->setHasNoUnsignedWrap(Decomp0->NUW && Decomp1->NUW);
3762 Combined->setHasNoSignedWrap(Decomp0->NSW && Decomp1->NSW);
3763 return Combined;
3764 }
3765 }
3766 }
3767
3768 Value *X, *Y;
3769 const APInt *CV;
3770 if (match(V: &I, P: m_c_Or(L: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: X), R: m_APInt(Res&: CV))), R: m_Value(V&: Y))) &&
3771 !CV->isAllOnes() && MaskedValueIsZero(V: Y, Mask: *CV, CxtI: &I)) {
3772 // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0
3773 // The check for a 'not' op is for efficiency (if Y is known zero --> ~X).
3774 Value *Or = Builder.CreateOr(LHS: X, RHS: Y);
3775 return BinaryOperator::CreateXor(V1: Or, V2: ConstantInt::get(Ty, V: *CV));
3776 }
3777
3778 // If the operands have no common bits set:
3779 // or (mul X, Y), X --> add (mul X, Y), X --> mul X, (Y + 1)
3780 if (match(V: &I, P: m_c_DisjointOr(L: m_OneUse(SubPattern: m_Mul(L: m_Value(V&: X), R: m_Value(V&: Y))),
3781 R: m_Deferred(V: X)))) {
3782 Value *IncrementY = Builder.CreateAdd(LHS: Y, RHS: ConstantInt::get(Ty, V: 1));
3783 return BinaryOperator::CreateMul(V1: X, V2: IncrementY);
3784 }
3785
3786 // (A & C) | (B & D)
3787 Value *A, *B, *C, *D;
3788 if (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_Value(V&: C))) &&
3789 match(V: Op1, P: m_And(L: m_Value(V&: B), R: m_Value(V&: D)))) {
3790
3791 // (A & C0) | (B & C1)
3792 const APInt *C0, *C1;
3793 if (match(V: C, P: m_APInt(Res&: C0)) && match(V: D, P: m_APInt(Res&: C1))) {
3794 Value *X;
3795 if (*C0 == ~*C1) {
3796 // ((X | B) & MaskC) | (B & ~MaskC) -> (X & MaskC) | B
3797 if (match(V: A, P: m_c_Or(L: m_Value(V&: X), R: m_Specific(V: B))))
3798 return BinaryOperator::CreateOr(V1: Builder.CreateAnd(LHS: X, RHS: *C0), V2: B);
3799 // (A & MaskC) | ((X | A) & ~MaskC) -> (X & ~MaskC) | A
3800 if (match(V: B, P: m_c_Or(L: m_Specific(V: A), R: m_Value(V&: X))))
3801 return BinaryOperator::CreateOr(V1: Builder.CreateAnd(LHS: X, RHS: *C1), V2: A);
3802
3803 // ((X ^ B) & MaskC) | (B & ~MaskC) -> (X & MaskC) ^ B
3804 if (match(V: A, P: m_c_Xor(L: m_Value(V&: X), R: m_Specific(V: B))))
3805 return BinaryOperator::CreateXor(V1: Builder.CreateAnd(LHS: X, RHS: *C0), V2: B);
3806 // (A & MaskC) | ((X ^ A) & ~MaskC) -> (X & ~MaskC) ^ A
3807 if (match(V: B, P: m_c_Xor(L: m_Specific(V: A), R: m_Value(V&: X))))
3808 return BinaryOperator::CreateXor(V1: Builder.CreateAnd(LHS: X, RHS: *C1), V2: A);
3809 }
3810
3811 if ((*C0 & *C1).isZero()) {
3812 // ((X | B) & C0) | (B & C1) --> (X | B) & (C0 | C1)
3813 // iff (C0 & C1) == 0 and (X & ~C0) == 0
3814 if (match(V: A, P: m_c_Or(L: m_Value(V&: X), R: m_Specific(V: B))) &&
3815 MaskedValueIsZero(V: X, Mask: ~*C0, CxtI: &I)) {
3816 Constant *C01 = ConstantInt::get(Ty, V: *C0 | *C1);
3817 return BinaryOperator::CreateAnd(V1: A, V2: C01);
3818 }
3819 // (A & C0) | ((X | A) & C1) --> (X | A) & (C0 | C1)
3820 // iff (C0 & C1) == 0 and (X & ~C1) == 0
3821 if (match(V: B, P: m_c_Or(L: m_Value(V&: X), R: m_Specific(V: A))) &&
3822 MaskedValueIsZero(V: X, Mask: ~*C1, CxtI: &I)) {
3823 Constant *C01 = ConstantInt::get(Ty, V: *C0 | *C1);
3824 return BinaryOperator::CreateAnd(V1: B, V2: C01);
3825 }
3826 // ((X | C2) & C0) | ((X | C3) & C1) --> (X | C2 | C3) & (C0 | C1)
3827 // iff (C0 & C1) == 0 and (C2 & ~C0) == 0 and (C3 & ~C1) == 0.
3828 const APInt *C2, *C3;
3829 if (match(V: A, P: m_Or(L: m_Value(V&: X), R: m_APInt(Res&: C2))) &&
3830 match(V: B, P: m_Or(L: m_Specific(V: X), R: m_APInt(Res&: C3))) &&
3831 (*C2 & ~*C0).isZero() && (*C3 & ~*C1).isZero()) {
3832 Value *Or = Builder.CreateOr(LHS: X, RHS: *C2 | *C3, Name: "bitfield");
3833 Constant *C01 = ConstantInt::get(Ty, V: *C0 | *C1);
3834 return BinaryOperator::CreateAnd(V1: Or, V2: C01);
3835 }
3836 }
3837 }
3838
3839 // Don't try to form a select if it's unlikely that we'll get rid of at
3840 // least one of the operands. A select is generally more expensive than the
3841 // 'or' that it is replacing.
3842 if (Op0->hasOneUse() || Op1->hasOneUse()) {
3843 // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
3844 if (Value *V = matchSelectFromAndOr(A, B: C, C: B, D))
3845 return replaceInstUsesWith(I, V);
3846 if (Value *V = matchSelectFromAndOr(A, B: C, C: D, D: B))
3847 return replaceInstUsesWith(I, V);
3848 if (Value *V = matchSelectFromAndOr(A: C, B: A, C: B, D))
3849 return replaceInstUsesWith(I, V);
3850 if (Value *V = matchSelectFromAndOr(A: C, B: A, C: D, D: B))
3851 return replaceInstUsesWith(I, V);
3852 if (Value *V = matchSelectFromAndOr(A: B, B: D, C: A, D: C))
3853 return replaceInstUsesWith(I, V);
3854 if (Value *V = matchSelectFromAndOr(A: B, B: D, C, D: A))
3855 return replaceInstUsesWith(I, V);
3856 if (Value *V = matchSelectFromAndOr(A: D, B, C: A, D: C))
3857 return replaceInstUsesWith(I, V);
3858 if (Value *V = matchSelectFromAndOr(A: D, B, C, D: A))
3859 return replaceInstUsesWith(I, V);
3860 }
3861 }
3862
3863 if (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_Value(V&: C))) &&
3864 match(V: Op1, P: m_Not(V: m_Or(L: m_Value(V&: B), R: m_Value(V&: D)))) &&
3865 (Op0->hasOneUse() || Op1->hasOneUse())) {
3866 // (Cond & C) | ~(Cond | D) -> Cond ? C : ~D
3867 if (Value *V = matchSelectFromAndOr(A, B: C, C: B, D, InvertFalseVal: true))
3868 return replaceInstUsesWith(I, V);
3869 if (Value *V = matchSelectFromAndOr(A, B: C, C: D, D: B, InvertFalseVal: true))
3870 return replaceInstUsesWith(I, V);
3871 if (Value *V = matchSelectFromAndOr(A: C, B: A, C: B, D, InvertFalseVal: true))
3872 return replaceInstUsesWith(I, V);
3873 if (Value *V = matchSelectFromAndOr(A: C, B: A, C: D, D: B, InvertFalseVal: true))
3874 return replaceInstUsesWith(I, V);
3875 }
3876
3877 // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
3878 if (match(V: Op0, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))))
3879 if (match(V: Op1,
3880 P: m_c_Xor(L: m_c_Xor(L: m_Specific(V: B), R: m_Value(V&: C)), R: m_Specific(V: A))) ||
3881 match(V: Op1, P: m_c_Xor(L: m_c_Xor(L: m_Specific(V: A), R: m_Value(V&: C)), R: m_Specific(V: B))))
3882 return BinaryOperator::CreateOr(V1: Op0, V2: C);
3883
3884 // ((B ^ C) ^ A) | (A ^ B) -> (A ^ B) | C
3885 if (match(V: Op1, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))))
3886 if (match(V: Op0,
3887 P: m_c_Xor(L: m_c_Xor(L: m_Specific(V: B), R: m_Value(V&: C)), R: m_Specific(V: A))) ||
3888 match(V: Op0, P: m_c_Xor(L: m_c_Xor(L: m_Specific(V: A), R: m_Value(V&: C)), R: m_Specific(V: B))))
3889 return BinaryOperator::CreateOr(V1: Op1, V2: C);
3890
3891 if (Instruction *DeMorgan = matchDeMorgansLaws(I, IC&: *this))
3892 return DeMorgan;
3893
3894 // Canonicalize xor to the RHS.
3895 bool SwappedForXor = false;
3896 if (match(V: Op0, P: m_Xor(L: m_Value(), R: m_Value()))) {
3897 std::swap(a&: Op0, b&: Op1);
3898 SwappedForXor = true;
3899 }
3900
3901 if (match(V: Op1, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B)))) {
3902 // (A | ?) | (A ^ B) --> (A | ?) | B
3903 // (B | ?) | (A ^ B) --> (B | ?) | A
3904 if (match(V: Op0, P: m_c_Or(L: m_Specific(V: A), R: m_Value())))
3905 return BinaryOperator::CreateOr(V1: Op0, V2: B);
3906 if (match(V: Op0, P: m_c_Or(L: m_Specific(V: B), R: m_Value())))
3907 return BinaryOperator::CreateOr(V1: Op0, V2: A);
3908
3909 // (A & B) | (A ^ B) --> A | B
3910 // (B & A) | (A ^ B) --> A | B
3911 if (match(V: Op0, P: m_c_And(L: m_Specific(V: A), R: m_Specific(V: B))))
3912 return BinaryOperator::CreateOr(V1: A, V2: B);
3913
3914 // ~A | (A ^ B) --> ~(A & B)
3915 // ~B | (A ^ B) --> ~(A & B)
3916 // The swap above should always make Op0 the 'not'.
3917 if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
3918 (match(V: Op0, P: m_Not(V: m_Specific(V: A))) || match(V: Op0, P: m_Not(V: m_Specific(V: B)))))
3919 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: A, RHS: B));
3920
3921 // Same as above, but peek through an 'and' to the common operand:
3922 // ~(A & ?) | (A ^ B) --> ~((A & ?) & B)
3923 // ~(B & ?) | (A ^ B) --> ~((B & ?) & A)
3924 Instruction *And;
3925 if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
3926 match(V: Op0, P: m_Not(V: m_CombineAnd(L: m_Instruction(I&: And),
3927 R: m_c_And(L: m_Specific(V: A), R: m_Value())))))
3928 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: And, RHS: B));
3929 if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
3930 match(V: Op0, P: m_Not(V: m_CombineAnd(L: m_Instruction(I&: And),
3931 R: m_c_And(L: m_Specific(V: B), R: m_Value())))))
3932 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: And, RHS: A));
3933
3934 // (~A | C) | (A ^ B) --> ~(A & B) | C
3935 // (~B | C) | (A ^ B) --> ~(A & B) | C
3936 if (Op0->hasOneUse() && Op1->hasOneUse() &&
3937 (match(V: Op0, P: m_c_Or(L: m_Not(V: m_Specific(V: A)), R: m_Value(V&: C))) ||
3938 match(V: Op0, P: m_c_Or(L: m_Not(V: m_Specific(V: B)), R: m_Value(V&: C))))) {
3939 Value *Nand = Builder.CreateNot(V: Builder.CreateAnd(LHS: A, RHS: B), Name: "nand");
3940 return BinaryOperator::CreateOr(V1: Nand, V2: C);
3941 }
3942 }
3943
3944 if (SwappedForXor)
3945 std::swap(a&: Op0, b&: Op1);
3946
3947 if (Value *Res =
3948 foldBooleanAndOr(LHS: Op0, RHS: Op1, I, /*IsAnd=*/false, /*IsLogical=*/false))
3949 return replaceInstUsesWith(I, V: Res);
3950
3951 if (match(V: Op1, P: m_OneUse(SubPattern: m_LogicalOr(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
3952 bool IsLogical = isa<SelectInst>(Val: Op1);
3953 if (auto *V = reassociateBooleanAndOr(LHS: Op0, X, Y, I, /*IsAnd=*/false,
3954 /*RHSIsLogical=*/IsLogical))
3955 return replaceInstUsesWith(I, V);
3956 }
3957 if (match(V: Op0, P: m_OneUse(SubPattern: m_LogicalOr(L: m_Value(V&: X), R: m_Value(V&: Y))))) {
3958 bool IsLogical = isa<SelectInst>(Val: Op0);
3959 if (auto *V = reassociateBooleanAndOr(LHS: Op1, X, Y, I, /*IsAnd=*/false,
3960 /*RHSIsLogical=*/IsLogical))
3961 return replaceInstUsesWith(I, V);
3962 }
3963
3964 if (Instruction *FoldedFCmps = reassociateFCmps(BO&: I, Builder))
3965 return FoldedFCmps;
3966
3967 if (Instruction *CastedOr = foldCastedBitwiseLogic(I))
3968 return CastedOr;
3969
3970 if (Instruction *Sel = foldBinopOfSextBoolToSelect(I))
3971 return Sel;
3972
3973 // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
3974 // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
3975 // with binop identity constant. But creating a select with non-constant
3976 // arm may not be reversible due to poison semantics. Is that a good
3977 // canonicalization?
3978 if (match(V: &I, P: m_c_Or(L: m_OneUse(SubPattern: m_SExt(Op: m_Value(V&: A))), R: m_Value(V&: B))) &&
3979 A->getType()->isIntOrIntVectorTy(BitWidth: 1))
3980 return SelectInst::Create(C: A, S1: ConstantInt::getAllOnesValue(Ty), S2: B);
3981
3982 // Note: If we've gotten to the point of visiting the outer OR, then the
3983 // inner one couldn't be simplified. If it was a constant, then it won't
3984 // be simplified by a later pass either, so we try swapping the inner/outer
3985 // ORs in the hopes that we'll be able to simplify it this way.
3986 // (X|C) | V --> (X|V) | C
3987 // Pass the disjoint flag in the following two patterns:
3988 // 1. or-disjoint (or-disjoint X, C), V -->
3989 // or-disjoint (or-disjoint X, V), C
3990 //
3991 // 2. or-disjoint (or X, C), V -->
3992 // or (or-disjoint X, V), C
3993 ConstantInt *CI;
3994 if (Op0->hasOneUse() && !match(V: Op1, P: m_ConstantInt()) &&
3995 match(V: Op0, P: m_Or(L: m_Value(V&: A), R: m_ConstantInt(CI)))) {
3996 bool IsDisjointOuter = cast<PossiblyDisjointInst>(Val&: I).isDisjoint();
3997 bool IsDisjointInner = cast<PossiblyDisjointInst>(Val: Op0)->isDisjoint();
3998 Value *Inner = Builder.CreateOr(LHS: A, RHS: Op1);
3999 cast<PossiblyDisjointInst>(Val: Inner)->setIsDisjoint(IsDisjointOuter);
4000 Inner->takeName(V: Op0);
4001 return IsDisjointOuter && IsDisjointInner
4002 ? BinaryOperator::CreateDisjointOr(V1: Inner, V2: CI)
4003 : BinaryOperator::CreateOr(V1: Inner, V2: CI);
4004 }
4005
4006 // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
4007 // Since this OR statement hasn't been optimized further yet, we hope
4008 // that this transformation will allow the new ORs to be optimized.
4009 {
4010 Value *X = nullptr, *Y = nullptr;
4011 if (Op0->hasOneUse() && Op1->hasOneUse() &&
4012 match(V: Op0, P: m_Select(C: m_Value(V&: X), L: m_Value(V&: A), R: m_Value(V&: B))) &&
4013 match(V: Op1, P: m_Select(C: m_Value(V&: Y), L: m_Value(V&: C), R: m_Value(V&: D))) && X == Y) {
4014 Value *orTrue = Builder.CreateOr(LHS: A, RHS: C);
4015 Value *orFalse = Builder.CreateOr(LHS: B, RHS: D);
4016 return SelectInst::Create(C: X, S1: orTrue, S2: orFalse);
4017 }
4018 }
4019
4020 // or(ashr(subNSW(Y, X), ScalarSizeInBits(Y) - 1), X) --> X s> Y ? -1 : X.
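  // Rationale: the arithmetic shift smears the sign bit of (Y - X) across the
  // whole value, giving all-ones exactly when X s> Y (the sub is nsw, so no
  // wraparound) and zero otherwise; or'ing that with X yields -1 in the first
  // case and leaves X unchanged in the second, matching the select.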
4021 {
4022 Value *X, *Y;
4023 if (match(V: &I, P: m_c_Or(L: m_OneUse(SubPattern: m_AShr(
4024 L: m_NSWSub(L: m_Value(V&: Y), R: m_Value(V&: X)),
4025 R: m_SpecificInt(V: Ty->getScalarSizeInBits() - 1))),
4026 R: m_Deferred(V: X)))) {
4027 Value *NewICmpInst = Builder.CreateICmpSGT(LHS: X, RHS: Y);
4028 Value *AllOnes = ConstantInt::getAllOnesValue(Ty);
4029 return SelectInst::Create(C: NewICmpInst, S1: AllOnes, S2: X);
4030 }
4031 }
4032
4033 {
4034 // ((A & B) ^ A) | ((A & B) ^ B) -> A ^ B
4035 // (A ^ (A & B)) | (B ^ (A & B)) -> A ^ B
4036 // ((A & B) ^ B) | ((A & B) ^ A) -> A ^ B
4037 // (B ^ (A & B)) | (A ^ (A & B)) -> A ^ B
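    // Rationale: (A & B) ^ A == A & ~B and (A & B) ^ B == ~A & B, and
    // (A & ~B) | (~A & B) is exactly A ^ B.
    // E.g. A = 0b1100, B = 0b1010: A & B = 0b1000, the two xors give 0b0100
    // and 0b0010, and their 'or' is 0b0110 == A ^ B.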
4038 const auto TryXorOpt = [&](Value *Lhs, Value *Rhs) -> Instruction * {
4039 if (match(V: Lhs, P: m_c_Xor(L: m_And(L: m_Value(V&: A), R: m_Value(V&: B)), R: m_Deferred(V: A))) &&
4040 match(V: Rhs,
4041 P: m_c_Xor(L: m_And(L: m_Specific(V: A), R: m_Specific(V: B)), R: m_Specific(V: B)))) {
4042 return BinaryOperator::CreateXor(V1: A, V2: B);
4043 }
4044 return nullptr;
4045 };
4046
4047 if (Instruction *Result = TryXorOpt(Op0, Op1))
4048 return Result;
4049 if (Instruction *Result = TryXorOpt(Op1, Op0))
4050 return Result;
4051 }
4052
4053 if (Instruction *V =
4054 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
4055 return V;
4056
4057 CmpPredicate Pred;
4058 Value *Mul, *Ov, *MulIsNotZero, *UMulWithOv;
// Check if the OR weakens the overflow condition for umul.with.overflow by
// treating any non-zero result as overflow. In that case, we overflow if both
// umul.with.overflow operands are != 0, since with non-zero operands the
// result can only be 0 when the multiplication overflows.
4063 if (match(V: &I,
4064 P: m_c_Or(L: m_CombineAnd(L: m_ExtractValue<1>(V: m_Value(V&: UMulWithOv)),
4065 R: m_Value(V&: Ov)),
4066 R: m_CombineAnd(
4067 L: m_SpecificICmp(MatchPred: ICmpInst::ICMP_NE,
4068 L: m_CombineAnd(L: m_ExtractValue<0>(
4069 V: m_Deferred(V: UMulWithOv)),
4070 R: m_Value(V&: Mul)),
4071 R: m_ZeroInt()),
4072 R: m_Value(V&: MulIsNotZero)))) &&
4073 (Ov->hasOneUse() || (MulIsNotZero->hasOneUse() && Mul->hasOneUse()))) {
4074 Value *A, *B;
4075 if (match(V: UMulWithOv, P: m_Intrinsic<Intrinsic::umul_with_overflow>(
4076 Op0: m_Value(V&: A), Op1: m_Value(V&: B)))) {
4077 Value *NotNullA = Builder.CreateIsNotNull(Arg: A);
4078 Value *NotNullB = Builder.CreateIsNotNull(Arg: B);
4079 return BinaryOperator::CreateAnd(V1: NotNullA, V2: NotNullB);
4080 }
4081 }
4082
4083 /// Res, Overflow = xxx_with_overflow X, C1
4084 /// Try to canonicalize the pattern "Overflow | icmp pred Res, C2" into
4085 /// "Overflow | icmp pred X, C2 +/- C1".
4086 const WithOverflowInst *WO;
4087 const Value *WOV;
4088 const APInt *C1, *C2;
4089 if (match(V: &I, P: m_c_Or(L: m_CombineAnd(L: m_ExtractValue<1>(V: m_CombineAnd(
4090 L: m_WithOverflowInst(I&: WO), R: m_Value(V&: WOV))),
4091 R: m_Value(V&: Ov)),
4092 R: m_OneUse(SubPattern: m_ICmp(Pred, L: m_ExtractValue<0>(V: m_Deferred(V: WOV)),
4093 R: m_APInt(Res&: C2))))) &&
4094 (WO->getBinaryOp() == Instruction::Add ||
4095 WO->getBinaryOp() == Instruction::Sub) &&
4096 (ICmpInst::isEquality(P: Pred) ||
4097 WO->isSigned() == ICmpInst::isSigned(predicate: Pred)) &&
4098 match(V: WO->getRHS(), P: m_APInt(Res&: C1))) {
4099 bool Overflow;
4100 APInt NewC = WO->getBinaryOp() == Instruction::Add
4101 ? (ICmpInst::isSigned(predicate: Pred) ? C2->ssub_ov(RHS: *C1, Overflow)
4102 : C2->usub_ov(RHS: *C1, Overflow))
4103 : (ICmpInst::isSigned(predicate: Pred) ? C2->sadd_ov(RHS: *C1, Overflow)
4104 : C2->uadd_ov(RHS: *C1, Overflow));
4105 if (!Overflow || ICmpInst::isEquality(P: Pred)) {
4106 Value *NewCmp = Builder.CreateICmp(
4107 P: Pred, LHS: WO->getLHS(), RHS: ConstantInt::get(Ty: WO->getLHS()->getType(), V: NewC));
4108 return BinaryOperator::CreateOr(V1: Ov, V2: NewCmp);
4109 }
4110 }
4111
4112 // (~x) | y --> ~(x & (~y)) iff that gets rid of inversions
4113 if (sinkNotIntoOtherHandOfLogicalOp(I))
4114 return &I;
4115
4116 // Improve "get low bit mask up to and including bit X" pattern:
4117 // (1 << X) | ((1 << X) + -1) --> -1 l>> (bitwidth(x) - 1 - X)
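  // E.g. for i8 with X = 3: (1 << 3) | ((1 << 3) + -1) == 8 | 7 == 0b00001111,
  // and -1 l>> (8 - 1 - 3) == 0xff l>> 4 == 0x0f, the same mask.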
4118 if (match(V: &I, P: m_c_Or(L: m_Add(L: m_Shl(L: m_One(), R: m_Value(V&: X)), R: m_AllOnes()),
4119 R: m_Shl(L: m_One(), R: m_Deferred(V: X)))) &&
4120 match(V: &I, P: m_c_Or(L: m_OneUse(SubPattern: m_Value()), R: m_Value()))) {
4121 Value *Sub = Builder.CreateSub(
4122 LHS: ConstantInt::get(Ty, V: Ty->getScalarSizeInBits() - 1), RHS: X);
4123 return BinaryOperator::CreateLShr(V1: Constant::getAllOnesValue(Ty), V2: Sub);
4124 }
4125
// An 'or' recurrence with a loop-invariant step is equivalent to (or start, step).
4127 PHINode *PN = nullptr;
4128 Value *Start = nullptr, *Step = nullptr;
4129 if (matchSimpleRecurrence(I: &I, P&: PN, Start, Step) && DT.dominates(Def: Step, User: PN))
4130 return replaceInstUsesWith(I, V: Builder.CreateOr(LHS: Start, RHS: Step));
4131
4132 // (A & B) | (C | D) or (C | D) | (A & B)
4133 // Can be combined if C or D is of type (A/B & X)
4134 if (match(V: &I, P: m_c_Or(L: m_OneUse(SubPattern: m_And(L: m_Value(V&: A), R: m_Value(V&: B))),
4135 R: m_OneUse(SubPattern: m_Or(L: m_Value(V&: C), R: m_Value(V&: D)))))) {
4136 // (A & B) | (C | ?) -> C | (? | (A & B))
4137 // (A & B) | (C | ?) -> C | (? | (A & B))
4138 // (A & B) | (C | ?) -> C | (? | (A & B))
4139 // (A & B) | (C | ?) -> C | (? | (A & B))
4140 // (C | ?) | (A & B) -> C | (? | (A & B))
4141 // (C | ?) | (A & B) -> C | (? | (A & B))
4142 // (C | ?) | (A & B) -> C | (? | (A & B))
4143 // (C | ?) | (A & B) -> C | (? | (A & B))
4144 if (match(V: D, P: m_OneUse(SubPattern: m_c_And(L: m_Specific(V: A), R: m_Value()))) ||
4145 match(V: D, P: m_OneUse(SubPattern: m_c_And(L: m_Specific(V: B), R: m_Value()))))
4146 return BinaryOperator::CreateOr(
4147 V1: C, V2: Builder.CreateOr(LHS: D, RHS: Builder.CreateAnd(LHS: A, RHS: B)));
4148 // (A & B) | (? | D) -> (? | (A & B)) | D
4149 // (A & B) | (? | D) -> (? | (A & B)) | D
4150 // (A & B) | (? | D) -> (? | (A & B)) | D
4151 // (A & B) | (? | D) -> (? | (A & B)) | D
4152 // (? | D) | (A & B) -> (? | (A & B)) | D
4153 // (? | D) | (A & B) -> (? | (A & B)) | D
4154 // (? | D) | (A & B) -> (? | (A & B)) | D
4155 // (? | D) | (A & B) -> (? | (A & B)) | D
4156 if (match(V: C, P: m_OneUse(SubPattern: m_c_And(L: m_Specific(V: A), R: m_Value()))) ||
4157 match(V: C, P: m_OneUse(SubPattern: m_c_And(L: m_Specific(V: B), R: m_Value()))))
4158 return BinaryOperator::CreateOr(
4159 V1: Builder.CreateOr(LHS: C, RHS: Builder.CreateAnd(LHS: A, RHS: B)), V2: D);
4160 }
4161
4162 if (Instruction *R = reassociateForUses(BO&: I, Builder))
4163 return R;
4164
4165 if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
4166 return Canonicalized;
4167
4168 if (Instruction *Folded = foldLogicOfIsFPClass(BO&: I, Op0, Op1))
4169 return Folded;
4170
4171 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
4172 return Res;
4173
4174 // If we are setting the sign bit of a floating-point value, convert
4175 // this to fneg(fabs), then cast back to integer.
4176 //
4177 // If the result isn't immediately cast back to a float, this will increase
4178 // the number of instructions. This is still probably a better canonical form
4179 // as it enables FP value tracking.
4180 //
4181 // Assumes any IEEE-represented type has the sign bit in the high bit.
4182 //
  // This is a generous interpretation of noimplicitfloat; this is not a true
  // floating-point operation.
4185 Value *CastOp;
4186 if (match(V: Op0, P: m_ElementWiseBitCast(Op: m_Value(V&: CastOp))) &&
4187 match(V: Op1, P: m_SignMask()) &&
4188 !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
4189 Kind: Attribute::NoImplicitFloat)) {
4190 Type *EltTy = CastOp->getType()->getScalarType();
4191 if (EltTy->isFloatingPointTy() &&
4192 APFloat::hasSignBitInMSB(EltTy->getFltSemantics())) {
4193 Value *FAbs = Builder.CreateUnaryIntrinsic(ID: Intrinsic::fabs, V: CastOp);
4194 Value *FNegFAbs = Builder.CreateFNeg(V: FAbs);
4195 return new BitCastInst(FNegFAbs, I.getType());
4196 }
4197 }
4198
4199 // (X & C1) | C2 -> X & (C1 | C2) iff (X & C2) == C2
4200 if (match(V: Op0, P: m_OneUse(SubPattern: m_And(L: m_Value(V&: X), R: m_APInt(Res&: C1)))) &&
4201 match(V: Op1, P: m_APInt(Res&: C2))) {
4202 KnownBits KnownX = computeKnownBits(V: X, CxtI: &I);
4203 if ((KnownX.One & *C2) == *C2)
4204 return BinaryOperator::CreateAnd(V1: X, V2: ConstantInt::get(Ty, V: *C1 | *C2));
4205 }
4206
4207 if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
4208 return Res;
4209
4210 if (Value *V =
4211 simplifyAndOrWithOpReplaced(V: Op0, Op: Op1, RepOp: Constant::getNullValue(Ty),
4212 /*SimplifyOnly*/ false, IC&: *this))
4213 return BinaryOperator::CreateOr(V1: V, V2: Op1);
4214 if (Value *V =
4215 simplifyAndOrWithOpReplaced(V: Op1, Op: Op0, RepOp: Constant::getNullValue(Ty),
4216 /*SimplifyOnly*/ false, IC&: *this))
4217 return BinaryOperator::CreateOr(V1: Op0, V2: V);
4218
4219 if (cast<PossiblyDisjointInst>(Val&: I).isDisjoint())
4220 if (Value *V = SimplifyAddWithRemainder(I))
4221 return replaceInstUsesWith(I, V);
4222
4223 return nullptr;
4224}
4225
4226/// A ^ B can be specified using other logic ops in a variety of patterns. We
4227/// can fold these early and efficiently by morphing an existing instruction.
4228static Instruction *foldXorToXor(BinaryOperator &I,
4229 InstCombiner::BuilderTy &Builder) {
4230 assert(I.getOpcode() == Instruction::Xor);
4231 Value *Op0 = I.getOperand(i_nocapture: 0);
4232 Value *Op1 = I.getOperand(i_nocapture: 1);
4233 Value *A, *B;
4234
4235 // There are 4 commuted variants for each of the basic patterns.
4236
4237 // (A & B) ^ (A | B) -> A ^ B
4238 // (A & B) ^ (B | A) -> A ^ B
4239 // (A | B) ^ (A & B) -> A ^ B
4240 // (A | B) ^ (B & A) -> A ^ B
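  // Per-bit check: if the two bits are equal, the 'and' and the 'or' produce
  // that same bit and their xor is 0; if they differ, the 'and' is 0 and the
  // 'or' is 1, so the xor is 1. Either way the result matches A ^ B.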
4241 if (match(V: &I, P: m_c_Xor(L: m_And(L: m_Value(V&: A), R: m_Value(V&: B)),
4242 R: m_c_Or(L: m_Deferred(V: A), R: m_Deferred(V: B)))))
4243 return BinaryOperator::CreateXor(V1: A, V2: B);
4244
4245 // (A | ~B) ^ (~A | B) -> A ^ B
4246 // (~B | A) ^ (~A | B) -> A ^ B
4247 // (~A | B) ^ (A | ~B) -> A ^ B
4248 // (B | ~A) ^ (A | ~B) -> A ^ B
4249 if (match(V: &I, P: m_Xor(L: m_c_Or(L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B))),
4250 R: m_c_Or(L: m_Not(V: m_Deferred(V: A)), R: m_Deferred(V: B)))))
4251 return BinaryOperator::CreateXor(V1: A, V2: B);
4252
4253 // (A & ~B) ^ (~A & B) -> A ^ B
4254 // (~B & A) ^ (~A & B) -> A ^ B
4255 // (~A & B) ^ (A & ~B) -> A ^ B
4256 // (B & ~A) ^ (A & ~B) -> A ^ B
4257 if (match(V: &I, P: m_Xor(L: m_c_And(L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B))),
4258 R: m_c_And(L: m_Not(V: m_Deferred(V: A)), R: m_Deferred(V: B)))))
4259 return BinaryOperator::CreateXor(V1: A, V2: B);
4260
4261 // For the remaining cases we need to get rid of one of the operands.
4262 if (!Op0->hasOneUse() && !Op1->hasOneUse())
4263 return nullptr;
4264
4265 // (A | B) ^ ~(A & B) -> ~(A ^ B)
4266 // (A | B) ^ ~(B & A) -> ~(A ^ B)
4267 // (A & B) ^ ~(A | B) -> ~(A ^ B)
4268 // (A & B) ^ ~(B | A) -> ~(A ^ B)
4269 // Complexity sorting ensures the not will be on the right side.
4270 if ((match(V: Op0, P: m_Or(L: m_Value(V&: A), R: m_Value(V&: B))) &&
4271 match(V: Op1, P: m_Not(V: m_c_And(L: m_Specific(V: A), R: m_Specific(V: B))))) ||
4272 (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) &&
4273 match(V: Op1, P: m_Not(V: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B))))))
4274 return BinaryOperator::CreateNot(Op: Builder.CreateXor(LHS: A, RHS: B));
4275
4276 return nullptr;
4277}
4278
4279Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
4280 BinaryOperator &I) {
4281 assert(I.getOpcode() == Instruction::Xor && I.getOperand(0) == LHS &&
4282 I.getOperand(1) == RHS && "Should be 'xor' with these operands");
4283
4284 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
4285 Value *LHS0 = LHS->getOperand(i_nocapture: 0), *LHS1 = LHS->getOperand(i_nocapture: 1);
4286 Value *RHS0 = RHS->getOperand(i_nocapture: 0), *RHS1 = RHS->getOperand(i_nocapture: 1);
4287
4288 if (predicatesFoldable(P1: PredL, P2: PredR)) {
4289 if (LHS0 == RHS1 && LHS1 == RHS0) {
4290 std::swap(a&: LHS0, b&: LHS1);
4291 PredL = ICmpInst::getSwappedPredicate(pred: PredL);
4292 }
4293 if (LHS0 == RHS0 && LHS1 == RHS1) {
4294 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
4295 unsigned Code = getICmpCode(Pred: PredL) ^ getICmpCode(Pred: PredR);
4296 bool IsSigned = LHS->isSigned() || RHS->isSigned();
4297 return getNewICmpValue(Code, Sign: IsSigned, LHS: LHS0, RHS: LHS1, Builder);
4298 }
4299 }
4300
4301 const APInt *LC, *RC;
4302 if (match(V: LHS1, P: m_APInt(Res&: LC)) && match(V: RHS1, P: m_APInt(Res&: RC)) &&
4303 LHS0->getType() == RHS0->getType() &&
4304 LHS0->getType()->isIntOrIntVectorTy()) {
4305 // Convert xor of signbit tests to signbit test of xor'd values:
4306 // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0
4307 // (X < 0) ^ (Y < 0) --> (X ^ Y) < 0
4308 // (X > -1) ^ (Y < 0) --> (X ^ Y) > -1
4309 // (X < 0) ^ (Y > -1) --> (X ^ Y) > -1
4310 bool TrueIfSignedL, TrueIfSignedR;
4311 if ((LHS->hasOneUse() || RHS->hasOneUse()) &&
4312 isSignBitCheck(Pred: PredL, RHS: *LC, TrueIfSigned&: TrueIfSignedL) &&
4313 isSignBitCheck(Pred: PredR, RHS: *RC, TrueIfSigned&: TrueIfSignedR)) {
4314 Value *XorLR = Builder.CreateXor(LHS: LHS0, RHS: RHS0);
4315 return TrueIfSignedL == TrueIfSignedR ? Builder.CreateIsNeg(Arg: XorLR) :
4316 Builder.CreateIsNotNeg(Arg: XorLR);
4317 }
4318
4319 // Fold (icmp pred1 X, C1) ^ (icmp pred2 X, C2)
4320 // into a single comparison using range-based reasoning.
4321 if (LHS0 == RHS0) {
4322 ConstantRange CR1 = ConstantRange::makeExactICmpRegion(Pred: PredL, Other: *LC);
4323 ConstantRange CR2 = ConstantRange::makeExactICmpRegion(Pred: PredR, Other: *RC);
4324 auto CRUnion = CR1.exactUnionWith(CR: CR2);
4325 auto CRIntersect = CR1.exactIntersectWith(CR: CR2);
4326 if (CRUnion && CRIntersect)
4327 if (auto CR = CRUnion->exactIntersectWith(CR: CRIntersect->inverse())) {
4328 if (CR->isFullSet())
4329 return ConstantInt::getTrue(Ty: I.getType());
4330 if (CR->isEmptySet())
4331 return ConstantInt::getFalse(Ty: I.getType());
4332
4333 CmpInst::Predicate NewPred;
4334 APInt NewC, Offset;
4335 CR->getEquivalentICmp(Pred&: NewPred, RHS&: NewC, Offset);
4336
4337 if ((Offset.isZero() && (LHS->hasOneUse() || RHS->hasOneUse())) ||
4338 (LHS->hasOneUse() && RHS->hasOneUse())) {
4339 Value *NewV = LHS0;
4340 Type *Ty = LHS0->getType();
4341 if (!Offset.isZero())
4342 NewV = Builder.CreateAdd(LHS: NewV, RHS: ConstantInt::get(Ty, V: Offset));
4343 return Builder.CreateICmp(P: NewPred, LHS: NewV,
4344 RHS: ConstantInt::get(Ty, V: NewC));
4345 }
4346 }
4347 }
4348
4349 // Fold (icmp eq/ne (X & Pow2), 0) ^ (icmp eq/ne (Y & Pow2), 0) into
4350 // (icmp eq/ne ((X ^ Y) & Pow2), 0)
4351 Value *X, *Y, *Pow2;
4352 if (ICmpInst::isEquality(P: PredL) && ICmpInst::isEquality(P: PredR) &&
4353 LC->isZero() && RC->isZero() && LHS->hasOneUse() && RHS->hasOneUse() &&
4354 match(V: LHS0, P: m_And(L: m_Value(V&: X), R: m_Value(V&: Pow2))) &&
4355 match(V: RHS0, P: m_And(L: m_Value(V&: Y), R: m_Specific(V: Pow2))) &&
4356 isKnownToBeAPowerOfTwo(V: Pow2, /*OrZero=*/true, CxtI: &I)) {
4357 Value *Xor = Builder.CreateXor(LHS: X, RHS: Y);
4358 Value *And = Builder.CreateAnd(LHS: Xor, RHS: Pow2);
4359 return Builder.CreateICmp(P: PredL == PredR ? ICmpInst::ICMP_NE
4360 : ICmpInst::ICMP_EQ,
4361 LHS: And, RHS: ConstantInt::getNullValue(Ty: Xor->getType()));
4362 }
4363 }
4364
4365 // Instead of trying to imitate the folds for and/or, decompose this 'xor'
4366 // into those logic ops. That is, try to turn this into an and-of-icmps
4367 // because we have many folds for that pattern.
4368 //
4369 // This is based on a truth table definition of xor:
4370 // X ^ Y --> (X | Y) & !(X & Y)
4371 if (Value *OrICmp = simplifyBinOp(Opcode: Instruction::Or, LHS, RHS, Q: SQ)) {
4372 // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y).
4373 // TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
4374 if (Value *AndICmp = simplifyBinOp(Opcode: Instruction::And, LHS, RHS, Q: SQ)) {
4375 // TODO: Independently handle cases where the 'and' side is a constant.
4376 ICmpInst *X = nullptr, *Y = nullptr;
4377 if (OrICmp == LHS && AndICmp == RHS) {
4378 // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS --> X & !Y
4379 X = LHS;
4380 Y = RHS;
4381 }
4382 if (OrICmp == RHS && AndICmp == LHS) {
4383 // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS --> !Y & X
4384 X = RHS;
4385 Y = LHS;
4386 }
4387 if (X && Y && (Y->hasOneUse() || canFreelyInvertAllUsersOf(V: Y, IgnoredUser: &I))) {
4388 // Invert the predicate of 'Y', thus inverting its output.
4389 Y->setPredicate(Y->getInversePredicate());
4390 // So, are there other uses of Y?
4391 if (!Y->hasOneUse()) {
4392 // We need to adapt other uses of Y though. Get a value that matches
4393 // the original value of Y before inversion. While this increases
4394 // immediate instruction count, we have just ensured that all the
4395 // users are freely-invertible, so that 'not' *will* get folded away.
4396 BuilderTy::InsertPointGuard Guard(Builder);
4397 // Set insertion point to right after the Y.
4398 Builder.SetInsertPoint(TheBB: Y->getParent(), IP: ++(Y->getIterator()));
4399 Value *NotY = Builder.CreateNot(V: Y, Name: Y->getName() + ".not");
4400 // Replace all uses of Y (excluding the one in NotY!) with NotY.
4401 Worklist.pushUsersToWorkList(I&: *Y);
4402 Y->replaceUsesWithIf(New: NotY,
4403 ShouldReplace: [NotY](Use &U) { return U.getUser() != NotY; });
4404 }
4405 // All done.
4406 return Builder.CreateAnd(LHS, RHS);
4407 }
4408 }
4409 }
4410
4411 return nullptr;
4412}
4413
4414/// If we have a masked merge, in the canonical form of:
4415/// (assuming that A only has one use.)
///   |        A        |  |B|
///   ((x ^ y) & M) ^ y
///    |  D  |
/// * If M is inverted:
///    |  D  |
///   ((x ^ y) & ~M) ^ y
4422/// We can canonicalize by swapping the final xor operand
4423/// to eliminate the 'not' of the mask.
4424/// ((x ^ y) & M) ^ x
4425/// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops
4426/// because that shortens the dependency chain and improves analysis:
4427/// (x & M) | (y & ~M)
4428static Instruction *visitMaskedMerge(BinaryOperator &I,
4429 InstCombiner::BuilderTy &Builder) {
4430 Value *B, *X, *D;
4431 Value *M;
4432 if (!match(V: &I, P: m_c_Xor(L: m_Value(V&: B),
4433 R: m_OneUse(SubPattern: m_c_And(
4434 L: m_CombineAnd(L: m_c_Xor(L: m_Deferred(V: B), R: m_Value(V&: X)),
4435 R: m_Value(V&: D)),
4436 R: m_Value(V&: M))))))
4437 return nullptr;
4438
4439 Value *NotM;
4440 if (match(V: M, P: m_Not(V: m_Value(V&: NotM)))) {
4441 // De-invert the mask and swap the value in B part.
4442 Value *NewA = Builder.CreateAnd(LHS: D, RHS: NotM);
4443 return BinaryOperator::CreateXor(V1: NewA, V2: X);
4444 }
4445
4446 Constant *C;
4447 if (D->hasOneUse() && match(V: M, P: m_Constant(C))) {
4448 // Propagating undef is unsafe. Clamp undef elements to -1.
4449 Type *EltTy = C->getType()->getScalarType();
4450 C = Constant::replaceUndefsWith(C, Replacement: ConstantInt::getAllOnesValue(Ty: EltTy));
4451 // Unfold.
4452 Value *LHS = Builder.CreateAnd(LHS: X, RHS: C);
4453 Value *NotC = Builder.CreateNot(V: C);
4454 Value *RHS = Builder.CreateAnd(LHS: B, RHS: NotC);
4455 return BinaryOperator::CreateOr(V1: LHS, V2: RHS);
4456 }
4457
4458 return nullptr;
4459}
4460
4461static Instruction *foldNotXor(BinaryOperator &I,
4462 InstCombiner::BuilderTy &Builder) {
4463 Value *X, *Y;
4464 // FIXME: one-use check is not needed in general, but currently we are unable
4465 // to fold 'not' into 'icmp', if that 'icmp' has multiple uses. (D35182)
4466 if (!match(V: &I, P: m_Not(V: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: X), R: m_Value(V&: Y))))))
4467 return nullptr;
4468
4469 auto hasCommonOperand = [](Value *A, Value *B, Value *C, Value *D) {
4470 return A == C || A == D || B == C || B == D;
4471 };
4472
4473 Value *A, *B, *C, *D;
4474 // Canonicalize ~((A & B) ^ (A | ?)) -> (A & B) | ~(A | ?)
4475 // 4 commuted variants
4476 if (match(V: X, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) &&
4477 match(V: Y, P: m_Or(L: m_Value(V&: C), R: m_Value(V&: D))) && hasCommonOperand(A, B, C, D)) {
4478 Value *NotY = Builder.CreateNot(V: Y);
4479 return BinaryOperator::CreateOr(V1: X, V2: NotY);
4480 };
4481
4482 // Canonicalize ~((A | ?) ^ (A & B)) -> (A & B) | ~(A | ?)
4483 // 4 commuted variants
4484 if (match(V: Y, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) &&
4485 match(V: X, P: m_Or(L: m_Value(V&: C), R: m_Value(V&: D))) && hasCommonOperand(A, B, C, D)) {
4486 Value *NotX = Builder.CreateNot(V: X);
4487 return BinaryOperator::CreateOr(V1: Y, V2: NotX);
4488 };
4489
4490 return nullptr;
4491}
4492
4493/// Canonicalize a shifty way to code absolute value to the more common pattern
4494/// that uses negation and select.
4495static Instruction *canonicalizeAbs(BinaryOperator &Xor,
4496 InstCombiner::BuilderTy &Builder) {
4497 assert(Xor.getOpcode() == Instruction::Xor && "Expected an xor instruction.");
4498
4499 // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
4500 // We're relying on the fact that we only do this transform when the shift has
// exactly 2 uses and the add has exactly 1 use (otherwise, we might increase
// the instruction count).
4503 Value *Op0 = Xor.getOperand(i_nocapture: 0), *Op1 = Xor.getOperand(i_nocapture: 1);
4504 if (Op0->hasNUses(N: 2))
4505 std::swap(a&: Op0, b&: Op1);
4506
4507 Type *Ty = Xor.getType();
4508 Value *A;
4509 const APInt *ShAmt;
4510 if (match(V: Op1, P: m_AShr(L: m_Value(V&: A), R: m_APInt(Res&: ShAmt))) &&
4511 Op1->hasNUses(N: 2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
4512 match(V: Op0, P: m_OneUse(SubPattern: m_c_Add(L: m_Specific(V: A), R: m_Specific(V: Op1))))) {
4513 // Op1 = ashr i32 A, 31 ; smear the sign bit
4514 // xor (add A, Op1), Op1 ; add -1 and flip bits if negative
4515 // --> (A < 0) ? -A : A
4516 Value *IsNeg = Builder.CreateIsNeg(Arg: A);
4517 // Copy the nsw flags from the add to the negate.
4518 auto *Add = cast<BinaryOperator>(Val: Op0);
4519 Value *NegA = Add->hasNoUnsignedWrap()
4520 ? Constant::getNullValue(Ty: A->getType())
4521 : Builder.CreateNeg(V: A, Name: "", HasNSW: Add->hasNoSignedWrap());
4522 return SelectInst::Create(C: IsNeg, S1: NegA, S2: A);
4523 }
4524 return nullptr;
4525}
4526
4527static bool canFreelyInvert(InstCombiner &IC, Value *Op,
4528 Instruction *IgnoredUser) {
4529 auto *I = dyn_cast<Instruction>(Val: Op);
4530 return I && IC.isFreeToInvert(V: I, /*WillInvertAllUses=*/true) &&
4531 IC.canFreelyInvertAllUsersOf(V: I, IgnoredUser);
4532}
4533
4534static Value *freelyInvert(InstCombinerImpl &IC, Value *Op,
4535 Instruction *IgnoredUser) {
4536 auto *I = cast<Instruction>(Val: Op);
4537 IC.Builder.SetInsertPoint(*I->getInsertionPointAfterDef());
4538 Value *NotOp = IC.Builder.CreateNot(V: Op, Name: Op->getName() + ".not");
4539 Op->replaceUsesWithIf(New: NotOp,
4540 ShouldReplace: [NotOp](Use &U) { return U.getUser() != NotOp; });
4541 IC.freelyInvertAllUsersOf(V: NotOp, IgnoredUser);
4542 return NotOp;
4543}
4544
4545// Transform
4546// z = ~(x &/| y)
4547// into:
4548// z = ((~x) |/& (~y))
4549// iff both x and y are free to invert and all uses of z can be freely updated.
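// E.g. if x and y are one-use icmps, z = ~((p == q) & (r == s)) can be
// rebuilt as (p != q) | (r != s), with no explicit 'not' left over.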
4550bool InstCombinerImpl::sinkNotIntoLogicalOp(Instruction &I) {
4551 Value *Op0, *Op1;
4552 if (!match(V: &I, P: m_LogicalOp(L: m_Value(V&: Op0), R: m_Value(V&: Op1))))
4553 return false;
4554
4555 // If this logic op has not been simplified yet, just bail out and let that
4556 // happen first. Otherwise, the code below may wrongly invert.
4557 if (Op0 == Op1)
4558 return false;
4559
4560 // If one of the operands is a user of the other,
4561 // freelyInvert->freelyInvertAllUsersOf will change the operands of I, which
4562 // may cause miscompilation.
4563 if (match(V: Op0, P: m_Not(V: m_Specific(V: Op1))) || match(V: Op1, P: m_Not(V: m_Specific(V: Op0))))
4564 return false;
4565
4566 Instruction::BinaryOps NewOpc =
4567 match(V: &I, P: m_LogicalAnd()) ? Instruction::Or : Instruction::And;
4568 bool IsBinaryOp = isa<BinaryOperator>(Val: I);
4569
4570 // Can our users be adapted?
4571 if (!InstCombiner::canFreelyInvertAllUsersOf(V: &I, /*IgnoredUser=*/nullptr))
4572 return false;
4573
4574 // And can the operands be adapted?
4575 if (!canFreelyInvert(IC&: *this, Op: Op0, IgnoredUser: &I) || !canFreelyInvert(IC&: *this, Op: Op1, IgnoredUser: &I))
4576 return false;
4577
4578 Op0 = freelyInvert(IC&: *this, Op: Op0, IgnoredUser: &I);
4579 Op1 = freelyInvert(IC&: *this, Op: Op1, IgnoredUser: &I);
4580
4581 Builder.SetInsertPoint(*I.getInsertionPointAfterDef());
4582 Value *NewLogicOp;
4583 if (IsBinaryOp)
4584 NewLogicOp = Builder.CreateBinOp(Opc: NewOpc, LHS: Op0, RHS: Op1, Name: I.getName() + ".not");
4585 else
4586 NewLogicOp =
4587 Builder.CreateLogicalOp(Opc: NewOpc, Cond1: Op0, Cond2: Op1, Name: I.getName() + ".not");
4588
4589 replaceInstUsesWith(I, V: NewLogicOp);
  // We cannot just create an outer `not`: it would most likely be folded right
  // back, reconstructing our initial pattern and causing an infinite combine
  // loop, so manually fold it away immediately.
4593 freelyInvertAllUsersOf(V: NewLogicOp);
4594 return true;
4595}
4596
4597// Transform
4598// z = (~x) &/| y
4599// into:
4600// z = ~(x |/& (~y))
4601// iff y is free to invert and all uses of z can be freely updated.
4602bool InstCombinerImpl::sinkNotIntoOtherHandOfLogicalOp(Instruction &I) {
4603 Value *Op0, *Op1;
4604 if (!match(V: &I, P: m_LogicalOp(L: m_Value(V&: Op0), R: m_Value(V&: Op1))))
4605 return false;
4606 Instruction::BinaryOps NewOpc =
4607 match(V: &I, P: m_LogicalAnd()) ? Instruction::Or : Instruction::And;
4608 bool IsBinaryOp = isa<BinaryOperator>(Val: I);
4609
4610 Value *NotOp0 = nullptr;
4611 Value *NotOp1 = nullptr;
4612 Value **OpToInvert = nullptr;
4613 if (match(V: Op0, P: m_Not(V: m_Value(V&: NotOp0))) && canFreelyInvert(IC&: *this, Op: Op1, IgnoredUser: &I)) {
4614 Op0 = NotOp0;
4615 OpToInvert = &Op1;
4616 } else if (match(V: Op1, P: m_Not(V: m_Value(V&: NotOp1))) &&
4617 canFreelyInvert(IC&: *this, Op: Op0, IgnoredUser: &I)) {
4618 Op1 = NotOp1;
4619 OpToInvert = &Op0;
4620 } else
4621 return false;
4622
4623 // And can our users be adapted?
4624 if (!InstCombiner::canFreelyInvertAllUsersOf(V: &I, /*IgnoredUser=*/nullptr))
4625 return false;
4626
4627 *OpToInvert = freelyInvert(IC&: *this, Op: *OpToInvert, IgnoredUser: &I);
4628
4629 Builder.SetInsertPoint(*I.getInsertionPointAfterDef());
4630 Value *NewBinOp;
4631 if (IsBinaryOp)
4632 NewBinOp = Builder.CreateBinOp(Opc: NewOpc, LHS: Op0, RHS: Op1, Name: I.getName() + ".not");
4633 else
4634 NewBinOp = Builder.CreateLogicalOp(Opc: NewOpc, Cond1: Op0, Cond2: Op1, Name: I.getName() + ".not");
4635 replaceInstUsesWith(I, V: NewBinOp);
  // We cannot just create an outer `not`: it would most likely be folded right
  // back, reconstructing our initial pattern and causing an infinite combine
  // loop, so manually fold it away immediately.
4639 freelyInvertAllUsersOf(V: NewBinOp);
4640 return true;
4641}
4642
4643Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
4644 Value *NotOp;
4645 if (!match(V: &I, P: m_Not(V: m_Value(V&: NotOp))))
4646 return nullptr;
4647
4648 // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
4649 // We must eliminate the and/or (one-use) for these transforms to not increase
4650 // the instruction count.
4651 //
4652 // ~(~X & Y) --> (X | ~Y)
4653 // ~(Y & ~X) --> (X | ~Y)
4654 //
4655 // Note: The logical matches do not check for the commuted patterns because
4656 // those are handled via SimplifySelectsFeedingBinaryOp().
4657 Type *Ty = I.getType();
4658 Value *X, *Y;
4659 if (match(V: NotOp, P: m_OneUse(SubPattern: m_c_And(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))) {
4660 Value *NotY = Builder.CreateNot(V: Y, Name: Y->getName() + ".not");
4661 return BinaryOperator::CreateOr(V1: X, V2: NotY);
4662 }
4663 if (match(V: NotOp, P: m_OneUse(SubPattern: m_LogicalAnd(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))) {
4664 Value *NotY = Builder.CreateNot(V: Y, Name: Y->getName() + ".not");
4665 return SelectInst::Create(C: X, S1: ConstantInt::getTrue(Ty), S2: NotY);
4666 }
4667
4668 // ~(~X | Y) --> (X & ~Y)
4669 // ~(Y | ~X) --> (X & ~Y)
4670 if (match(V: NotOp, P: m_OneUse(SubPattern: m_c_Or(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))) {
4671 Value *NotY = Builder.CreateNot(V: Y, Name: Y->getName() + ".not");
4672 return BinaryOperator::CreateAnd(V1: X, V2: NotY);
4673 }
4674 if (match(V: NotOp, P: m_OneUse(SubPattern: m_LogicalOr(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))) {
4675 Value *NotY = Builder.CreateNot(V: Y, Name: Y->getName() + ".not");
4676 return SelectInst::Create(C: X, S1: NotY, S2: ConstantInt::getFalse(Ty));
4677 }
4678
4679 // Is this a 'not' (~) fed by a binary operator?
4680 BinaryOperator *NotVal;
4681 if (match(V: NotOp, P: m_BinOp(I&: NotVal))) {
4682 // ~((-X) | Y) --> (X - 1) & (~Y)
4683 if (match(V: NotVal,
4684 P: m_OneUse(SubPattern: m_c_Or(L: m_OneUse(SubPattern: m_Neg(V: m_Value(V&: X))), R: m_Value(V&: Y))))) {
4685 Value *DecX = Builder.CreateAdd(LHS: X, RHS: ConstantInt::getAllOnesValue(Ty));
4686 Value *NotY = Builder.CreateNot(V: Y);
4687 return BinaryOperator::CreateAnd(V1: DecX, V2: NotY);
4688 }
4689
4690 // ~(~X >>s Y) --> (X >>s Y)
4691 if (match(V: NotVal, P: m_AShr(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))
4692 return BinaryOperator::CreateAShr(V1: X, V2: Y);
4693
4694 // Treat lshr with non-negative operand as ashr.
4695 // ~(~X >>u Y) --> (X >>s Y) iff X is known negative
4696 if (match(V: NotVal, P: m_LShr(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))) &&
4697 isKnownNegative(V: X, SQ: SQ.getWithInstruction(I: NotVal)))
4698 return BinaryOperator::CreateAShr(V1: X, V2: Y);
4699
4700 // Bit-hack form of a signbit test for iN type:
4701 // ~(X >>s (N - 1)) --> sext i1 (X > -1) to iN
4702 unsigned FullShift = Ty->getScalarSizeInBits() - 1;
4703 if (match(V: NotVal, P: m_OneUse(SubPattern: m_AShr(L: m_Value(V&: X), R: m_SpecificInt(V: FullShift))))) {
4704 Value *IsNotNeg = Builder.CreateIsNotNeg(Arg: X, Name: "isnotneg");
4705 return new SExtInst(IsNotNeg, Ty);
4706 }
4707
4708 // If we are inverting a right-shifted constant, we may be able to eliminate
4709 // the 'not' by inverting the constant and using the opposite shift type.
4710 // Canonicalization rules ensure that only a negative constant uses 'ashr',
4711 // but we must check that in case that transform has not fired yet.
4712
4713 // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
4714 Constant *C;
4715 if (match(V: NotVal, P: m_AShr(L: m_Constant(C), R: m_Value(V&: Y))) &&
4716 match(V: C, P: m_Negative()))
4717 return BinaryOperator::CreateLShr(V1: ConstantExpr::getNot(C), V2: Y);
4718
4719 // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
4720 if (match(V: NotVal, P: m_LShr(L: m_Constant(C), R: m_Value(V&: Y))) &&
4721 match(V: C, P: m_NonNegative()))
4722 return BinaryOperator::CreateAShr(V1: ConstantExpr::getNot(C), V2: Y);
4723
4724 // ~(X + C) --> ~C - X
4725 if (match(V: NotVal, P: m_Add(L: m_Value(V&: X), R: m_ImmConstant(C))))
4726 return BinaryOperator::CreateSub(V1: ConstantExpr::getNot(C), V2: X);
4727
4728 // ~(X - Y) --> ~X + Y
4729 // FIXME: is it really beneficial to sink the `not` here?
4730 if (match(V: NotVal, P: m_Sub(L: m_Value(V&: X), R: m_Value(V&: Y))))
4731 if (isa<Constant>(Val: X) || NotVal->hasOneUse())
4732 return BinaryOperator::CreateAdd(V1: Builder.CreateNot(V: X), V2: Y);
4733
4734 // ~(~X + Y) --> X - Y
4735 if (match(V: NotVal, P: m_c_Add(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y))))
4736 return BinaryOperator::CreateWithCopiedFlags(Opc: Instruction::Sub, V1: X, V2: Y,
4737 CopyO: NotVal);
4738 }
4739
4740 // not (cmp A, B) = !cmp A, B
4741 CmpPredicate Pred;
4742 if (match(V: NotOp, P: m_Cmp(Pred, L: m_Value(), R: m_Value())) &&
4743 (NotOp->hasOneUse() ||
4744 InstCombiner::canFreelyInvertAllUsersOf(V: cast<Instruction>(Val: NotOp),
4745 /*IgnoredUser=*/nullptr))) {
4746 cast<CmpInst>(Val: NotOp)->setPredicate(CmpInst::getInversePredicate(pred: Pred));
4747 freelyInvertAllUsersOf(V: NotOp);
4748 return &I;
4749 }
4750
4751 // Move a 'not' ahead of casts of a bool to enable logic reduction:
4752 // not (bitcast (sext i1 X)) --> bitcast (sext (not i1 X))
  if (match(V: NotOp, P: m_OneUse(SubPattern: m_BitCast(Op: m_OneUse(SubPattern: m_SExt(Op: m_Value(V&: X)))))) &&
      X->getType()->isIntOrIntVectorTy(BitWidth: 1)) {
4754 Type *SextTy = cast<BitCastOperator>(Val: NotOp)->getSrcTy();
4755 Value *NotX = Builder.CreateNot(V: X);
4756 Value *Sext = Builder.CreateSExt(V: NotX, DestTy: SextTy);
4757 return new BitCastInst(Sext, Ty);
4758 }
4759
4760 if (auto *NotOpI = dyn_cast<Instruction>(Val: NotOp))
4761 if (sinkNotIntoLogicalOp(I&: *NotOpI))
4762 return &I;
4763
4764 // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
4765 // ~min(~X, ~Y) --> max(X, Y)
4766 // ~max(~X, Y) --> min(X, ~Y)
4767 auto *II = dyn_cast<IntrinsicInst>(Val: NotOp);
4768 if (II && II->hasOneUse()) {
4769 if (match(V: NotOp, P: m_c_MaxOrMin(L: m_Not(V: m_Value(V&: X)), R: m_Value(V&: Y)))) {
4770 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMaxID: II->getIntrinsicID());
4771 Value *NotY = Builder.CreateNot(V: Y);
4772 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(ID: InvID, LHS: X, RHS: NotY);
4773 return replaceInstUsesWith(I, V: InvMaxMin);
4774 }
4775
4776 if (II->getIntrinsicID() == Intrinsic::is_fpclass) {
4777 ConstantInt *ClassMask = cast<ConstantInt>(Val: II->getArgOperand(i: 1));
4778 II->setArgOperand(
4779 i: 1, v: ConstantInt::get(Ty: ClassMask->getType(),
4780 V: ~ClassMask->getZExtValue() & fcAllFlags));
4781 return replaceInstUsesWith(I, V: II);
4782 }
4783 }
4784
4785 if (NotOp->hasOneUse()) {
4786 // Pull 'not' into operands of select if both operands are one-use compares
4787 // or one is one-use compare and the other one is a constant.
4788 // Inverting the predicates eliminates the 'not' operation.
4789 // Example:
    // not (select ?, (cmp TPred, ?, ?), (cmp FPred, ?, ?)) -->
4791 // select ?, (cmp InvTPred, ?, ?), (cmp InvFPred, ?, ?)
    // not (select ?, (cmp TPred, ?, ?), true) -->
4793 // select ?, (cmp InvTPred, ?, ?), false
4794 if (auto *Sel = dyn_cast<SelectInst>(Val: NotOp)) {
4795 Value *TV = Sel->getTrueValue();
4796 Value *FV = Sel->getFalseValue();
4797 auto *CmpT = dyn_cast<CmpInst>(Val: TV);
4798 auto *CmpF = dyn_cast<CmpInst>(Val: FV);
4799 bool InvertibleT = (CmpT && CmpT->hasOneUse()) || isa<Constant>(Val: TV);
4800 bool InvertibleF = (CmpF && CmpF->hasOneUse()) || isa<Constant>(Val: FV);
4801 if (InvertibleT && InvertibleF) {
4802 if (CmpT)
4803 CmpT->setPredicate(CmpT->getInversePredicate());
4804 else
4805 Sel->setTrueValue(ConstantExpr::getNot(C: cast<Constant>(Val: TV)));
4806 if (CmpF)
4807 CmpF->setPredicate(CmpF->getInversePredicate());
4808 else
4809 Sel->setFalseValue(ConstantExpr::getNot(C: cast<Constant>(Val: FV)));
4810 return replaceInstUsesWith(I, V: Sel);
4811 }
4812 }
4813 }
4814
4815 if (Instruction *NewXor = foldNotXor(I, Builder))
4816 return NewXor;
4817
4818 // TODO: Could handle multi-use better by checking if all uses of NotOp (other
4819 // than I) can be inverted.
4820 if (Value *R = getFreelyInverted(V: NotOp, WillInvertAllUses: NotOp->hasOneUse(), Builder: &Builder))
4821 return replaceInstUsesWith(I, V: R);
4822
4823 return nullptr;
4824}
4825
4826// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
4827// here. We should standardize that construct where it is needed or choose some
4828// other way to ensure that commutated variants of patterns are not missed.
4829Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
4830 if (Value *V = simplifyXorInst(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1),
4831 Q: SQ.getWithInstruction(I: &I)))
4832 return replaceInstUsesWith(I, V);
4833
4834 if (SimplifyAssociativeOrCommutative(I))
4835 return &I;
4836
4837 if (Instruction *X = foldVectorBinop(Inst&: I))
4838 return X;
4839
4840 if (Instruction *Phi = foldBinopWithPhiOperands(BO&: I))
4841 return Phi;
4842
4843 if (Instruction *NewXor = foldXorToXor(I, Builder))
4844 return NewXor;
4845
4846 // (A&B)^(A&C) -> A&(B^C) etc
4847 if (Value *V = foldUsingDistributiveLaws(I))
4848 return replaceInstUsesWith(I, V);
4849
4850 // See if we can simplify any instructions used by the instruction whose sole
4851 // purpose is to compute bits we don't care about.
4852 if (SimplifyDemandedInstructionBits(Inst&: I))
4853 return &I;
4854
4855 if (Instruction *R = foldNot(I))
4856 return R;
4857
4858 if (Instruction *R = foldBinOpShiftWithShift(I))
4859 return R;
4860
4861 Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1);
4862 Value *X, *Y, *M;
4863
4864 // (X | Y) ^ M -> (X ^ M) ^ Y
4865 // (X | Y) ^ M -> (Y ^ M) ^ X
4866 if (match(V: &I, P: m_c_Xor(L: m_OneUse(SubPattern: m_DisjointOr(L: m_Value(V&: X), R: m_Value(V&: Y))),
4867 R: m_Value(V&: M)))) {
4868 if (Value *XorAC = simplifyXorInst(LHS: X, RHS: M, Q: SQ.getWithInstruction(I: &I)))
4869 return BinaryOperator::CreateXor(V1: XorAC, V2: Y);
4870
4871 if (Value *XorBC = simplifyXorInst(LHS: Y, RHS: M, Q: SQ.getWithInstruction(I: &I)))
4872 return BinaryOperator::CreateXor(V1: XorBC, V2: X);
4873 }
4874
4875 // Fold (X & M) ^ (Y & ~M) -> (X & M) | (Y & ~M)
  // This is a special case in haveNoCommonBitsSet, but the computeKnownBits
4877 // calls in there are unnecessary as SimplifyDemandedInstructionBits should
4878 // have already taken care of those cases.
4879 if (match(V: &I, P: m_c_Xor(L: m_c_And(L: m_Not(V: m_Value(V&: M)), R: m_Value()),
4880 R: m_c_And(L: m_Deferred(V: M), R: m_Value())))) {
4881 if (isGuaranteedNotToBeUndef(V: M))
4882 return BinaryOperator::CreateDisjointOr(V1: Op0, V2: Op1);
4883 else
4884 return BinaryOperator::CreateOr(V1: Op0, V2: Op1);
4885 }
4886
4887 if (Instruction *Xor = visitMaskedMerge(I, Builder))
4888 return Xor;
4889
4890 Constant *C1;
4891 if (match(V: Op1, P: m_Constant(C&: C1))) {
4892 Constant *C2;
4893
4894 if (match(V: Op0, P: m_OneUse(SubPattern: m_Or(L: m_Value(V&: X), R: m_ImmConstant(C&: C2)))) &&
4895 match(V: C1, P: m_ImmConstant())) {
4896 // (X | C2) ^ C1 --> (X & ~C2) ^ (C1^C2)
4897 C2 = Constant::replaceUndefsWith(
4898 C: C2, Replacement: Constant::getAllOnesValue(Ty: C2->getType()->getScalarType()));
4899 Value *And = Builder.CreateAnd(
4900 LHS: X, RHS: Constant::mergeUndefsWith(C: ConstantExpr::getNot(C: C2), Other: C1));
4901 return BinaryOperator::CreateXor(
4902 V1: And, V2: Constant::mergeUndefsWith(C: ConstantExpr::getXor(C1, C2), Other: C1));
4903 }
4904
4905 // Use DeMorgan and reassociation to eliminate a 'not' op.
4906 if (match(V: Op0, P: m_OneUse(SubPattern: m_Or(L: m_Not(V: m_Value(V&: X)), R: m_Constant(C&: C2))))) {
4907 // (~X | C2) ^ C1 --> ((X & ~C2) ^ -1) ^ C1 --> (X & ~C2) ^ ~C1
4908 Value *And = Builder.CreateAnd(LHS: X, RHS: ConstantExpr::getNot(C: C2));
4909 return BinaryOperator::CreateXor(V1: And, V2: ConstantExpr::getNot(C: C1));
4910 }
4911 if (match(V: Op0, P: m_OneUse(SubPattern: m_And(L: m_Not(V: m_Value(V&: X)), R: m_Constant(C&: C2))))) {
4912 // (~X & C2) ^ C1 --> ((X | ~C2) ^ -1) ^ C1 --> (X | ~C2) ^ ~C1
4913 Value *Or = Builder.CreateOr(LHS: X, RHS: ConstantExpr::getNot(C: C2));
4914 return BinaryOperator::CreateXor(V1: Or, V2: ConstantExpr::getNot(C: C1));
4915 }
4916
4917 // Convert xor ([trunc] (ashr X, BW-1)), C =>
4918 // select(X >s -1, C, ~C)
    // The ashr produces all-zeros or all-ones, which then either leaves the
    // constant as-is or inverts it, depending on whether the input is negative.
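    // E.g. with C1 == 5: a non-negative X gives an ashr of 0 and 0 ^ 5 == 5,
    // while a negative X gives all-ones and -1 ^ 5 == ~5, matching the select.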
4921 const APInt *CA;
4922 if (match(V: Op0, P: m_OneUse(SubPattern: m_TruncOrSelf(
4923 Op: m_AShr(L: m_Value(V&: X), R: m_APIntAllowPoison(Res&: CA))))) &&
4924 *CA == X->getType()->getScalarSizeInBits() - 1 &&
4925 !match(V: C1, P: m_AllOnes())) {
4926 assert(!C1->isZeroValue() && "Unexpected xor with 0");
4927 Value *IsNotNeg = Builder.CreateIsNotNeg(Arg: X);
4928 return SelectInst::Create(C: IsNotNeg, S1: Op1, S2: Builder.CreateNot(V: Op1));
4929 }
4930 }
4931
4932 Type *Ty = I.getType();
4933 {
4934 const APInt *RHSC;
4935 if (match(V: Op1, P: m_APInt(Res&: RHSC))) {
4936 Value *X;
4937 const APInt *C;
4938 // (C - X) ^ signmaskC --> (C + signmaskC) - X
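      // Rationale: xor with the sign mask is identical to adding it (the carry
      // out of the top bit is discarded), so it can be folded into the constant
      // operand of the add/sub.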
4939 if (RHSC->isSignMask() && match(V: Op0, P: m_Sub(L: m_APInt(Res&: C), R: m_Value(V&: X))))
4940 return BinaryOperator::CreateSub(V1: ConstantInt::get(Ty, V: *C + *RHSC), V2: X);
4941
4942 // (X + C) ^ signmaskC --> X + (C + signmaskC)
4943 if (RHSC->isSignMask() && match(V: Op0, P: m_Add(L: m_Value(V&: X), R: m_APInt(Res&: C))))
4944 return BinaryOperator::CreateAdd(V1: X, V2: ConstantInt::get(Ty, V: *C + *RHSC));
4945
4946 // (X | C) ^ RHSC --> X ^ (C ^ RHSC) iff X & C == 0
4947 if (match(V: Op0, P: m_Or(L: m_Value(V&: X), R: m_APInt(Res&: C))) &&
4948 MaskedValueIsZero(V: X, Mask: *C, CxtI: &I))
4949 return BinaryOperator::CreateXor(V1: X, V2: ConstantInt::get(Ty, V: *C ^ *RHSC));
4950
4951 // When X is a power-of-two or zero and zero input is poison:
4952 // ctlz(i32 X) ^ 31 --> cttz(X)
4953 // cttz(i32 X) ^ 31 --> ctlz(X)
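      // E.g. for i32 X == 8: ctlz(8) == 28 and 28 ^ 31 == 3 == cttz(8). For a
      // power of two 2^k, ctlz is 31 - k, and xor'ing with 31 recovers k.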
4954 auto *II = dyn_cast<IntrinsicInst>(Val: Op0);
4955 if (II && II->hasOneUse() && *RHSC == Ty->getScalarSizeInBits() - 1) {
4956 Intrinsic::ID IID = II->getIntrinsicID();
4957 if ((IID == Intrinsic::ctlz || IID == Intrinsic::cttz) &&
4958 match(V: II->getArgOperand(i: 1), P: m_One()) &&
4959 isKnownToBeAPowerOfTwo(V: II->getArgOperand(i: 0), /*OrZero */ true)) {
4960 IID = (IID == Intrinsic::ctlz) ? Intrinsic::cttz : Intrinsic::ctlz;
4961 Function *F =
4962 Intrinsic::getOrInsertDeclaration(M: II->getModule(), id: IID, Tys: Ty);
4963 return CallInst::Create(Func: F, Args: {II->getArgOperand(i: 0), Builder.getTrue()});
4964 }
4965 }
4966
4967 // If RHSC is inverting the remaining bits of shifted X,
4968 // canonicalize to a 'not' before the shift to help SCEV and codegen:
4969 // (X << C) ^ RHSC --> ~X << C
4970 if (match(V: Op0, P: m_OneUse(SubPattern: m_Shl(L: m_Value(V&: X), R: m_APInt(Res&: C)))) &&
4971 *RHSC == APInt::getAllOnes(numBits: Ty->getScalarSizeInBits()).shl(ShiftAmt: *C)) {
4972 Value *NotX = Builder.CreateNot(V: X);
4973 return BinaryOperator::CreateShl(V1: NotX, V2: ConstantInt::get(Ty, V: *C));
4974 }
4975 // (X >>u C) ^ RHSC --> ~X >>u C
4976 if (match(V: Op0, P: m_OneUse(SubPattern: m_LShr(L: m_Value(V&: X), R: m_APInt(Res&: C)))) &&
4977 *RHSC == APInt::getAllOnes(numBits: Ty->getScalarSizeInBits()).lshr(ShiftAmt: *C)) {
4978 Value *NotX = Builder.CreateNot(V: X);
4979 return BinaryOperator::CreateLShr(V1: NotX, V2: ConstantInt::get(Ty, V: *C));
4980 }
4981 // TODO: We could handle 'ashr' here as well. That would be matching
4982 // a 'not' op and moving it before the shift. Doing that requires
4983 // preventing the inverse fold in canShiftBinOpWithConstantRHS().
4984 }
4985
4986 // If we are XORing the sign bit of a floating-point value, convert
4987 // this to fneg, then cast back to integer.
4988 //
    // This is a generous interpretation of noimplicitfloat; this is not a true
    // floating-point operation.
4991 //
4992 // Assumes any IEEE-represented type has the sign bit in the high bit.
4993 // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt
4994 Value *CastOp;
4995 if (match(V: Op0, P: m_ElementWiseBitCast(Op: m_Value(V&: CastOp))) &&
4996 match(V: Op1, P: m_SignMask()) &&
4997 !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
4998 Kind: Attribute::NoImplicitFloat)) {
4999 Type *EltTy = CastOp->getType()->getScalarType();
5000 if (EltTy->isFloatingPointTy() &&
5001 APFloat::hasSignBitInMSB(EltTy->getFltSemantics())) {
5002 Value *FNeg = Builder.CreateFNeg(V: CastOp);
5003 return new BitCastInst(FNeg, I.getType());
5004 }
5005 }
5006 }
5007
5008 // FIXME: This should not be limited to scalar (pull into APInt match above).
5009 {
5010 Value *X;
5011 ConstantInt *C1, *C2, *C3;
5012 // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
5013 if (match(V: Op1, P: m_ConstantInt(CI&: C3)) &&
5014 match(V: Op0, P: m_LShr(L: m_Xor(L: m_Value(V&: X), R: m_ConstantInt(CI&: C1)),
5015 R: m_ConstantInt(CI&: C2))) &&
5016 Op0->hasOneUse()) {
5017 // fold (C1 >> C2) ^ C3
5018 APInt FoldConst = C1->getValue().lshr(ShiftAmt: C2->getValue());
5019 FoldConst ^= C3->getValue();
5020 // Prepare the two operands.
5021 auto *Opnd0 = Builder.CreateLShr(LHS: X, RHS: C2);
5022 Opnd0->takeName(V: Op0);
5023 return BinaryOperator::CreateXor(V1: Opnd0, V2: ConstantInt::get(Ty, V: FoldConst));
5024 }
5025 }
5026
5027 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
5028 return FoldedLogic;
5029
5030 // Y ^ (X | Y) --> X & ~Y
5031 // Y ^ (Y | X) --> X & ~Y
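  // Per-bit check: where Y is set, (X | Y) is set and the xor is 0, as is
  // X & ~Y; where Y is clear, the xor is just X, again matching X & ~Y.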
5032 if (match(V: Op1, P: m_OneUse(SubPattern: m_c_Or(L: m_Value(V&: X), R: m_Specific(V: Op0)))))
5033 return BinaryOperator::CreateAnd(V1: X, V2: Builder.CreateNot(V: Op0));
5034 // (X | Y) ^ Y --> X & ~Y
5035 // (Y | X) ^ Y --> X & ~Y
5036 if (match(V: Op0, P: m_OneUse(SubPattern: m_c_Or(L: m_Value(V&: X), R: m_Specific(V: Op1)))))
5037 return BinaryOperator::CreateAnd(V1: X, V2: Builder.CreateNot(V: Op1));
5038
5039 // Y ^ (X & Y) --> ~X & Y
5040 // Y ^ (Y & X) --> ~X & Y
5041 if (match(V: Op1, P: m_OneUse(SubPattern: m_c_And(L: m_Value(V&: X), R: m_Specific(V: Op0)))))
5042 return BinaryOperator::CreateAnd(V1: Op0, V2: Builder.CreateNot(V: X));
5043 // (X & Y) ^ Y --> ~X & Y
5044 // (Y & X) ^ Y --> ~X & Y
5045 // Canonical form is (X & C) ^ C; don't touch that.
5046 // TODO: A 'not' op is better for analysis and codegen, but demanded bits must
5047 // be fixed to prefer that (otherwise we get infinite looping).
5048 if (!match(V: Op1, P: m_Constant()) &&
5049 match(V: Op0, P: m_OneUse(SubPattern: m_c_And(L: m_Value(V&: X), R: m_Specific(V: Op1)))))
5050 return BinaryOperator::CreateAnd(V1: Op1, V2: Builder.CreateNot(V: X));
5051
5052 Value *A, *B, *C;
5053 // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants.
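  // Rationale: A ^ (A | C) leaves only the C bits where A is clear, i.e.
  // ~A & C, so the whole expression reassociates to (~A & C) ^ B.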
5054 if (match(V: &I, P: m_c_Xor(L: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))),
5055 R: m_OneUse(SubPattern: m_c_Or(L: m_Deferred(V: A), R: m_Value(V&: C))))))
5056 return BinaryOperator::CreateXor(
5057 V1: Builder.CreateAnd(LHS: Builder.CreateNot(V: A), RHS: C), V2: B);
5058
5059 // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants.
5060 if (match(V: &I, P: m_c_Xor(L: m_OneUse(SubPattern: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))),
5061 R: m_OneUse(SubPattern: m_c_Or(L: m_Deferred(V: B), R: m_Value(V&: C))))))
5062 return BinaryOperator::CreateXor(
5063 V1: Builder.CreateAnd(LHS: Builder.CreateNot(V: B), RHS: C), V2: A);
5064
5065 // (A & B) ^ (A ^ B) -> (A | B)
5066 if (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) &&
5067 match(V: Op1, P: m_c_Xor(L: m_Specific(V: A), R: m_Specific(V: B))))
5068 return BinaryOperator::CreateOr(V1: A, V2: B);
5069 // (A ^ B) ^ (A & B) -> (A | B)
5070 if (match(V: Op0, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))) &&
5071 match(V: Op1, P: m_c_And(L: m_Specific(V: A), R: m_Specific(V: B))))
5072 return BinaryOperator::CreateOr(V1: A, V2: B);
5073
5074 // (A & ~B) ^ ~A -> ~(A & B)
5075 // (~B & A) ^ ~A -> ~(A & B)
5076 if (match(V: Op0, P: m_c_And(L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B)))) &&
5077 match(V: Op1, P: m_Not(V: m_Specific(V: A))))
5078 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: A, RHS: B));
5079
5080 // (~A & B) ^ A --> A | B -- There are 4 commuted variants.
5081 if (match(V: &I, P: m_c_Xor(L: m_c_And(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B)), R: m_Deferred(V: A))))
5082 return BinaryOperator::CreateOr(V1: A, V2: B);
5083
5084 // (~A | B) ^ A --> ~(A & B)
5085 if (match(V: Op0, P: m_OneUse(SubPattern: m_c_Or(L: m_Not(V: m_Specific(V: Op1)), R: m_Value(V&: B)))))
5086 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: Op1, RHS: B));
5087
5088 // A ^ (~A | B) --> ~(A & B)
5089 if (match(V: Op1, P: m_OneUse(SubPattern: m_c_Or(L: m_Not(V: m_Specific(V: Op0)), R: m_Value(V&: B)))))
5090 return BinaryOperator::CreateNot(Op: Builder.CreateAnd(LHS: Op0, RHS: B));
5091
5092 // (A | B) ^ (A | C) --> (B ^ C) & ~A -- There are 4 commuted variants.
5093 // TODO: Loosen one-use restriction if common operand is a constant.
5094 Value *D;
5095 if (match(V: Op0, P: m_OneUse(SubPattern: m_Or(L: m_Value(V&: A), R: m_Value(V&: B)))) &&
5096 match(V: Op1, P: m_OneUse(SubPattern: m_Or(L: m_Value(V&: C), R: m_Value(V&: D))))) {
5097 if (B == C || B == D)
5098 std::swap(a&: A, b&: B);
5099 if (A == C)
5100 std::swap(a&: C, b&: D);
5101 if (A == D) {
5102 Value *NotA = Builder.CreateNot(V: A);
5103 return BinaryOperator::CreateAnd(V1: Builder.CreateXor(LHS: B, RHS: C), V2: NotA);
5104 }
5105 }
5106
5107 // (A & B) ^ (A | C) --> A ? ~B : C -- There are 4 commuted variants.
5108 if (I.getType()->isIntOrIntVectorTy(BitWidth: 1) &&
5109 match(V: &I, P: m_c_Xor(L: m_OneUse(SubPattern: m_LogicalAnd(L: m_Value(V&: A), R: m_Value(V&: B))),
5110 R: m_OneUse(SubPattern: m_LogicalOr(L: m_Value(V&: C), R: m_Value(V&: D)))))) {
5111 bool NeedFreeze = isa<SelectInst>(Val: Op0) && isa<SelectInst>(Val: Op1) && B == D;
5112 if (B == C || B == D)
5113 std::swap(a&: A, b&: B);
5114 if (A == C)
5115 std::swap(a&: C, b&: D);
5116 if (A == D) {
5117 if (NeedFreeze)
5118 A = Builder.CreateFreeze(V: A);
5119 Value *NotB = Builder.CreateNot(V: B);
5120 return SelectInst::Create(C: A, S1: NotB, S2: C);
5121 }
5122 }
5123
5124 if (auto *LHS = dyn_cast<ICmpInst>(Val: I.getOperand(i_nocapture: 0)))
5125 if (auto *RHS = dyn_cast<ICmpInst>(Val: I.getOperand(i_nocapture: 1)))
5126 if (Value *V = foldXorOfICmps(LHS, RHS, I))
5127 return replaceInstUsesWith(I, V);
5128
5129 if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
5130 return CastedXor;
5131
5132 if (Instruction *Abs = canonicalizeAbs(Xor&: I, Builder))
5133 return Abs;
5134
5135 // Otherwise, if all else failed, try to hoist the xor-by-constant:
5136 // (X ^ C) ^ Y --> (X ^ Y) ^ C
5137 // Just like we do in other places, we completely avoid the fold
  // for constantexprs, at least to avoid an endless combine loop.
5139 if (match(V: &I, P: m_c_Xor(L: m_OneUse(SubPattern: m_Xor(L: m_CombineAnd(L: m_Value(V&: X),
5140 R: m_Unless(M: m_ConstantExpr())),
5141 R: m_ImmConstant(C&: C1))),
5142 R: m_Value(V&: Y))))
5143 return BinaryOperator::CreateXor(V1: Builder.CreateXor(LHS: X, RHS: Y), V2: C1);
5144
5145 if (Instruction *R = reassociateForUses(BO&: I, Builder))
5146 return R;
5147
5148 if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
5149 return Canonicalized;
5150
5151 if (Instruction *Folded = foldLogicOfIsFPClass(BO&: I, Op0, Op1))
5152 return Folded;
5153
5154 if (Instruction *Folded = canonicalizeConditionalNegationViaMathToSelect(I))
5155 return Folded;
5156
5157 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
5158 return Res;
5159
5160 if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
5161 return Res;
5162
5163 return nullptr;
5164}
5165