//===- InstCombinePHI.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitPHINode function.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

static cl::opt<unsigned>
MaxNumPhis("instcombine-max-num-phis", cl::init(512),
           cl::desc("Maximum number of phis to handle in intptr/ptrint folding"));

STATISTIC(NumPHIsOfInsertValues,
          "Number of phi-of-insertvalue turned into insertvalue-of-phis");
STATISTIC(NumPHIsOfExtractValues,
          "Number of phi-of-extractvalue turned into extractvalue-of-phi");
STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");

/// The PHI arguments will be folded into a single operation with a PHI node
/// as input. The debug location of the single operation will be the merged
/// locations of the original PHI node arguments.
void InstCombinerImpl::PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN) {
  auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  Inst->setDebugLoc(FirstInst->getDebugLoc());
  // We do not expect a CallInst here, otherwise, N-way merging of DebugLoc
  // will be inefficient.
  assert(!isa<CallInst>(Inst));

  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = cast<Instruction>(V);
    Inst->applyMergedLocation(Inst->getDebugLoc(), I->getDebugLoc());
  }
}

/// A phi web is formed by the def-use chain of phis, where every phi in the
/// web is used only by other phis in the web. Such phis are dead, so we
/// remove the whole web.
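/// For example (an illustrative sketch; the IR names are hypothetical):
///   %a = phi i32 [ 0, %entry ], [ %b, %latch ]   ; used only by %b
///   %b = phi i32 [ %a, %body ], [ %a, %latch ]   ; used only by %a
/// Neither value escapes the web, so both phis can be removed.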
bool InstCombinerImpl::foldDeadPhiWeb(PHINode &PN) {
  SmallVector<PHINode *, 16> Stack;
  SmallPtrSet<PHINode *, 16> Visited;
  Stack.push_back(&PN);
  while (!Stack.empty()) {
    PHINode *Phi = Stack.pop_back_val();
    if (!Visited.insert(Phi).second)
      continue;
    // Early stop if the set of PHIs is large
    if (Visited.size() == 16)
      return false;
    for (User *Use : Phi->users()) {
      if (PHINode *PhiUse = dyn_cast<PHINode>(Use))
        Stack.push_back(PhiUse);
      else
        return false;
    }
  }
  for (PHINode *Phi : Visited)
    replaceInstUsesWith(*Phi, PoisonValue::get(Phi->getType()));
  for (PHINode *Phi : Visited)
    eraseInstFromFunction(*Phi);
  return true;
}

// Replace Integer typed PHI PN if the PHI's value is used as a pointer value.
// If there is an existing pointer typed PHI that produces the same value as PN,
// replace PN and the IntToPtr operation with it. Otherwise, synthesize a new
// PHI node:
//
// Case-1:
// bb1:
//     int_init = PtrToInt(ptr_init)
//     br label %bb2
// bb2:
//    int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ptr_val2 = IntToPtr(int_val)
//    ...
//    use(ptr_val2)
//    ptr_val_inc = ...
//    int_val_inc = PtrToInt(ptr_val_inc)
//
// ==>
// bb1:
//     br label %bb2
// bb2:
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ...
//    use(ptr_val)
//    ptr_val_inc = ...
//
// Case-2:
// bb1:
//    int_ptr = BitCast(ptr_ptr)
//    int_init = Load(int_ptr)
//    br label %bb2
// bb2:
//    int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
//    ptr_val2 = IntToPtr(int_val)
//    ...
//    use(ptr_val2)
//    ptr_val_inc = ...
//    int_val_inc = PtrToInt(ptr_val_inc)
// ==>
// bb1:
//    ptr_init = Load(ptr_ptr)
//    br label %bb2
// bb2:
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ...
//    use(ptr_val)
//    ptr_val_inc = ...
//    ...
//
bool InstCombinerImpl::foldIntegerTypedPHI(PHINode &PN) {
  if (!PN.getType()->isIntegerTy())
    return false;
  if (!PN.hasOneUse())
    return false;

  auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
  if (!IntToPtr)
    return false;

  // Check if the pointer is actually used as a pointer:
  auto HasPointerUse = [](Instruction *IIP) {
    for (User *U : IIP->users()) {
      Value *Ptr = nullptr;
      if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
        Ptr = LoadI->getPointerOperand();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        Ptr = SI->getPointerOperand();
      } else if (GetElementPtrInst *GI = dyn_cast<GetElementPtrInst>(U)) {
        Ptr = GI->getPointerOperand();
      }

      if (Ptr && Ptr == IIP)
        return true;
    }
    return false;
  };

  if (!HasPointerUse(IntToPtr))
    return false;

  if (DL.getPointerSizeInBits(IntToPtr->getAddressSpace()) !=
      DL.getTypeSizeInBits(IntToPtr->getOperand(0)->getType()))
    return false;

  SmallVector<Value *, 4> AvailablePtrVals;
  for (auto Incoming : zip(PN.blocks(), PN.incoming_values())) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *Arg = std::get<1>(Incoming);

    // Arg could be a constant, constant expr, etc., which we don't cover here.
    if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
      return false;

    // First look backward:
    if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
      AvailablePtrVals.emplace_back(PI->getOperand(0));
      continue;
    }

    // Next look forward:
    Value *ArgIntToPtr = nullptr;
    for (User *U : Arg->users()) {
      if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
          (DT.dominates(cast<Instruction>(U), BB) ||
           cast<Instruction>(U)->getParent() == BB)) {
        ArgIntToPtr = U;
        break;
      }
    }

    if (ArgIntToPtr) {
      AvailablePtrVals.emplace_back(ArgIntToPtr);
      continue;
    }

    // If Arg is defined by a PHI, allow it. This will also create
    // more opportunities iteratively.
    if (isa<PHINode>(Arg)) {
      AvailablePtrVals.emplace_back(Arg);
      continue;
    }

    // For a single use integer load:
    auto *LoadI = dyn_cast<LoadInst>(Arg);
    if (!LoadI)
      return false;

    if (!LoadI->hasOneUse())
      return false;

    // Push the integer typed Load instruction into the available
    // value set, and fix it up later when the pointer typed PHI
    // is synthesized.
    AvailablePtrVals.emplace_back(LoadI);
  }

  // Now search for a matching PHI
  auto *BB = PN.getParent();
  assert(AvailablePtrVals.size() == PN.getNumIncomingValues() &&
         "Not enough available ptr typed incoming values");
  PHINode *MatchingPtrPHI = nullptr;
  unsigned NumPhis = 0;
  for (PHINode &PtrPHI : BB->phis()) {
    // FIXME: consider handling this in AggressiveInstCombine
    if (NumPhis++ > MaxNumPhis)
      return false;
    if (&PtrPHI == &PN || PtrPHI.getType() != IntToPtr->getType())
      continue;
    if (any_of(zip(PN.blocks(), AvailablePtrVals),
               [&](const auto &BlockAndValue) {
                 BasicBlock *BB = std::get<0>(BlockAndValue);
                 Value *V = std::get<1>(BlockAndValue);
                 return PtrPHI.getIncomingValueForBlock(BB) != V;
               }))
      continue;
    MatchingPtrPHI = &PtrPHI;
    break;
  }

  if (MatchingPtrPHI) {
    assert(MatchingPtrPHI->getType() == IntToPtr->getType() &&
           "Phi's Type does not match with IntToPtr");
    // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
    // to make sure another transform can't undo it in the meantime.
    replaceInstUsesWith(*IntToPtr, MatchingPtrPHI);
    eraseInstFromFunction(*IntToPtr);
    eraseInstFromFunction(PN);
    return true;
  }

  // If it requires a conversion for every PHI operand, do not do it.
  if (all_of(AvailablePtrVals, [&](Value *V) {
        return (V->getType() != IntToPtr->getType()) || isa<IntToPtrInst>(V);
      }))
    return false;

  // If any of the operands that require casting is a terminator
  // instruction, do not do it. Similarly, do not do the transform if the value
  // is a PHI in a block with no insertion point, for example, a catchswitch
  // block, since we will not be able to insert a cast after the PHI.
  if (any_of(AvailablePtrVals, [&](Value *V) {
        if (V->getType() == IntToPtr->getType())
          return false;
        auto *Inst = dyn_cast<Instruction>(V);
        if (!Inst)
          return false;
        if (Inst->isTerminator())
          return true;
        auto *BB = Inst->getParent();
        if (isa<PHINode>(Inst) && BB->getFirstInsertionPt() == BB->end())
          return true;
        return false;
      }))
    return false;

  PHINode *NewPtrPHI = PHINode::Create(
      IntToPtr->getType(), PN.getNumIncomingValues(), PN.getName() + ".ptr");

  InsertNewInstBefore(NewPtrPHI, PN.getIterator());
  SmallDenseMap<Value *, Instruction *> Casts;
  for (auto Incoming : zip(PN.blocks(), AvailablePtrVals)) {
    auto *IncomingBB = std::get<0>(Incoming);
    auto *IncomingVal = std::get<1>(Incoming);

    if (IncomingVal->getType() == IntToPtr->getType()) {
      NewPtrPHI->addIncoming(IncomingVal, IncomingBB);
      continue;
    }

#ifndef NDEBUG
    LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
    assert((isa<PHINode>(IncomingVal) ||
            IncomingVal->getType()->isPointerTy() ||
            (LoadI && LoadI->hasOneUse())) &&
           "Can not replace LoadInst with multiple uses");
#endif
    // Need to insert a BitCast.
    // For an integer Load instruction with a single use, the load + IntToPtr
    // cast will be simplified into a pointer load:
    // %v = load i64, i64* %a.ip, align 8
    // %v.cast = inttoptr i64 %v to float **
    // ==>
    // %v.ptrp = bitcast i64 * %a.ip to float **
    // %v.cast = load float *, float ** %v.ptrp, align 8
    Instruction *&CI = Casts[IncomingVal];
    if (!CI) {
      CI = CastInst::CreateBitOrPointerCast(IncomingVal, IntToPtr->getType(),
                                            IncomingVal->getName() + ".ptr");
      if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
        BasicBlock::iterator InsertPos(IncomingI);
        InsertPos++;
        BasicBlock *BB = IncomingI->getParent();
        if (isa<PHINode>(IncomingI))
          InsertPos = BB->getFirstInsertionPt();
        assert(InsertPos != BB->end() && "should have checked above");
        InsertNewInstBefore(CI, InsertPos);
      } else {
        auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
        InsertNewInstBefore(CI, InsertBB->getFirstInsertionPt());
      }
    }
    NewPtrPHI->addIncoming(CI, IncomingBB);
  }

  // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
  // to make sure another transform can't undo it in the meantime.
  replaceInstUsesWith(*IntToPtr, NewPtrPHI);
  eraseInstFromFunction(*IntToPtr);
  eraseInstFromFunction(PN);
  return true;
}

// Remove a round-trip IntToPtr/PtrToInt cast on a PHI operand, folding the
// PHI operand back to the original pointer.
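// For example (an illustrative sketch; the IR names are hypothetical):
//   %i   = ptrtoint ptr %x to i64
//   %p   = inttoptr i64 %i to ptr
//   %phi = phi ptr [ %p, %bb0 ], ...
//   %r   = ptrtoint ptr %phi to i64
// The round-tripped operand %p can be replaced by %x directly.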
Instruction *InstCombinerImpl::foldPHIArgIntToPtrToPHI(PHINode &PN) {
  // convert ptr2int ( phi[ int2ptr(ptr2int(x))] ) --> ptr2int ( phi [ x ] )
  // Make sure all uses of phi are ptr2int.
  if (!all_of(PN.users(), [](User *U) { return isa<PtrToIntInst>(U); }))
    return nullptr;

  // Iterate over all operands, checking whether any carries a round-trip
  // cast that we can simplify away.
  bool OperandWithRoundTripCast = false;
  for (unsigned OpNum = 0; OpNum != PN.getNumIncomingValues(); ++OpNum) {
    if (auto *NewOp =
            simplifyIntToPtrRoundTripCast(PN.getIncomingValue(OpNum))) {
      replaceOperand(PN, OpNum, NewOp);
      OperandWithRoundTripCast = true;
    }
  }
  if (!OperandWithRoundTripCast)
    return nullptr;
  return &PN;
}

/// If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)],
/// turn this into a phi[a,c] and phi[b,d] and a single insertvalue.
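/// For example (an illustrative sketch; the IR names are hypothetical):
///   %i0 = insertvalue { i32, i32 } %a, i32 %b, 0
///   %i1 = insertvalue { i32, i32 } %c, i32 %d, 0
///   %r  = phi { i32, i32 } [ %i0, %bb0 ], [ %i1, %bb1 ]
/// becomes
///   %agg = phi { i32, i32 } [ %a, %bb0 ], [ %c, %bb1 ]
///   %val = phi i32 [ %b, %bb0 ], [ %d, %bb1 ]
///   %r   = insertvalue { i32, i32 } %agg, i32 %val, 0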
Instruction *
InstCombinerImpl::foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN) {
  auto *FirstIVI = cast<InsertValueInst>(PN.getIncomingValue(0));

  // Scan to see if all operands are `insertvalue`'s with the same indices,
  // and all have a single use.
  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = dyn_cast<InsertValueInst>(V);
    if (!I || !I->hasOneUser() || I->getIndices() != FirstIVI->getIndices())
      return nullptr;
  }

  // For each operand of an `insertvalue`
  std::array<PHINode *, 2> NewOperands;
  for (int OpIdx : {0, 1}) {
    auto *&NewOperand = NewOperands[OpIdx];
    // Create a new PHI node to receive the values the operand has in each
    // incoming basic block.
    NewOperand = PHINode::Create(
        FirstIVI->getOperand(OpIdx)->getType(), PN.getNumIncomingValues(),
        FirstIVI->getOperand(OpIdx)->getName() + ".pn");
    // And populate each operand's PHI with said values.
    for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
      NewOperand->addIncoming(
          cast<InsertValueInst>(std::get<1>(Incoming))->getOperand(OpIdx),
          std::get<0>(Incoming));
    InsertNewInstBefore(NewOperand, PN.getIterator());
  }

  // And finally, create `insertvalue` over the newly-formed PHI nodes.
  auto *NewIVI = InsertValueInst::Create(NewOperands[0], NewOperands[1],
                                         FirstIVI->getIndices(), PN.getName());

  PHIArgMergedDebugLoc(NewIVI, PN);
  ++NumPHIsOfInsertValues;
  return NewIVI;
}

/// If we have something like phi [extractvalue(a,0), extractvalue(b,0)],
/// turn this into a phi[a,b] and a single extractvalue.
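/// For example (an illustrative sketch; the IR names are hypothetical):
///   %e0 = extractvalue { i32, i32 } %a, 0
///   %e1 = extractvalue { i32, i32 } %b, 0
///   %r  = phi i32 [ %e0, %bb0 ], [ %e1, %bb1 ]
/// becomes
///   %agg = phi { i32, i32 } [ %a, %bb0 ], [ %b, %bb1 ]
///   %r   = extractvalue { i32, i32 } %agg, 0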
Instruction *
InstCombinerImpl::foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN) {
  auto *FirstEVI = cast<ExtractValueInst>(PN.getIncomingValue(0));

  // Scan to see if all operands are `extractvalue`'s with the same indices,
  // and all have a single use.
  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = dyn_cast<ExtractValueInst>(V);
    if (!I || !I->hasOneUser() || I->getIndices() != FirstEVI->getIndices() ||
        I->getAggregateOperand()->getType() !=
            FirstEVI->getAggregateOperand()->getType())
      return nullptr;
  }

  // Create a new PHI node to receive the values the aggregate operand has
  // in each incoming basic block.
  auto *NewAggregateOperand = PHINode::Create(
      FirstEVI->getAggregateOperand()->getType(), PN.getNumIncomingValues(),
      FirstEVI->getAggregateOperand()->getName() + ".pn");
  // And populate the PHI with said values.
  for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
    NewAggregateOperand->addIncoming(
        cast<ExtractValueInst>(std::get<1>(Incoming))->getAggregateOperand(),
        std::get<0>(Incoming));
  InsertNewInstBefore(NewAggregateOperand, PN.getIterator());

  // And finally, create `extractvalue` over the newly-formed PHI nodes.
  auto *NewEVI = ExtractValueInst::Create(NewAggregateOperand,
                                          FirstEVI->getIndices(), PN.getName());

  PHIArgMergedDebugLoc(NewEVI, PN);
  ++NumPHIsOfExtractValues;
  return NewEVI;
}

/// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
/// adds all have a single user, turn this into a phi and a single binop.
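/// For example (an illustrative sketch; the IR names are hypothetical):
///   %x = add i32 %a, %b
///   %y = add i32 %a, %c
///   %r = phi i32 [ %x, %bb0 ], [ %y, %bb1 ]
/// becomes
///   %rhs = phi i32 [ %b, %bb0 ], [ %c, %bb1 ]
///   %r   = add i32 %a, %rhs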
Instruction *InstCombinerImpl::foldPHIArgBinOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  Type *LHSType = LHSVal->getType();
  Type *RHSType = RHSVal->getType();

  // Scan to see if all operands are the same opcode, and all have one user.
  for (Value *V : drop_begin(PN.incoming_values())) {
    Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getOpcode() != Opc || !I->hasOneUser() ||
        // Verify type of the LHS matches so we don't fold cmp's of different
        // types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return nullptr;

    // If they are CmpInst instructions, check their predicates
    if (CmpInst *CI = dyn_cast<CmpInst>(I))
      if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
        return nullptr;

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
    if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
  }

  // If both LHS and RHS would need a PHI, don't do this transformation,
  // because it would increase the number of PHIs entering the block,
  // which leads to higher register pressure. This is especially
  // bad when the PHIs are in the header of a loop.
  if (!LHSVal && !RHSVal)
    return nullptr;

  // Otherwise, this is safe to transform!

  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
  if (!LHSVal) {
    NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(0)->getName() + ".pn");
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN.getIterator());
    LHSVal = NewLHS;
  }

  if (!RHSVal) {
    NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(1)->getName() + ".pn");
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN.getIterator());
    RHSVal = NewRHS;
  }

  // Add all operands to the new PHIs.
  if (NewLHS || NewRHS) {
    for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
      BasicBlock *InBB = std::get<0>(Incoming);
      Value *InVal = std::get<1>(Incoming);
      Instruction *InInst = cast<Instruction>(InVal);
      if (NewLHS) {
        Value *NewInLHS = InInst->getOperand(0);
        NewLHS->addIncoming(NewInLHS, InBB);
      }
      if (NewRHS) {
        Value *NewInRHS = InInst->getOperand(1);
        NewRHS->addIncoming(NewInRHS, InBB);
      }
    }
  }

  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
    CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                     LHSVal, RHSVal);
    PHIArgMergedDebugLoc(NewCI, PN);
    return NewCI;
  }

  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
  BinaryOperator *NewBinOp =
      BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);

  NewBinOp->copyIRFlags(PN.getIncomingValue(0));

  for (Value *V : drop_begin(PN.incoming_values()))
    NewBinOp->andIRFlags(V);

  PHIArgMergedDebugLoc(NewBinOp, PN);
  return NewBinOp;
}

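/// If all incoming values to the phi are getelementptrs with the same source
/// element type and operand count, merge them into one GEP over PHI'd
/// operands. For example (an illustrative sketch; the IR names are
/// hypothetical):
///   %g0 = getelementptr inbounds i32, ptr %base, i64 %i
///   %g1 = getelementptr inbounds i32, ptr %base, i64 %j
///   %r  = phi ptr [ %g0, %bb0 ], [ %g1, %bb1 ]
/// becomes
///   %idx = phi i64 [ %i, %bb0 ], [ %j, %bb1 ]
///   %r   = getelementptr inbounds i32, ptr %base, i64 %idx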
Instruction *InstCombinerImpl::foldPHIArgGEPIntoPHI(PHINode &PN) {
  GetElementPtrInst *FirstInst =
      cast<GetElementPtrInst>(PN.getIncomingValue(0));

  SmallVector<Value *, 16> FixedOperands(FirstInst->op_begin(),
                                         FirstInst->op_end());
  // This is true if all GEP bases are allocas and if all indices into them are
  // constants.
  bool AllBasePointersAreAllocas = true;

  // We don't want to replace this phi if the replacement would require
  // more than one phi, which leads to higher register pressure. This is
  // especially bad when the PHIs are in the header of a loop.
  bool NeededPhi = false;

  // Remember flags of the first phi-operand getelementptr.
  GEPNoWrapFlags NW = FirstInst->getNoWrapFlags();

  // Scan to see if all operands are the same opcode, and all have one user.
  for (Value *V : drop_begin(PN.incoming_values())) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V);
    if (!GEP || !GEP->hasOneUser() ||
        GEP->getSourceElementType() != FirstInst->getSourceElementType() ||
        GEP->getNumOperands() != FirstInst->getNumOperands())
      return nullptr;

    NW &= GEP->getNoWrapFlags();

    // Keep track of whether or not all GEPs are of alloca pointers.
    if (AllBasePointersAreAllocas &&
        (!isa<AllocaInst>(GEP->getOperand(0)) ||
         !GEP->hasAllConstantIndices()))
      AllBasePointersAreAllocas = false;

    // Compare the operand lists.
    for (unsigned Op = 0, E = FirstInst->getNumOperands(); Op != E; ++Op) {
      if (FirstInst->getOperand(Op) == GEP->getOperand(Op))
        continue;

      // Don't merge two GEPs when two operands differ (introducing phi nodes)
      // if one of the PHIs has a constant for the index. The index may be
      // substantially cheaper to compute for the constants, so making it a
      // variable index could pessimize the path. This also handles the case
      // for struct indices, which must always be constant.
      if (isa<Constant>(FirstInst->getOperand(Op)) ||
          isa<Constant>(GEP->getOperand(Op)))
        return nullptr;

      if (FirstInst->getOperand(Op)->getType() !=
          GEP->getOperand(Op)->getType())
        return nullptr;

      // If we already needed a PHI for an earlier operand, and another operand
      // also requires a PHI, we'd be introducing more PHIs than we're
      // eliminating, which increases register pressure on entry to the PHI's
      // block.
      if (NeededPhi)
        return nullptr;

      FixedOperands[Op] = nullptr; // Needs a PHI.
      NeededPhi = true;
    }
  }

  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
  // bother doing this transformation. At best, this will just save a bit of
  // offset calculation, but all the predecessors will have to materialize the
  // stack address into a register anyway. We'd actually rather *clone* the
  // load up into the predecessors so that we have a load of a gep of an
  // alloca, which can usually all be folded into the load.
  if (AllBasePointersAreAllocas)
    return nullptr;

  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
  // that is variable.
  SmallVector<PHINode *, 16> OperandPhis(FixedOperands.size());

  bool HasAnyPHIs = false;
  for (unsigned I = 0, E = FixedOperands.size(); I != E; ++I) {
    if (FixedOperands[I])
      continue; // operand doesn't need a phi.
    Value *FirstOp = FirstInst->getOperand(I);
    PHINode *NewPN =
        PHINode::Create(FirstOp->getType(), E, FirstOp->getName() + ".pn");
    InsertNewInstBefore(NewPN, PN.getIterator());

    NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
    OperandPhis[I] = NewPN;
    FixedOperands[I] = NewPN;
    HasAnyPHIs = true;
  }

  // Add all operands to the new PHIs.
  if (HasAnyPHIs) {
    for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
      BasicBlock *InBB = std::get<0>(Incoming);
      Value *InVal = std::get<1>(Incoming);
      GetElementPtrInst *InGEP = cast<GetElementPtrInst>(InVal);

      for (unsigned Op = 0, E = OperandPhis.size(); Op != E; ++Op)
        if (PHINode *OpPhi = OperandPhis[Op])
          OpPhi->addIncoming(InGEP->getOperand(Op), InBB);
    }
  }

  Value *Base = FixedOperands[0];
  GetElementPtrInst *NewGEP =
      GetElementPtrInst::Create(FirstInst->getSourceElementType(), Base,
                                ArrayRef(FixedOperands).slice(1), NW);
  PHIArgMergedDebugLoc(NewGEP, PN);
  return NewGEP;
}

/// Return true if we know that it is safe to sink the load out of the block
/// that defines it. This means that it must be obvious the value of the load is
/// not changed from the point of the load to the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targeting a
/// non-address-taken alloca. Doing so will cause us to not promote the alloca
/// to a register.
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory()) {
      // Calls that only access inaccessible memory do not block sinking the
      // load.
      if (auto *CB = dyn_cast<CallBase>(BBI))
        if (CB->onlyAccessesInaccessibleMemory())
          continue;
      return false;
    }

  // Check for a non-address-taken alloca. If the alloca is not address-taken,
  // sinking the load would keep it from being promoted to a register, so the
  // transform isn't profitable.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool IsAddressTaken = false;
    for (User *U : AI->users()) {
      if (isa<LoadInst>(U)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      IsAddressTaken = true;
      break;
    }

    if (!IsAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it. In its present form, it will be
  // load [constant stack offset]. Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}

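/// Sink loads that feed a phi into the phi's block, replacing the loaded
/// pointers with a phi. For example (an illustrative sketch; the IR names are
/// hypothetical):
///   %v0 = load i32, ptr %p0        ; in %bb0
///   %v1 = load i32, ptr %p1        ; in %bb1
///   %r  = phi i32 [ %v0, %bb0 ], [ %v1, %bb1 ]
/// becomes
///   %p  = phi ptr [ %p0, %bb0 ], [ %p1, %bb1 ]
///   %r  = load i32, ptr %p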
Instruction *InstCombinerImpl::foldPHIArgLoadIntoPHI(PHINode &PN) {
  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));

  // Can't forward swifterror through a phi.
  if (FirstLI->getOperand(0)->isSwiftError())
    return nullptr;

  // FIXME: This is overconservative; this transform is allowed in some cases
  // for atomic operations.
  if (FirstLI->isAtomic())
    return nullptr;

  // When processing loads, we need to propagate two bits of information to the
  // sunk load: whether it is volatile, and what its alignment is.
  bool IsVolatile = FirstLI->isVolatile();
  Align LoadAlignment = FirstLI->getAlign();
  const unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();

  // We can't sink the load if the loaded value could be modified between the
  // load and the PHI.
  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
      !isSafeAndProfitableToSinkLoad(FirstLI))
    return nullptr;

  // If the PHI is of volatile loads and the load block has multiple
  // successors, sinking it would remove a load of the volatile value from
  // the path through the other successor.
  if (IsVolatile &&
      FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *InBB = std::get<0>(Incoming);
    Value *InVal = std::get<1>(Incoming);
    LoadInst *LI = dyn_cast<LoadInst>(InVal);
    if (!LI || !LI->hasOneUser() || LI->isAtomic())
      return nullptr;

    // Make sure all arguments are the same type of operation.
    if (LI->isVolatile() != IsVolatile ||
        LI->getPointerAddressSpace() != LoadAddrSpace)
      return nullptr;

    // Can't forward swifterror through a phi.
    if (LI->getOperand(0)->isSwiftError())
      return nullptr;

    // We can't sink the load if the loaded value could be modified between
    // the load and the PHI.
    if (LI->getParent() != InBB || !isSafeAndProfitableToSinkLoad(LI))
      return nullptr;

    LoadAlignment = std::min(LoadAlignment, LI->getAlign());

    // If the PHI is of volatile loads and the load block has multiple
    // successors, sinking it would remove a load of the volatile value from
    // the path through the other successor.
    if (IsVolatile && LI->getParent()->getTerminator()->getNumSuccessors() != 1)
      return nullptr;
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName() + ".in");

  Value *InVal = FirstLI->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
  LoadInst *NewLI =
      new LoadInst(FirstLI->getType(), NewPN, "", IsVolatile, LoadAlignment);
  NewLI->copyMetadata(*FirstLI);

  // Add all operands to the new PHI and combine TBAA metadata.
  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *V = std::get<1>(Incoming);
    LoadInst *LI = cast<LoadInst>(V);
    combineMetadataForCSE(NewLI, LI, true);
    Value *NewInVal = LI->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, BB);
  }

  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    NewLI->setOperand(0, InVal);
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN.getIterator());
  }

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile. If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (IsVolatile)
    for (Value *IncValue : PN.incoming_values())
      cast<LoadInst>(IncValue)->setVolatile(false);

  PHIArgMergedDebugLoc(NewLI, PN);
  return NewLI;
}

/// TODO: This function could handle other cast types, but then it might
/// require special-casing a cast from the 'i1' type. See the comment in
/// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
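/// For example (an illustrative sketch; the IR names are hypothetical):
///   %z0 = zext i8 %a to i32
///   %z1 = zext i8 %b to i32
///   %r  = phi i32 [ %z0, %bb0 ], [ %z1, %bb1 ], [ 7, %bb2 ]
/// becomes
///   %n  = phi i8 [ %a, %bb0 ], [ %b, %bb1 ], [ 7, %bb2 ]
///   %r  = zext i8 %n to i32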
Instruction *InstCombinerImpl::foldPHIArgZextsIntoPHI(PHINode &Phi) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (Instruction *TI = Phi.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  // Early exit for the common case of a phi with two operands. These are
  // handled elsewhere. See the comment below where we check the count of zexts
  // and constants for more details.
  unsigned NumIncomingValues = Phi.getNumIncomingValues();
  if (NumIncomingValues < 3)
    return nullptr;

  // Find the narrower type specified by the first zext.
  Type *NarrowType = nullptr;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      NarrowType = Zext->getSrcTy();
      break;
    }
  }
  if (!NarrowType)
    return nullptr;

  // Walk the phi operands checking that we only have zexts or constants that
  // we can shrink for free. Store the new operands for the new phi.
  SmallVector<Value *, 4> NewIncoming;
  unsigned NumZexts = 0;
  unsigned NumConsts = 0;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      // All zexts must be identical and have one user.
      if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUser())
        return nullptr;
      NewIncoming.push_back(Zext->getOperand(0));
      NumZexts++;
    } else if (auto *C = dyn_cast<Constant>(V)) {
      // Make sure that constants can fit in the new type.
      Constant *Trunc = getLosslessUnsignedTrunc(C, NarrowType);
      if (!Trunc)
        return nullptr;
      NewIncoming.push_back(Trunc);
      NumConsts++;
    } else {
      // If it's not a cast or a constant, bail out.
      return nullptr;
    }
  }

  // The more common cases of a phi with no constant operands or just one
  // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
  // respectively. foldOpIntoPhi() wants to do the opposite transform that is
  // performed here. It tries to replicate a cast in the phi operand's basic
  // block to expose other folding opportunities. Thus, InstCombine will
  // infinite loop without this check.
  if (NumConsts == 0 || NumZexts < 2)
    return nullptr;

  // All incoming values are zexts or constants that are safe to truncate.
  // Create a new phi node of the narrow type, phi together all of the new
  // operands, and zext the result back to the original type.
  PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
                                    Phi.getName() + ".shrunk");
  for (unsigned I = 0; I != NumIncomingValues; ++I)
    NewPhi->addIncoming(NewIncoming[I], Phi.getIncomingBlock(I));

  InsertNewInstBefore(NewPhi, Phi.getIterator());
  auto *CI = CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());

  // We use a dropped location here because the new ZExt is necessarily a merge
  // of ZExtInsts and at least one constant from incoming branches; the
  // presence of the constant means we have no viable DebugLoc from that
  // branch, and therefore we must use a dropped location.
  CI->setDebugLoc(DebugLoc::getDropped());
  return CI;
}

/// If all operands to a PHI node are the same "unary" operator and they all are
/// only used by the PHI, PHI together their inputs and perform the operation
/// once, on the result of the PHI.
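/// For example (an illustrative sketch; the IR names are hypothetical):
///   %t0 = trunc i64 %a to i32
///   %t1 = trunc i64 %b to i32
///   %r  = phi i32 [ %t0, %bb0 ], [ %t1, %bb1 ]
/// becomes
///   %w  = phi i64 [ %a, %bb0 ], [ %b, %bb1 ]
///   %r  = trunc i64 %w to i32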
Instruction *InstCombinerImpl::foldPHIArgOpIntoPHI(PHINode &PN) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (Instruction *TI = PN.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  if (isa<GetElementPtrInst>(FirstInst))
    return foldPHIArgGEPIntoPHI(PN);
  if (isa<LoadInst>(FirstInst))
    return foldPHIArgLoadIntoPHI(PN);
  if (isa<InsertValueInst>(FirstInst))
    return foldPHIArgInsertValueInstructionIntoPHI(PN);
  if (isa<ExtractValueInst>(FirstInst))
    return foldPHIArgExtractValueInstructionIntoPHI(PN);

  // Scan the instruction, looking for input operations that can be folded
  // away. If all input operands to the phi are the same instruction (e.g. a
  // cast from the same type or "+42") we can pull the operation through the
  // PHI, reducing code size and simplifying code.
  Constant *ConstantOp = nullptr;
  Type *CastSrcTy = nullptr;

  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();

    // Be careful about transforming integer PHIs. We don't want to pessimize
    // the code by turning an i32 into an i1293.
    if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
      if (!shouldChangeType(PN.getType(), CastSrcTy))
        return nullptr;
    }
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (!ConstantOp)
      return foldPHIArgBinOpIntoPHI(PN);
  } else {
    return nullptr; // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (Value *V : drop_begin(PN.incoming_values())) {
    Instruction *I = dyn_cast<Instruction>(V);
    if (!I || !I->hasOneUser() || !I->isSameOperationAs(FirstInst))
      return nullptr;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return nullptr; // Cast operation must match.
    } else if (I->getOperand(1) != ConstantOp) {
      return nullptr;
    }
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName() + ".in");

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *V = std::get<1>(Incoming);
    Value *NewInVal = cast<Instruction>(V)->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, BB);
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN.getIterator());
    PhiVal = NewPN;
  }

  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
    CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
                                       PN.getType());
    PHIArgMergedDebugLoc(NewCI, PN);
    return NewCI;
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
    BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
    BinOp->copyIRFlags(PN.getIncomingValue(0));

    for (Value *V : drop_begin(PN.incoming_values()))
      BinOp->andIRFlags(V);

    PHIArgMergedDebugLoc(BinOp, PN);
    return BinOp;
  }

  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                   PhiVal, ConstantOp);
  PHIArgMergedDebugLoc(NewCI, PN);
  return NewCI;
}

/// Return true if this phi node is always equal to NonPhiInVal.
/// This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *&NonPhiInVal,
                           SmallPtrSetImpl<PHINode *> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (Value *Op : PN->incoming_values()) {
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs)) {
        if (NonPhiInVal)
          return false;
        NonPhiInVal = OpPN;
      }
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}

/// Return an existing non-zero constant if this phi node has one, otherwise
/// return constant 1.
static ConstantInt *getAnyNonZeroConstInt(PHINode &PN) {
  assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
  for (Value *V : PN.operands())
    if (auto *ConstVA = dyn_cast<ConstantInt>(V))
      if (!ConstVA->isZero())
        return ConstVA;
  return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
}

namespace {
struct PHIUsageRecord {
  unsigned PHIId;     // The ID # of the PHI (something deterministic to sort on)
  unsigned Shift;     // The amount shifted.
  Instruction *Inst;  // The trunc instruction.

  PHIUsageRecord(unsigned Pn, unsigned Sh, Instruction *User)
      : PHIId(Pn), Shift(Sh), Inst(User) {}

  bool operator<(const PHIUsageRecord &RHS) const {
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
    return Inst->getType()->getPrimitiveSizeInBits() <
           RHS.Inst->getType()->getPrimitiveSizeInBits();
  }
};

struct LoweredPHIRecord {
  PHINode *PN;        // The PHI that was lowered.
  unsigned Shift;     // The amount shifted.
  unsigned Width;     // The width extracted.

  LoweredPHIRecord(PHINode *Phi, unsigned Sh, Type *Ty)
      : PN(Phi), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}

  // Ctor form used by DenseMap.
  LoweredPHIRecord(PHINode *Phi, unsigned Sh) : PN(Phi), Shift(Sh), Width(0) {}
};
} // namespace

namespace llvm {
  template<>
  struct DenseMapInfo<LoweredPHIRecord> {
    static inline LoweredPHIRecord getEmptyKey() {
      return LoweredPHIRecord(nullptr, 0);
    }
    static inline LoweredPHIRecord getTombstoneKey() {
      return LoweredPHIRecord(nullptr, 1);
    }
    static unsigned getHashValue(const LoweredPHIRecord &Val) {
      return DenseMapInfo<PHINode *>::getHashValue(Val.PN) ^ (Val.Shift >> 3) ^
             (Val.Width >> 3);
    }
    static bool isEqual(const LoweredPHIRecord &LHS,
                        const LoweredPHIRecord &RHS) {
      return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
             LHS.Width == RHS.Width;
    }
  };
} // namespace llvm


/// This is an integer PHI and we know that it has an illegal type: see if it is
/// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
/// the various pieces being extracted. This sort of thing is introduced when
/// SROA promotes an aggregate to large integer values.
///
/// TODO: The user of the trunc may be a bitcast to float/double/vector or an
/// inttoptr. We should produce new PHIs in the right type.
///
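/// For example (an illustrative sketch; the IR names are hypothetical):
///   %r  = phi i128 [ %a, %bb0 ], [ %b, %bb1 ]
///   %lo = trunc i128 %r to i64
///   %s  = lshr i128 %r, 64
///   %hi = trunc i128 %s to i64
/// is split into one i64 phi for the low half and one for the high half,
/// with the lshr+trunc extraction done in each predecessor.
///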
Instruction *InstCombinerImpl::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
  // PHIUsers - Keep track of all of the truncated values extracted from a set
  // of PHIs, along with their offset. These are the things we want to rewrite.
  SmallVector<PHIUsageRecord, 16> PHIUsers;

  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
  // nodes which are extracted from. PHIsInspected is the set we use to avoid
  // revisiting PHIs; PHIsToSlice is the ordered list of PHIs whose uses we
  // still need to check (to ensure they are all extracts).
  SmallVector<PHINode *, 8> PHIsToSlice;
  SmallPtrSet<PHINode *, 8> PHIsInspected;

  PHIsToSlice.push_back(&FirstPhi);
  PHIsInspected.insert(&FirstPhi);

  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
    PHINode *PN = PHIsToSlice[PHIId];

    // Scan the input list of the PHI. If any input is an invoke, and if the
    // input is defined in the predecessor, then we won't be able to split the
    // critical edge that is required to insert a truncate. Because of this,
    // we have to bail out.
    for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
      BasicBlock *BB = std::get<0>(Incoming);
      Value *V = std::get<1>(Incoming);
      InvokeInst *II = dyn_cast<InvokeInst>(V);
      if (!II)
        continue;
      if (II->getParent() != BB)
        continue;

      // The invoke is defined directly in the predecessor, so the use is on a
      // critical edge where we would need to put the truncate. Since we can't
      // split the edge in instcombine, we have to bail out.
      return nullptr;
    }

    // If the incoming value is a PHI node before a catchswitch, we cannot
    // extract the value within that BB because we cannot insert any non-PHI
    // instructions in the BB.
    for (auto *Pred : PN->blocks())
      if (Pred->getFirstInsertionPt() == Pred->end())
        return nullptr;

    for (User *U : PN->users()) {
      Instruction *UserI = cast<Instruction>(U);

      // If the user is a PHI, inspect its uses recursively.
      if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
        if (PHIsInspected.insert(UserPN).second)
          PHIsToSlice.push_back(UserPN);
        continue;
      }

      // Truncates are always ok.
      if (isa<TruncInst>(UserI)) {
        PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
        continue;
      }

      // Otherwise it must be a lshr which can only be used by one trunc.
      if (UserI->getOpcode() != Instruction::LShr ||
          !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
          !isa<ConstantInt>(UserI->getOperand(1)))
        return nullptr;

      // Bail on out of range shifts.
      unsigned SizeInBits = UserI->getType()->getScalarSizeInBits();
      if (cast<ConstantInt>(UserI->getOperand(1))->getValue().uge(SizeInBits))
        return nullptr;

      unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
      PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
    }
  }

  // If we have no users, they must all be self uses; just nuke the PHI.
  if (PHIUsers.empty())
    return replaceInstUsesWith(FirstPhi, PoisonValue::get(FirstPhi.getType()));

  // If this phi node is transformable, create new PHIs for all the pieces
  // extracted out of it. First, sort the users by their offset and size.
  array_pod_sort(PHIUsers.begin(), PHIUsers.end());

  LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
             for (unsigned I = 1; I != PHIsToSlice.size(); ++I) dbgs()
             << "AND USER PHI #" << I << ": " << *PHIsToSlice[I] << '\n');

  // PredValues - This is a temporary used when rewriting PHI nodes. It is
  // hoisted out here to avoid construction/destruction thrashing.
  DenseMap<BasicBlock *, Value *> PredValues;

  // ExtractedVals - Each new PHI we introduce is saved here so we don't
  // introduce redundant PHIs.
  DenseMap<LoweredPHIRecord, PHINode *> ExtractedVals;

  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
    unsigned PHIId = PHIUsers[UserI].PHIId;
    PHINode *PN = PHIsToSlice[PHIId];
    unsigned Offset = PHIUsers[UserI].Shift;
    Type *Ty = PHIUsers[UserI].Inst->getType();

    PHINode *EltPHI;

    // If we've already lowered a user like this, reuse the previously lowered
    // value.
    if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {

      // Otherwise, create the new PHI node for this user.
      EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
                               PN->getName() + ".off" + Twine(Offset),
                               PN->getIterator());
      assert(EltPHI->getType() != PN->getType() &&
             "Truncate didn't shrink phi?");

      for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
        BasicBlock *Pred = std::get<0>(Incoming);
        Value *InVal = std::get<1>(Incoming);
        Value *&PredVal = PredValues[Pred];

        // If we already have a value for this predecessor, reuse it.
        if (PredVal) {
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // Handle the PHI self-reuse case.
        if (InVal == PN) {
          PredVal = EltPHI;
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        if (PHINode *InPHI = dyn_cast<PHINode>(InVal)) {
          // If the incoming value was a PHI, and if it was one of the PHIs we
          // already rewrote, just use the lowered value.
          if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
            PredVal = Res;
            EltPHI->addIncoming(PredVal, Pred);
            continue;
          }
        }

        // Otherwise, do an extract in the predecessor.
        Builder.SetInsertPoint(Pred->getTerminator());
        Value *Res = InVal;
        if (Offset)
          Res = Builder.CreateLShr(
              Res, ConstantInt::get(InVal->getType(), Offset), "extract");
        Res = Builder.CreateTrunc(Res, Ty, "extract.t");
        PredVal = Res;
        EltPHI->addIncoming(Res, Pred);

        // If the incoming value was a PHI, and if it was one of the PHIs we
        // are rewriting, we will ultimately delete the code we inserted. This
        // means we need to revisit that PHI to make sure we extract out the
        // needed piece.
        if (PHINode *OldInVal = dyn_cast<PHINode>(InVal))
          if (PHIsInspected.count(OldInVal)) {
            unsigned RefPHIId =
                find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
            PHIUsers.push_back(
                PHIUsageRecord(RefPHIId, Offset, cast<Instruction>(Res)));
            ++UserE;
          }
      }
      PredValues.clear();

      LLVM_DEBUG(dbgs() << "  Made element PHI for offset " << Offset << ": "
                        << *EltPHI << '\n');
      ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
    }

    // Replace the use of this piece with the PHI node.
    replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
  }

  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
  // with poison.
  Value *Poison = PoisonValue::get(FirstPhi.getType());
  for (PHINode *PHI : drop_begin(PHIsToSlice))
    replaceInstUsesWith(*PHI, Poison);
  return replaceInstUsesWith(FirstPhi, Poison);
}

static Value *simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN,
                                       const DominatorTree &DT) {
  // Simplify the following patterns:
  //       if (cond)
  //       /       \
  //      ...      ...
  //       \       /
  //    phi [true] [false]
  // and
  //        switch (cond)
  // case v1: /       \ case v2:
  //         ...      ...
  //          \       /
  //       phi [v1] [v2]
  // Make sure all inputs are constants.
  if (!all_of(PN.operands(), [](Value *V) { return isa<ConstantInt>(V); }))
    return nullptr;

  BasicBlock *BB = PN.getParent();
  // Do not bother with unreachable instructions.
  if (!DT.isReachableFromEntry(BB))
    return nullptr;

  // Determine which value the condition of the idom has for which successor.
  LLVMContext &Context = PN.getContext();
  auto *IDom = DT.getNode(BB)->getIDom()->getBlock();
  Value *Cond;
  SmallDenseMap<ConstantInt *, BasicBlock *, 8> SuccForValue;
  SmallDenseMap<BasicBlock *, unsigned, 8> SuccCount;
  auto AddSucc = [&](ConstantInt *C, BasicBlock *Succ) {
    SuccForValue[C] = Succ;
    ++SuccCount[Succ];
  };
  if (auto *BI = dyn_cast<BranchInst>(IDom->getTerminator())) {
    if (BI->isUnconditional())
      return nullptr;

    Cond = BI->getCondition();
    AddSucc(ConstantInt::getTrue(Context), BI->getSuccessor(0));
    AddSucc(ConstantInt::getFalse(Context), BI->getSuccessor(1));
  } else if (auto *SI = dyn_cast<SwitchInst>(IDom->getTerminator())) {
    Cond = SI->getCondition();
    ++SuccCount[SI->getDefaultDest()];
    for (auto Case : SI->cases())
      AddSucc(Case.getCaseValue(), Case.getCaseSuccessor());
  } else {
    return nullptr;
  }

  if (Cond->getType() != PN.getType())
    return nullptr;

  // Check that edges outgoing from the idom's terminators dominate respective
  // inputs of the Phi.
  std::optional<bool> Invert;
  for (auto Pair : zip(PN.incoming_values(), PN.blocks())) {
    auto *Input = cast<ConstantInt>(std::get<0>(Pair));
    BasicBlock *Pred = std::get<1>(Pair);
    auto IsCorrectInput = [&](ConstantInt *Input) {
      // The input needs to be dominated by the corresponding edge of the idom.
      // This edge cannot be a multi-edge, as that would imply that multiple
      // different condition values follow the same edge.
      auto It = SuccForValue.find(Input);
      return It != SuccForValue.end() && SuccCount[It->second] == 1 &&
             DT.dominates(BasicBlockEdge(IDom, It->second),
                          BasicBlockEdge(Pred, BB));
    };

    // Depending on the constant, the condition may need to be inverted.
    bool NeedsInvert;
    if (IsCorrectInput(Input))
      NeedsInvert = false;
    else if (IsCorrectInput(cast<ConstantInt>(ConstantExpr::getNot(Input))))
      NeedsInvert = true;
    else
      return nullptr;

    // Make sure the inversion requirement is always the same.
    if (Invert && *Invert != NeedsInvert)
      return nullptr;

    Invert = NeedsInvert;
  }

  if (!*Invert)
    return Cond;

  // This phi is actually the opposite of the branching condition of the idom.
  // Invert the condition; doing so may open up opportunities for sinking.
  auto InsertPt = BB->getFirstInsertionPt();
  if (InsertPt != BB->end()) {
    Self.Builder.SetInsertPoint(&*BB, InsertPt);
    return Self.Builder.CreateNot(Cond);
  }

  return nullptr;
}

// Fold iv = phi(start, iv.next = iv2.next op start)
// where iv2 = phi(iv2.start, iv2.next = iv2 + iv2.step)
// and iv2.start op start = start
// to iv = iv2 op start
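// For example (an illustrative sketch; the IR names are hypothetical):
//   %iv  = phi i64 [ %start, %ph ], [ %iv.next, %loop ]
//   %iv2 = phi i64 [ 0, %ph ], [ %iv2.next, %loop ]
//   %iv2.next = add i64 %iv2, %step
//   %iv.next  = add i64 %iv2.next, %start
// Since iv2's start (0) is the identity of add, %iv always equals
// %iv2 + %start, so %iv can be rewritten as that single add.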
static Value *foldDependentIVs(PHINode &PN, IRBuilderBase &Builder) {
  BasicBlock *BB = PN.getParent();
  if (PN.getNumIncomingValues() != 2)
    return nullptr;

  Value *Start;
  Instruction *IvNext;
  BinaryOperator *Iv2Next;
  auto MatchOuterIV = [&](Value *V1, Value *V2) {
    if (match(V2, m_c_BinOp(m_Specific(V1), m_BinOp(Iv2Next))) ||
        match(V2, m_GEP(m_Specific(V1), m_BinOp(Iv2Next)))) {
      Start = V1;
      IvNext = cast<Instruction>(V2);
      return true;
    }
    return false;
  };

  if (!MatchOuterIV(PN.getIncomingValue(0), PN.getIncomingValue(1)) &&
      !MatchOuterIV(PN.getIncomingValue(1), PN.getIncomingValue(0)))
    return nullptr;

  PHINode *Iv2;
  Value *Iv2Start, *Iv2Step;
  if (!matchSimpleRecurrence(Iv2Next, Iv2, Iv2Start, Iv2Step) ||
      Iv2->getParent() != BB)
    return nullptr;

  auto *BO = dyn_cast<BinaryOperator>(IvNext);
  Constant *Identity =
      BO ? ConstantExpr::getBinOpIdentity(BO->getOpcode(), Iv2Start->getType())
         : Constant::getNullValue(Iv2Start->getType());
  if (Iv2Start != Identity)
    return nullptr;

  Builder.SetInsertPoint(&*BB, BB->getFirstInsertionPt());
  if (!BO) {
    auto *GEP = cast<GEPOperator>(IvNext);
    return Builder.CreateGEP(GEP->getSourceElementType(), Start, Iv2, "",
                             cast<GEPOperator>(IvNext)->getNoWrapFlags());
  }

  assert(BO->isCommutative() && "Must be commutative");
  Value *Res = Builder.CreateBinOp(BO->getOpcode(), Iv2, Start);
  cast<Instruction>(Res)->copyIRFlags(BO);
  return Res;
}

1437// PHINode simplification
1438//
1439Instruction *InstCombinerImpl::visitPHINode(PHINode &PN) {
1440 if (Value *V = simplifyInstruction(I: &PN, Q: SQ.getWithInstruction(I: &PN)))
1441 return replaceInstUsesWith(I&: PN, V);
1442
1443 if (Instruction *Result = foldPHIArgZextsIntoPHI(Phi&: PN))
1444 return Result;
1445
1446 if (Instruction *Result = foldPHIArgIntToPtrToPHI(PN))
1447 return Result;
1448
1449 // If all PHI operands are the same operation, pull them through the PHI,
1450 // reducing code size.
  auto *Inst0 = dyn_cast<Instruction>(PN.getIncomingValue(0));
  auto *Inst1 = dyn_cast<Instruction>(PN.getIncomingValue(1));
  if (Inst0 && Inst1 && Inst0->getOpcode() == Inst1->getOpcode() &&
      Inst0->hasOneUser())
    if (Instruction *Result = foldPHIArgOpIntoPHI(PN))
      return Result;

  // If the incoming values are pointer casts of the same original value,
  // replace the phi with a single cast iff we can insert a non-PHI instruction.
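  // For example (illustrative; %v is the common underlying value):
  //   %a = addrspacecast ptr %v to ptr addrspace(1)
  //   %b = addrspacecast ptr %v to ptr addrspace(1)
  //   %p = phi ptr addrspace(1) [ %a, %bb1 ], [ %b, %bb2 ]
  // can be replaced by a single pointer cast of %v to the phi's type.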
  if (PN.getType()->isPointerTy() &&
      PN.getParent()->getFirstInsertionPt() != PN.getParent()->end()) {
    Value *IV0 = PN.getIncomingValue(0);
    Value *IV0Stripped = IV0->stripPointerCasts();
    // Set to keep track of values known to be equal to IV0Stripped after
    // stripping pointer casts.
    SmallPtrSet<Value *, 4> CheckedIVs;
    CheckedIVs.insert(IV0);
    if (IV0 != IV0Stripped &&
        all_of(PN.incoming_values(), [&CheckedIVs, IV0Stripped](Value *IV) {
          return !CheckedIVs.insert(IV).second ||
                 IV0Stripped == IV->stripPointerCasts();
        })) {
      return CastInst::CreatePointerCast(IV0Stripped, PN.getType());
    }
  }

  if (foldDeadPhiWeb(PN))
    return nullptr;

  // Optimization when the phi only has one use
  if (PN.hasOneUse()) {
    if (foldIntegerTypedPHI(PN))
      return nullptr;

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi. This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
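    //
    // For instance (illustrative):
    //   %j = phi i32 [ 0, %entry ], [ %j.next, %loop ]
    //   %j.next = add nsw i32 %j, 1   ; sole use of %j; %j is its sole user
    // Neither value is used elsewhere, so the phi can be replaced with
    // poison and both instructions become trivially dead.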
    Instruction *PHIUser = cast<Instruction>(PN.user_back());
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<UnaryOperator>(PHIUser) ||
         isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->user_back() == &PN) {
      return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
    }
  }

  // When a PHI is used only to be compared with zero, it is safe to replace
  // an incoming value that is known to be non-zero with any non-zero constant.
  // For example, in the code below, the incoming value %v can be replaced
  // with any non-zero constant based on the fact that the PHI is only used to
  // be compared with zero and %v is a known non-zero value:
  // %v = select %cond, 1, 2
  // %p = phi [%v, BB] ...
  // icmp eq, %p, 0
  // FIXME: For simplicity, handle only integer types for now.
  // This handles a small number of uses to keep the complexity down, and an
  // icmp(or(phi)) can equally be replaced with any non-zero constant as the
  // "or" will only add bits.
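  //
  // The icmp(or(phi)) case looks like (illustrative):
  //   %o = or i32 %p, %x
  //   %c = icmp eq i32 %o, 0
  // Since or-ing can only set additional bits, %o is zero only if %p is
  // zero, so replacing a known-non-zero incoming value of %p with another
  // non-zero constant cannot change the result of the compare.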
  if (!PN.hasNUsesOrMore(3)) {
    SmallVector<Instruction *> DropPoisonFlags;
    bool AllUsesOfPhiEndsInCmp = all_of(PN.users(), [&](User *U) {
      auto *CmpInst = dyn_cast<ICmpInst>(U);
      if (!CmpInst) {
        // This is always correct as OR only adds bits and we are checking
        // against 0.
        if (U->hasOneUse() && match(U, m_c_Or(m_Specific(&PN), m_Value()))) {
          DropPoisonFlags.push_back(cast<Instruction>(U));
          CmpInst = dyn_cast<ICmpInst>(U->user_back());
        }
      }
      if (!CmpInst || !isa<IntegerType>(PN.getType()) ||
          !CmpInst->isEquality() || !match(CmpInst->getOperand(1), m_Zero())) {
        return false;
      }
      return true;
    });
    // All uses of the PHI result in a compare with zero.
    if (AllUsesOfPhiEndsInCmp) {
      ConstantInt *NonZeroConst = nullptr;
      bool MadeChange = false;
      for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
        Instruction *CtxI = PN.getIncomingBlock(I)->getTerminator();
        Value *VA = PN.getIncomingValue(I);
        if (isKnownNonZero(VA, getSimplifyQuery().getWithInstruction(CtxI))) {
          if (!NonZeroConst)
            NonZeroConst = getAnyNonZeroConstInt(PN);
          if (NonZeroConst != VA) {
            replaceOperand(PN, I, NonZeroConst);
            // The "disjoint" flag may no longer hold after the transform.
            for (Instruction *I : DropPoisonFlags)
              I->dropPoisonGeneratingFlags();
            MadeChange = true;
          }
        }
      }
      if (MadeChange)
        return &PN;
    }
  }

  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block. Do a
  // quick check to see if the PHI node only contains a single non-phi value;
  // if so, scan to see if the phi cycle is actually equal to that value. If
  // the phi has no non-phi values then allow the "NonPhiInVal" to be set
  // later if one of the phis itself does not have a single input.
  {
    unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumIncomingVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    Value *NonPhiInVal =
        InValNo != NumIncomingVals ? PN.getIncomingValue(InValNo) : nullptr;

    // Scan the rest of the operands to see if there are any conflicts; if so,
    // there is no need to recursively scan other phis.
    if (NonPhiInVal)
      for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

    // If we scanned over all operands, then we have one unique value plus
    // phi values. Scan PHI nodes to see if they all merge in each other or
    // the value.
    if (InValNo == NumIncomingVals) {
      SmallPtrSet<PHINode *, 16> ValueEqualPHIs;
      if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
        return replaceInstUsesWith(PN, NonPhiInVal);
    }
  }

  // If there are multiple PHIs, sort their operands so that they all list
  // the blocks in the same order. This will help identical PHIs be eliminated
  // by other passes. Other passes shouldn't depend on this for correctness,
  // however.
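  //
  // For example (illustrative): if an earlier phi in this block recorded its
  // blocks in the order [%bb1, %bb2], then
  //   %p = phi i32 [ %a, %bb2 ], [ %b, %bb1 ]
  // is rewritten in place as
  //   %p = phi i32 [ %b, %bb1 ], [ %a, %bb2 ]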
  auto Res = PredOrder.try_emplace(PN.getParent());
  if (!Res.second) {
    const auto &Preds = Res.first->second;
    for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
      BasicBlock *BBA = PN.getIncomingBlock(I);
      BasicBlock *BBB = Preds[I];
      if (BBA != BBB) {
        Value *VA = PN.getIncomingValue(I);
        unsigned J = PN.getBasicBlockIndex(BBB);
        Value *VB = PN.getIncomingValue(J);
        PN.setIncomingBlock(I, BBB);
        PN.setIncomingValue(I, VB);
        PN.setIncomingBlock(J, BBA);
        PN.setIncomingValue(J, VA);
        // NOTE: Instcombine normally would want us to "return &PN" if we
        // modified any of the operands of an instruction. However, since we
        // aren't adding or removing uses (just rearranging them) we don't do
        // this in this case.
      }
    }
  } else {
    // Remember the block order of the first encountered phi node.
    append_range(Res.first->second, PN.blocks());
  }

  // Is there an identical PHI node in this basic block?
  for (PHINode &IdenticalPN : PN.getParent()->phis()) {
    // Ignore the PHI node itself.
    if (&IdenticalPN == &PN)
      continue;
    // Note that even though we've just canonicalized this PHI, due to the
    // worklist visitation order, there are no guarantees that *every* PHI
    // has been canonicalized, so we can't just compare operand ranges.
    if (!PN.isIdenticalToWhenDefined(&IdenticalPN))
      continue;
    // Just use that PHI instead then.
    ++NumPHICSEs;
    return replaceInstUsesWith(PN, &IdenticalPN);
  }

  // If this is an integer PHI and we know that it has an illegal type, see if
  // it is only used by trunc or trunc(lshr) operations. If so, we split the
  // PHI into the various pieces being extracted. This sort of thing is
  // introduced when SROA promotes an aggregate to a single large integer type.
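  //
  // For example (illustrative, assuming i64 is not a legal type):
  //   %p = phi i64 ...
  //   %lo = trunc i64 %p to i32
  //   %hi.shifted = lshr i64 %p, 32
  //   %hi = trunc i64 %hi.shifted to i32
  // can be split into two i32 phis feeding %lo and %hi directly.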
  if (PN.getType()->isIntegerTy() &&
      !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
    if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
      return Res;

  // Ultimately, try to replace this Phi with a dominating condition.
  if (auto *V = simplifyUsingControlFlow(*this, PN, DT))
    return replaceInstUsesWith(PN, V);

  if (Value *Res = foldDependentIVs(PN, Builder))
    return replaceInstUsesWith(PN, Res);

  return nullptr;
}