1//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visit functions for load, store and alloca.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/ADT/MapVector.h"
15#include "llvm/ADT/SmallString.h"
16#include "llvm/ADT/Statistic.h"
17#include "llvm/Analysis/AliasAnalysis.h"
18#include "llvm/Analysis/Loads.h"
19#include "llvm/IR/DataLayout.h"
20#include "llvm/IR/IntrinsicInst.h"
21#include "llvm/IR/LLVMContext.h"
22#include "llvm/IR/PatternMatch.h"
23#include "llvm/Transforms/InstCombine/InstCombiner.h"
24#include "llvm/Transforms/Utils/Local.h"
25using namespace llvm;
26using namespace PatternMatch;
27
28#define DEBUG_TYPE "instcombine"
29
30namespace llvm {
31extern cl::opt<bool> ProfcheckDisableMetadataFixes;
32}
33
34STATISTIC(NumDeadStore, "Number of dead stores eliminated");
35STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
36
37static cl::opt<unsigned> MaxCopiedFromConstantUsers(
38 "instcombine-max-copied-from-constant-users", cl::init(Val: 300),
39 cl::desc("Maximum users to visit in copy from constant transform"),
40 cl::Hidden);
41
42/// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
43/// pointer to an alloca. Ignore any reads of the pointer; return false if we
44/// see any stores or other unknown uses. If we see pointer arithmetic, keep
45/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
46/// the uses. If we see a memcpy/memmove that targets an unoffset pointer to
47/// the alloca, and if the source pointer points to a constant memory location,
48/// we can optimize this.
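///
/// For example (illustrative IR), the pattern this enables looks like:
///   %a = alloca [8 x i32]
///   call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr @cst, i64 32, i1 false)
///   ; ... only reads of %a (possibly through GEPs/casts) follow ...
/// where @cst is a global in constant memory; the alloca can then be replaced
/// by @cst itself.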
49static bool
50isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V,
51 MemTransferInst *&TheCopy,
52 SmallVectorImpl<Instruction *> &ToDelete) {
53 // We track lifetime intrinsics as we encounter them. If we decide to go
54 // ahead and replace the value with the memory location, this lets the caller
55 // quickly eliminate the markers.
56
57 using ValueAndIsOffset = PointerIntPair<Value *, 1, bool>;
58 SmallVector<ValueAndIsOffset, 32> Worklist;
59 SmallPtrSet<ValueAndIsOffset, 32> Visited;
60 Worklist.emplace_back(Args&: V, Args: false);
61 while (!Worklist.empty()) {
62 ValueAndIsOffset Elem = Worklist.pop_back_val();
63 if (!Visited.insert(Ptr: Elem).second)
64 continue;
65 if (Visited.size() > MaxCopiedFromConstantUsers)
66 return false;
67
68 const auto [Value, IsOffset] = Elem;
69 for (auto &U : Value->uses()) {
70 auto *I = cast<Instruction>(Val: U.getUser());
71
72 if (auto *LI = dyn_cast<LoadInst>(Val: I)) {
73 // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
74 if (!LI->isSimple()) return false;
75 continue;
76 }
77
78 if (isa<PHINode, SelectInst>(Val: I)) {
79 // We set IsOffset=true, to forbid the memcpy from occurring after the
80 // phi: If one of the phi operands is not based on the alloca, we
81 // would incorrectly omit a write.
82 Worklist.emplace_back(Args&: I, Args: true);
83 continue;
84 }
85 if (isa<BitCastInst, AddrSpaceCastInst>(Val: I)) {
86 // If uses of the bitcast are ok, we are ok.
87 Worklist.emplace_back(Args&: I, Args: IsOffset);
88 continue;
89 }
90 if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: I)) {
91 // If the GEP has all zero indices, it doesn't offset the pointer;
92 // otherwise it does.
93 Worklist.emplace_back(Args&: I, Args: IsOffset || !GEP->hasAllZeroIndices());
94 continue;
95 }
96
97 if (auto *Call = dyn_cast<CallBase>(Val: I)) {
98 // If this is the function being called then we treat it like a load and
99 // ignore it.
100 if (Call->isCallee(U: &U))
101 continue;
102
103 unsigned DataOpNo = Call->getDataOperandNo(U: &U);
104 bool IsArgOperand = Call->isArgOperand(U: &U);
105
106 // Inalloca arguments are clobbered by the call.
107 if (IsArgOperand && Call->isInAllocaArgument(ArgNo: DataOpNo))
108 return false;
109
110 // If this call site doesn't modify the memory, then we know it is just
111 // a load (but one that potentially returns the value itself), so we can
112 // ignore it if we know that the value isn't captured.
113 bool NoCapture = Call->doesNotCapture(OpNo: DataOpNo);
114 if (NoCapture &&
115 (Call->onlyReadsMemory() || Call->onlyReadsMemory(OpNo: DataOpNo)))
116 continue;
117 }
118
119 // Lifetime intrinsics can be handled by the caller.
120 if (I->isLifetimeStartOrEnd()) {
121 assert(I->use_empty() && "Lifetime markers have no result to use!");
122 ToDelete.push_back(Elt: I);
123 continue;
124 }
125
126 // If this isn't our memcpy/memmove, reject it as something we can't
127 // handle.
128 MemTransferInst *MI = dyn_cast<MemTransferInst>(Val: I);
129 if (!MI)
130 return false;
131
132 // If the transfer is volatile, reject it.
133 if (MI->isVolatile())
134 return false;
135
136 // If the transfer uses the alloca as its source, ignore it: it only reads
137 // the alloca (volatile transfers were already rejected above).
138 if (U.getOperandNo() == 1)
139 continue;
140
141 // If we already have seen a copy, reject the second one.
142 if (TheCopy) return false;
143
144 // If the pointer has been offset from the start of the alloca, we can't
145 // safely handle this.
146 if (IsOffset) return false;
147
148 // If the memintrinsic isn't using the alloca as the dest, reject it.
149 if (U.getOperandNo() != 0) return false;
150
151 // If the source of the memcpy/move is not constant, reject it.
152 if (isModSet(MRI: AA->getModRefInfoMask(P: MI->getSource())))
153 return false;
154
155 // Otherwise, the transform is safe. Remember the copy instruction.
156 TheCopy = MI;
157 }
158 }
159 return true;
160}
161
162/// isOnlyCopiedFromConstantMemory - If the specified alloca is only modified by
163/// a copy from a constant memory location, return that copy instruction;
164/// otherwise return nullptr. If we can prove this, we can replace any uses of
165/// the alloca with uses of the memory location directly.
166static MemTransferInst *
167isOnlyCopiedFromConstantMemory(AAResults *AA,
168 AllocaInst *AI,
169 SmallVectorImpl<Instruction *> &ToDelete) {
170 MemTransferInst *TheCopy = nullptr;
171 if (isOnlyCopiedFromConstantMemory(AA, V: AI, TheCopy, ToDelete))
172 return TheCopy;
173 return nullptr;
174}
175
176/// Returns true if V is dereferenceable and aligned for the full alloca size.
177static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
178 const DataLayout &DL) {
179 std::optional<TypeSize> AllocaSize = AI->getAllocationSize(DL);
180 if (!AllocaSize || AllocaSize->isScalable())
181 return false;
182 return isDereferenceableAndAlignedPointer(V, Alignment: AI->getAlign(),
183 Size: APInt(64, *AllocaSize), DL);
184}
185
186static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,
187 AllocaInst &AI, DominatorTree &DT) {
188 // Check for array size of 1 (scalar allocation).
189 if (!AI.isArrayAllocation()) {
190 // i32 1 is the canonical array size for scalar allocations.
191 if (AI.getArraySize()->getType()->isIntegerTy(Bitwidth: 32))
192 return nullptr;
193
194 // Canonicalize it.
195 return IC.replaceOperand(I&: AI, OpNum: 0, V: IC.Builder.getInt32(C: 1));
196 }
197
198 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
199 if (const ConstantInt *C = dyn_cast<ConstantInt>(Val: AI.getArraySize())) {
200 if (C->getValue().getActiveBits() <= 64) {
201 Type *NewTy = ArrayType::get(ElementType: AI.getAllocatedType(), NumElements: C->getZExtValue());
202 AllocaInst *New = IC.Builder.CreateAlloca(Ty: NewTy, AddrSpace: AI.getAddressSpace(),
203 ArraySize: nullptr, Name: AI.getName());
204 New->setAlignment(AI.getAlign());
205 New->setUsedWithInAlloca(AI.isUsedWithInAlloca());
206
207 replaceAllDbgUsesWith(From&: AI, To&: *New, DomPoint&: *New, DT);
208 return IC.replaceInstUsesWith(I&: AI, V: New);
209 }
210 }
211
212 if (isa<UndefValue>(Val: AI.getArraySize()))
213 return IC.replaceInstUsesWith(I&: AI, V: Constant::getNullValue(Ty: AI.getType()));
214
215 // Ensure that the alloca array size argument has a type equal to the index
216 // size of the alloca's pointer type, which, in the typical case, is intptr_t,
217 // so that any casting is exposed early.
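  // For example, on a target with 64-bit pointer indexing (illustrative):
  //   %p = alloca i32, i8 %n
  // becomes
  //   %n.zext = zext i8 %n to i64
  //   %p = alloca i32, i64 %n.zext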
218 Type *PtrIdxTy = IC.getDataLayout().getIndexType(PtrTy: AI.getType());
219 if (AI.getArraySize()->getType() != PtrIdxTy) {
220 Value *V = IC.Builder.CreateIntCast(V: AI.getArraySize(), DestTy: PtrIdxTy, isSigned: false);
221 return IC.replaceOperand(I&: AI, OpNum: 0, V);
222 }
223
224 return nullptr;
225}
226
227namespace {
228// If I and V are pointers in different address spaces, it is not allowed to
229// use replaceAllUsesWith since I and V have different types. A
230// non-target-specific transformation should not use addrspacecast on V since
231// the two address spaces may be disjoint depending on the target.
232//
233// This class chases down uses of the old pointer until reaching the load
234// instructions, then replaces the old pointer in the load instructions with
235// the new pointer. If, while chasing, it sees a bitcast or GEP, it will
236// create a new bitcast or GEP with the new pointer and use it in the load
237// instruction.
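//
// For example (illustrative IR): given an alloca in one address space that is
// only initialized by a memcpy from a constant global in another address
// space, a use such as
//   %a = alloca i32, addrspace(5)
//   ...
//   %v = load i32, ptr addrspace(5) %a
// can be rewritten to load directly from the global, with any intervening
// GEPs, selects, PHIs or addrspacecasts recreated for the new pointer.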
238class PointerReplacer {
239public:
240 PointerReplacer(InstCombinerImpl &IC, Instruction &Root, unsigned SrcAS)
241 : IC(IC), Root(Root), FromAS(SrcAS) {}
242
243 bool collectUsers();
244 void replacePointer(Value *V);
245
246private:
247 void replace(Instruction *I);
248 Value *getReplacement(Value *V) const { return WorkMap.lookup(Key: V); }
249 bool isAvailable(Instruction *I) const {
250 return I == &Root || UsersToReplace.contains(key: I);
251 }
252
253 bool isEqualOrValidAddrSpaceCast(const Instruction *I,
254 unsigned FromAS) const {
255 const auto *ASC = dyn_cast<AddrSpaceCastInst>(Val: I);
256 if (!ASC)
257 return false;
258 unsigned ToAS = ASC->getDestAddressSpace();
259 return (FromAS == ToAS) || IC.isValidAddrSpaceCast(FromAS, ToAS);
260 }
261
262 SmallSetVector<Instruction *, 32> UsersToReplace;
263 MapVector<Value *, Value *> WorkMap;
264 InstCombinerImpl &IC;
265 Instruction &Root;
266 unsigned FromAS;
267};
268} // end anonymous namespace
269
270bool PointerReplacer::collectUsers() {
271 SmallVector<Instruction *> Worklist;
272 SmallSetVector<Instruction *, 32> ValuesToRevisit;
273
274 auto PushUsersToWorklist = [&](Instruction *Inst) {
275 for (auto *U : Inst->users())
276 if (auto *I = dyn_cast<Instruction>(Val: U))
277 if (!isAvailable(I) && !ValuesToRevisit.contains(key: I))
278 Worklist.emplace_back(Args&: I);
279 };
280
281 auto TryPushInstOperand = [&](Instruction *InstOp) {
282 if (!UsersToReplace.contains(key: InstOp)) {
283 if (!ValuesToRevisit.insert(X: InstOp))
284 return false;
285 Worklist.emplace_back(Args&: InstOp);
286 }
287 return true;
288 };
289
290 PushUsersToWorklist(&Root);
291 while (!Worklist.empty()) {
292 Instruction *Inst = Worklist.pop_back_val();
293 if (auto *Load = dyn_cast<LoadInst>(Val: Inst)) {
294 if (Load->isVolatile())
295 return false;
296 UsersToReplace.insert(X: Load);
297 } else if (auto *PHI = dyn_cast<PHINode>(Val: Inst)) {
298 /// TODO: Handle poison and null pointers for PHI and select.
299 // If all incoming values are available, mark this PHI as
300 // replaceable and push its users into the worklist.
301 bool IsReplaceable = all_of(Range: PHI->incoming_values(),
302 P: [](Value *V) { return isa<Instruction>(Val: V); });
303 if (IsReplaceable && all_of(Range: PHI->incoming_values(), P: [&](Value *V) {
304 return isAvailable(I: cast<Instruction>(Val: V));
305 })) {
306 UsersToReplace.insert(X: PHI);
307 PushUsersToWorklist(PHI);
308 continue;
309 }
310
311 // Either an incoming value is not an instruction or not all
312 // incoming values are available. If this PHI was already
313 // visited prior to this iteration, return false.
314 if (!IsReplaceable || !ValuesToRevisit.insert(X: PHI))
315 return false;
316
317 // Push PHI back into the stack, followed by unavailable
318 // incoming values.
319 Worklist.emplace_back(Args&: PHI);
320 for (unsigned Idx = 0; Idx < PHI->getNumIncomingValues(); ++Idx) {
321 if (!TryPushInstOperand(cast<Instruction>(Val: PHI->getIncomingValue(i: Idx))))
322 return false;
323 }
324 } else if (auto *SI = dyn_cast<SelectInst>(Val: Inst)) {
325 auto *TrueInst = dyn_cast<Instruction>(Val: SI->getTrueValue());
326 auto *FalseInst = dyn_cast<Instruction>(Val: SI->getFalseValue());
327 if (!TrueInst || !FalseInst)
328 return false;
329
330 if (isAvailable(I: TrueInst) && isAvailable(I: FalseInst)) {
331 UsersToReplace.insert(X: SI);
332 PushUsersToWorklist(SI);
333 continue;
334 }
335
336 // Push select back onto the stack, followed by unavailable true/false
337 // value.
338 Worklist.emplace_back(Args&: SI);
339 if (!TryPushInstOperand(TrueInst) || !TryPushInstOperand(FalseInst))
340 return false;
341 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: Inst)) {
342 auto *PtrOp = dyn_cast<Instruction>(Val: GEP->getPointerOperand());
343 if (!PtrOp)
344 return false;
345 if (isAvailable(I: PtrOp)) {
346 UsersToReplace.insert(X: GEP);
347 PushUsersToWorklist(GEP);
348 continue;
349 }
350
351 Worklist.emplace_back(Args&: GEP);
352 if (!TryPushInstOperand(PtrOp))
353 return false;
354 } else if (auto *MI = dyn_cast<MemTransferInst>(Val: Inst)) {
355 if (MI->isVolatile())
356 return false;
357 UsersToReplace.insert(X: Inst);
358 } else if (isEqualOrValidAddrSpaceCast(I: Inst, FromAS)) {
359 UsersToReplace.insert(X: Inst);
360 PushUsersToWorklist(Inst);
361 } else if (Inst->isLifetimeStartOrEnd()) {
362 continue;
363 } else {
364 // TODO: For arbitrary uses with address space mismatches, should we check
365 // if we can introduce a valid addrspacecast?
366 LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *Inst << '\n');
367 return false;
368 }
369 }
370
371 return true;
372}
373
374void PointerReplacer::replacePointer(Value *V) {
375 assert(cast<PointerType>(Root.getType()) != cast<PointerType>(V->getType()) &&
376 "Invalid usage");
377 WorkMap[&Root] = V;
378 SmallVector<Instruction *> Worklist;
379 SetVector<Instruction *> PostOrderWorklist;
380 SmallPtrSet<Instruction *, 32> Visited;
381
382 // Perform a postorder traversal of the users of Root.
383 Worklist.push_back(Elt: &Root);
384 while (!Worklist.empty()) {
385 Instruction *I = Worklist.back();
386
387 // If I has not been processed before, push each of its
388 // replaceable users into the worklist.
389 if (Visited.insert(Ptr: I).second) {
390 for (auto *U : I->users()) {
391 auto *UserInst = cast<Instruction>(Val: U);
392 if (UsersToReplace.contains(key: UserInst) && !Visited.contains(Ptr: UserInst))
393 Worklist.push_back(Elt: UserInst);
394 }
395 // Otherwise, users of I have already been pushed into
396 // the PostOrderWorklist. Push I as well.
397 } else {
398 PostOrderWorklist.insert(X: I);
399 Worklist.pop_back();
400 }
401 }
402
403 // Replace pointers in reverse-postorder.
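  // Processing in reverse-postorder guarantees that, by the time an
  // instruction is rewritten, replacements for its pointer operands have
  // already been recorded in WorkMap (see the asserts in replace()).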
404 for (Instruction *I : reverse(C&: PostOrderWorklist))
405 replace(I);
406}
407
408void PointerReplacer::replace(Instruction *I) {
409 if (getReplacement(V: I))
410 return;
411
412 if (auto *LT = dyn_cast<LoadInst>(Val: I)) {
413 auto *V = getReplacement(V: LT->getPointerOperand());
414 assert(V && "Operand not replaced");
415 auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
416 LT->getAlign(), LT->getOrdering(),
417 LT->getSyncScopeID());
418 NewI->takeName(V: LT);
419 copyMetadataForLoad(Dest&: *NewI, Source: *LT);
420
421 IC.InsertNewInstWith(New: NewI, Old: LT->getIterator());
422 IC.replaceInstUsesWith(I&: *LT, V: NewI);
423 // LT has actually been replaced by NewI. It is useless to insert LT into
424 // the map. Instead, we insert NewI into the map to indicate this is the
425 // replacement (new value).
426 WorkMap[NewI] = NewI;
427 } else if (auto *PHI = dyn_cast<PHINode>(Val: I)) {
428 // Update the PHI in place: give it the replacement pointer type and swap in
429 // the replacement for any incoming value that has one.
430 Value *V = WorkMap.lookup(Key: PHI->getIncomingValue(i: 0));
431 PHI->mutateType(Ty: V ? V->getType() : PHI->getIncomingValue(i: 0)->getType());
432 for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I) {
433 Value *V = WorkMap.lookup(Key: PHI->getIncomingValue(i: I));
434 PHI->setIncomingValue(i: I, V: V ? V : PHI->getIncomingValue(i: I));
435 }
436 WorkMap[PHI] = PHI;
437 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: I)) {
438 auto *V = getReplacement(V: GEP->getPointerOperand());
439 assert(V && "Operand not replaced");
440 SmallVector<Value *, 8> Indices(GEP->indices());
441 auto *NewI =
442 GetElementPtrInst::Create(PointeeType: GEP->getSourceElementType(), Ptr: V, IdxList: Indices);
443 IC.InsertNewInstWith(New: NewI, Old: GEP->getIterator());
444 NewI->takeName(V: GEP);
445 NewI->setNoWrapFlags(GEP->getNoWrapFlags());
446 WorkMap[GEP] = NewI;
447 } else if (auto *SI = dyn_cast<SelectInst>(Val: I)) {
448 Value *TrueValue = SI->getTrueValue();
449 Value *FalseValue = SI->getFalseValue();
450 if (Value *Replacement = getReplacement(V: TrueValue))
451 TrueValue = Replacement;
452 if (Value *Replacement = getReplacement(V: FalseValue))
453 FalseValue = Replacement;
454 auto *NewSI = SelectInst::Create(C: SI->getCondition(), S1: TrueValue, S2: FalseValue,
455 NameStr: SI->getName(), InsertBefore: nullptr, MDFrom: SI);
456 IC.InsertNewInstWith(New: NewSI, Old: SI->getIterator());
457 NewSI->takeName(V: SI);
458 WorkMap[SI] = NewSI;
459 } else if (auto *MemCpy = dyn_cast<MemTransferInst>(Val: I)) {
460 auto *DestV = MemCpy->getRawDest();
461 auto *SrcV = MemCpy->getRawSource();
462
463 if (auto *DestReplace = getReplacement(V: DestV))
464 DestV = DestReplace;
465 if (auto *SrcReplace = getReplacement(V: SrcV))
466 SrcV = SrcReplace;
467
468 IC.Builder.SetInsertPoint(MemCpy);
469 auto *NewI = IC.Builder.CreateMemTransferInst(
470 IntrID: MemCpy->getIntrinsicID(), Dst: DestV, DstAlign: MemCpy->getDestAlign(), Src: SrcV,
471 SrcAlign: MemCpy->getSourceAlign(), Size: MemCpy->getLength(), isVolatile: MemCpy->isVolatile());
472 AAMDNodes AAMD = MemCpy->getAAMetadata();
473 if (AAMD)
474 NewI->setAAMetadata(AAMD);
475
476 IC.eraseInstFromFunction(I&: *MemCpy);
477 WorkMap[MemCpy] = NewI;
478 } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(Val: I)) {
479 auto *V = getReplacement(V: ASC->getPointerOperand());
480 assert(V && "Operand not replaced");
481 assert(isEqualOrValidAddrSpaceCast(
482 ASC, V->getType()->getPointerAddressSpace()) &&
483 "Invalid address space cast!");
484
485 if (V->getType()->getPointerAddressSpace() !=
486 ASC->getType()->getPointerAddressSpace()) {
487 auto *NewI = new AddrSpaceCastInst(V, ASC->getType(), "");
488 NewI->takeName(V: ASC);
489 IC.InsertNewInstWith(New: NewI, Old: ASC->getIterator());
490 WorkMap[ASC] = NewI;
491 } else {
492 WorkMap[ASC] = V;
493 }
494
495 } else {
496 llvm_unreachable("should never reach here");
497 }
498}
499
500Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
501 if (auto *I = simplifyAllocaArraySize(IC&: *this, AI, DT))
502 return I;
503
504 // Move all allocas of zero-byte objects to the entry block and merge them
505 // together. Note that we only do this for allocas, because malloc should
506 // allocate and return a unique pointer, even for a zero-byte allocation.
507 std::optional<TypeSize> Size = AI.getAllocationSize(DL);
508 if (Size && Size->isZero()) {
509 // For a zero sized alloca there is no point in doing an array allocation.
510 // This is helpful if the array size is a complicated expression not used
511 // elsewhere.
512 if (AI.isArrayAllocation())
513 return replaceOperand(I&: AI, OpNum: 0,
514 V: ConstantInt::get(Ty: AI.getArraySize()->getType(), V: 1));
515
516 // Get the first instruction in the entry block.
517 BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
518 BasicBlock::iterator FirstInst = EntryBlock.getFirstNonPHIOrDbg();
519 if (&*FirstInst != &AI) {
520 // If the entry block doesn't start with a zero-size alloca then move
521 // this one to the start of the entry block. There is no problem with
522 // dominance as the array size was forced to a constant earlier already.
523 AllocaInst *EntryAI = dyn_cast<AllocaInst>(Val&: FirstInst);
524 std::optional<TypeSize> EntryAISize =
525 EntryAI ? EntryAI->getAllocationSize(DL) : std::nullopt;
526 if (!EntryAISize || !EntryAISize->isZero()) {
527 AI.moveBefore(InsertPos: FirstInst);
528 return &AI;
529 }
530
531 // Replace this zero-sized alloca with the one at the start of the entry
532 // block after ensuring that the address will be aligned enough for both
533 // types.
534 const Align MaxAlign = std::max(a: EntryAI->getAlign(), b: AI.getAlign());
535 EntryAI->setAlignment(MaxAlign);
536 return replaceInstUsesWith(I&: AI, V: EntryAI);
537 }
538 }
539
540 // Check to see if this allocation is only modified by a memcpy/memmove from
541 // a memory location whose alignment is equal to or exceeds that of the
542 // allocation. If this is the case, we can change all users to use the
543 // constant memory location instead. This is commonly produced by the CFE by
544 // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
545 // is only subsequently read.
546 SmallVector<Instruction *, 4> ToDelete;
547 if (MemTransferInst *Copy = isOnlyCopiedFromConstantMemory(AA, AI: &AI, ToDelete)) {
548 Value *TheSrc = Copy->getSource();
549 Align AllocaAlign = AI.getAlign();
550 Align SourceAlign = getOrEnforceKnownAlignment(
551 V: TheSrc, PrefAlign: AllocaAlign, DL, CxtI: &AI, AC: &AC, DT: &DT);
552 if (AllocaAlign <= SourceAlign &&
553 isDereferenceableForAllocaSize(V: TheSrc, AI: &AI, DL) &&
554 !isa<Instruction>(Val: TheSrc)) {
555 // FIXME: Can we sink instructions without violating dominance when TheSrc
556 // is an instruction instead of a constant or argument?
557 LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
558 LLVM_DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
559 unsigned SrcAddrSpace = TheSrc->getType()->getPointerAddressSpace();
560 if (AI.getAddressSpace() == SrcAddrSpace) {
561 for (Instruction *Delete : ToDelete)
562 eraseInstFromFunction(I&: *Delete);
563
564 Instruction *NewI = replaceInstUsesWith(I&: AI, V: TheSrc);
565 eraseInstFromFunction(I&: *Copy);
566 ++NumGlobalCopies;
567 return NewI;
568 }
569
570 PointerReplacer PtrReplacer(*this, AI, SrcAddrSpace);
571 if (PtrReplacer.collectUsers()) {
572 for (Instruction *Delete : ToDelete)
573 eraseInstFromFunction(I&: *Delete);
574
575 PtrReplacer.replacePointer(V: TheSrc);
576 ++NumGlobalCopies;
577 }
578 }
579 }
580
581 // At last, use the generic allocation site handler to aggressively remove
582 // unused allocas.
583 return visitAllocSite(FI&: AI);
584}
585
586// Are we allowed to form an atomic load or store of this type?
587static bool isSupportedAtomicType(Type *Ty) {
588 return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
589}
590
591/// Helper to combine a load to a new type.
592///
593/// This just does the work of combining a load to a new type. It handles
594/// metadata, etc., and returns the new instruction. The \c NewTy should be the
595/// loaded *value* type; the original pointer operand is reused and a load of
596/// \c NewTy is created from it.
597///
598/// Note that this will create all of the instructions with whatever insert
599/// point the \c InstCombinerImpl currently is using.
600LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
601 const Twine &Suffix) {
602 assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
603 "can't fold an atomic load to requested type");
604
605 LoadInst *NewLoad =
606 Builder.CreateAlignedLoad(Ty: NewTy, Ptr: LI.getPointerOperand(), Align: LI.getAlign(),
607 isVolatile: LI.isVolatile(), Name: LI.getName() + Suffix);
608 NewLoad->setAtomic(Ordering: LI.getOrdering(), SSID: LI.getSyncScopeID());
609 copyMetadataForLoad(Dest&: *NewLoad, Source: LI);
610 return NewLoad;
611}
612
613/// Combine a store to a new type.
614///
615/// Returns the newly created store instruction.
616static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
617 Value *V) {
618 assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
619 "can't fold an atomic store of requested type");
620
621 Value *Ptr = SI.getPointerOperand();
622 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
623 SI.getAllMetadata(MDs&: MD);
624
625 StoreInst *NewStore =
626 IC.Builder.CreateAlignedStore(Val: V, Ptr, Align: SI.getAlign(), isVolatile: SI.isVolatile());
627 NewStore->setAtomic(Ordering: SI.getOrdering(), SSID: SI.getSyncScopeID());
628 for (const auto &MDPair : MD) {
629 unsigned ID = MDPair.first;
630 MDNode *N = MDPair.second;
631 // Note, essentially every kind of metadata should be preserved here! This
632 // routine is supposed to clone a store instruction changing *only its
633 // type*. The only metadata it makes sense to drop is metadata which is
634 // invalidated when the pointer type changes. This should essentially
635 // never be the case in LLVM, but we explicitly switch over only known
636 // metadata to be conservatively correct. If you are adding metadata to
637 // LLVM which pertains to stores, you almost certainly want to add it
638 // here.
639 switch (ID) {
640 case LLVMContext::MD_dbg:
641 case LLVMContext::MD_DIAssignID:
642 case LLVMContext::MD_tbaa:
643 case LLVMContext::MD_prof:
644 case LLVMContext::MD_fpmath:
645 case LLVMContext::MD_tbaa_struct:
646 case LLVMContext::MD_alias_scope:
647 case LLVMContext::MD_noalias:
648 case LLVMContext::MD_nontemporal:
649 case LLVMContext::MD_mem_parallel_loop_access:
650 case LLVMContext::MD_access_group:
651 // All of these directly apply.
652 NewStore->setMetadata(KindID: ID, Node: N);
653 break;
654 case LLVMContext::MD_invariant_load:
655 case LLVMContext::MD_nonnull:
656 case LLVMContext::MD_noundef:
657 case LLVMContext::MD_range:
658 case LLVMContext::MD_align:
659 case LLVMContext::MD_dereferenceable:
660 case LLVMContext::MD_dereferenceable_or_null:
661 // These don't apply for stores.
662 break;
663 }
664 }
665
666 return NewStore;
667}
668
669/// Combine loads to match the type of their uses' value after looking
670/// through intervening bitcasts.
671///
672/// The core idea here is that if the result of a load is used in an operation,
673/// we should load the type most conducive to that operation. For example, when
674/// loading an integer and immediately bitcasting it to a float, we should
675/// instead directly load a float.
676///
677/// However, this routine must never change the width of a load or the number of
678/// loads as that would introduce a semantic change. This combine is expected to
679/// be a semantic no-op which just allows loads to more closely model the types
680/// of their consuming operations.
681///
682/// Currently, we also refuse to change the precise type used for an atomic load
683/// or a volatile load. This is debatable, and might be reasonable to change
684/// later. However, it is risky in case some backend or other part of LLVM is
685/// relying on the exact type loaded to select appropriate atomic operations.
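///
/// For example (illustrative IR), a load whose only use is a no-op bitcast
///   %i = load i32, ptr %p
///   %f = bitcast i32 %i to float
/// is, in effect, rewritten to load the used type directly:
///   %f = load float, ptr %p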
686static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
687 LoadInst &Load) {
688 // FIXME: We could probably with some care handle both volatile and ordered
689 // atomic loads here but it isn't clear that this is important.
690 if (!Load.isUnordered())
691 return nullptr;
692
693 if (Load.use_empty())
694 return nullptr;
695
696 // swifterror values can't be bitcasted.
697 if (Load.getPointerOperand()->isSwiftError())
698 return nullptr;
699
700 // Fold away bit casts of the loaded value by loading the desired type.
701 // Note that we should not do this for pointer<->integer casts,
702 // because that would result in type punning.
703 if (Load.hasOneUse()) {
704 // Don't transform when the type is x86_amx; this keeps the pass that lowers
705 // the x86_amx type happy.
706 Type *LoadTy = Load.getType();
707 if (auto *BC = dyn_cast<BitCastInst>(Val: Load.user_back())) {
708 assert(!LoadTy->isX86_AMXTy() && "Load from x86_amx* should not happen!");
709 if (BC->getType()->isX86_AMXTy())
710 return nullptr;
711 }
712
713 if (auto *CastUser = dyn_cast<CastInst>(Val: Load.user_back())) {
714 Type *DestTy = CastUser->getDestTy();
715 if (CastUser->isNoopCast(DL: IC.getDataLayout()) &&
716 LoadTy->isPtrOrPtrVectorTy() == DestTy->isPtrOrPtrVectorTy() &&
717 (!Load.isAtomic() || isSupportedAtomicType(Ty: DestTy))) {
718 LoadInst *NewLoad = IC.combineLoadToNewType(LI&: Load, NewTy: DestTy);
719 CastUser->replaceAllUsesWith(V: NewLoad);
720 IC.eraseInstFromFunction(I&: *CastUser);
721 return &Load;
722 }
723 }
724 }
725
726 // FIXME: We should also canonicalize loads of vectors when their elements are
727 // cast to other types.
728 return nullptr;
729}
730
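/// Unpack a load of an aggregate (a struct without padding, or a small array)
/// into loads of its elements that are recombined with insertvalue. For
/// example (illustrative IR, names and folding approximate):
///   %v = load { i32, i32 }, ptr %p
/// becomes, roughly:
///   %v.unpack = load i32, ptr %p
///   %v.elt = getelementptr inbounds i8, ptr %p, i64 4
///   %v.unpack1 = load i32, ptr %v.elt
///   %v0 = insertvalue { i32, i32 } poison, i32 %v.unpack, 0
///   %v1 = insertvalue { i32, i32 } %v0, i32 %v.unpack1, 1  ; replaces %v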
731static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
732 // FIXME: We could probably handle both volatile and atomic loads here with
733 // some care, but it isn't clear that this is important.
734 if (!LI.isSimple())
735 return nullptr;
736
737 Type *T = LI.getType();
738 if (!T->isAggregateType())
739 return nullptr;
740
741 StringRef Name = LI.getName();
742
743 if (auto *ST = dyn_cast<StructType>(Val: T)) {
744 // If the struct has only one element, we unpack it.
745 auto NumElements = ST->getNumElements();
746 if (NumElements == 1) {
747 LoadInst *NewLoad = IC.combineLoadToNewType(LI, NewTy: ST->getTypeAtIndex(N: 0U),
748 Suffix: ".unpack");
749 NewLoad->setAAMetadata(LI.getAAMetadata());
750 // Copy invariant metadata from parent load.
751 NewLoad->copyMetadata(SrcInst: LI, WL: LLVMContext::MD_invariant_load);
752 return IC.replaceInstUsesWith(I&: LI, V: IC.Builder.CreateInsertValue(
753 Agg: PoisonValue::get(T), Val: NewLoad, Idxs: 0, Name));
754 }
755
756 // We don't want to break loads with padding here, as we'd lose
757 // the knowledge that padding exists for the rest of the pipeline.
758 const DataLayout &DL = IC.getDataLayout();
759 auto *SL = DL.getStructLayout(Ty: ST);
760
761 if (SL->hasPadding())
762 return nullptr;
763
764 const auto Align = LI.getAlign();
765 auto *Addr = LI.getPointerOperand();
766 auto *IdxType = DL.getIndexType(PtrTy: Addr->getType());
767
768 Value *V = PoisonValue::get(T);
769 for (unsigned i = 0; i < NumElements; i++) {
770 auto *Ptr = IC.Builder.CreateInBoundsPtrAdd(
771 Ptr: Addr, Offset: IC.Builder.CreateTypeSize(Ty: IdxType, Size: SL->getElementOffset(Idx: i)),
772 Name: Name + ".elt");
773 auto *L = IC.Builder.CreateAlignedLoad(
774 Ty: ST->getElementType(N: i), Ptr,
775 Align: commonAlignment(A: Align, Offset: SL->getElementOffset(Idx: i).getKnownMinValue()),
776 Name: Name + ".unpack");
777 // Propagate AA metadata. It'll still be valid on the narrowed load.
778 L->setAAMetadata(LI.getAAMetadata());
779 // Copy invariant metadata from parent load.
780 L->copyMetadata(SrcInst: LI, WL: LLVMContext::MD_invariant_load);
781 V = IC.Builder.CreateInsertValue(Agg: V, Val: L, Idxs: i);
782 }
783
784 V->setName(Name);
785 return IC.replaceInstUsesWith(I&: LI, V);
786 }
787
788 if (auto *AT = dyn_cast<ArrayType>(Val: T)) {
789 auto *ET = AT->getElementType();
790 auto NumElements = AT->getNumElements();
791 if (NumElements == 1) {
792 LoadInst *NewLoad = IC.combineLoadToNewType(LI, NewTy: ET, Suffix: ".unpack");
793 NewLoad->setAAMetadata(LI.getAAMetadata());
794 return IC.replaceInstUsesWith(I&: LI, V: IC.Builder.CreateInsertValue(
795 Agg: PoisonValue::get(T), Val: NewLoad, Idxs: 0, Name));
796 }
797
798 // Bail out if the array is too large. Ideally we would like to optimize
799 // arrays of arbitrary size but this has a terrible impact on compile time.
800 // The threshold here is chosen arbitrarily, maybe needs a little bit of
801 // tuning.
802 if (NumElements > IC.MaxArraySizeForCombine)
803 return nullptr;
804
805 const DataLayout &DL = IC.getDataLayout();
806 TypeSize EltSize = DL.getTypeAllocSize(Ty: ET);
807 const auto Align = LI.getAlign();
808
809 auto *Addr = LI.getPointerOperand();
810 auto *IdxType = Type::getInt64Ty(C&: T->getContext());
811 auto *Zero = ConstantInt::get(Ty: IdxType, V: 0);
812
813 Value *V = PoisonValue::get(T);
814 TypeSize Offset = TypeSize::getZero();
815 for (uint64_t i = 0; i < NumElements; i++) {
816 Value *Indices[2] = {
817 Zero,
818 ConstantInt::get(Ty: IdxType, V: i),
819 };
820 auto *Ptr = IC.Builder.CreateInBoundsGEP(Ty: AT, Ptr: Addr, IdxList: ArrayRef(Indices),
821 Name: Name + ".elt");
822 auto EltAlign = commonAlignment(A: Align, Offset: Offset.getKnownMinValue());
823 auto *L = IC.Builder.CreateAlignedLoad(Ty: AT->getElementType(), Ptr,
824 Align: EltAlign, Name: Name + ".unpack");
825 L->setAAMetadata(LI.getAAMetadata());
826 V = IC.Builder.CreateInsertValue(Agg: V, Val: L, Idxs: i);
827 Offset += EltSize;
828 }
829
830 V->setName(Name);
831 return IC.replaceInstUsesWith(I&: LI, V);
832 }
833
834 return nullptr;
835}
836
837// If we can determine that all possible objects pointed to by the provided
838// pointer value are not only dereferenceable but also definitively less than
839// or equal to the provided maximum size, then return true (constant global
840// values and allocas fall into this category). Otherwise, return false.
841//
842// FIXME: This should probably live in ValueTracking (or similar).
843static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
844 const DataLayout &DL) {
845 SmallPtrSet<Value *, 4> Visited;
846 SmallVector<Value *, 4> Worklist(1, V);
847
848 do {
849 Value *P = Worklist.pop_back_val();
850 P = P->stripPointerCasts();
851
852 if (!Visited.insert(Ptr: P).second)
853 continue;
854
855 if (SelectInst *SI = dyn_cast<SelectInst>(Val: P)) {
856 Worklist.push_back(Elt: SI->getTrueValue());
857 Worklist.push_back(Elt: SI->getFalseValue());
858 continue;
859 }
860
861 if (PHINode *PN = dyn_cast<PHINode>(Val: P)) {
862 append_range(C&: Worklist, R: PN->incoming_values());
863 continue;
864 }
865
866 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Val: P)) {
867 if (GA->isInterposable())
868 return false;
869 Worklist.push_back(Elt: GA->getAliasee());
870 continue;
871 }
872
873 // If we know how big this object is, and it is less than MaxSize, continue
874 // searching. Otherwise, return false.
875 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val: P)) {
876 std::optional<TypeSize> AllocSize = AI->getAllocationSize(DL);
877 if (!AllocSize || AllocSize->isScalable() ||
878 AllocSize->getFixedValue() > MaxSize)
879 return false;
880 continue;
881 }
882
883 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Val: P)) {
884 if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
885 return false;
886
887 uint64_t InitSize = GV->getGlobalSize(DL);
888 if (InitSize > MaxSize)
889 return false;
890 continue;
891 }
892
893 return false;
894 } while (!Worklist.empty());
895
896 return true;
897}
898
899// If we're indexing into an object of a known size, and the outer index is
900// not a constant, but having any value but zero would lead to undefined
901// behavior, replace it with zero.
902//
903// For example, if we have:
904// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
905// ...
906// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
907// ... = load i32* %arrayidx, align 4
908// Then we know that we can replace %x in the GEP with i64 0.
909//
910// FIXME: We could fold any GEP index to zero that would cause UB if it were
911// not zero. Currently, we only handle the first such index. Also, we could
912// search through non-zero constant indices if we kept track of the
913// offsets those indices implied.
914static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
915 GetElementPtrInst *GEPI, Instruction *MemI,
916 unsigned &Idx) {
917 if (GEPI->getNumOperands() < 2)
918 return false;
919
920 // Find the first non-zero index of a GEP. If all indices are zero, return
921 // one past the last index.
922 auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
923 unsigned I = 1;
924 for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
925 Value *V = GEPI->getOperand(i_nocapture: I);
926 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val: V))
927 if (CI->isZero())
928 continue;
929
930 break;
931 }
932
933 return I;
934 };
935
936 // Skip through initial 'zero' indices, and find the corresponding pointer
937 // type. See if the next index is not a constant.
938 Idx = FirstNZIdx(GEPI);
939 if (Idx == GEPI->getNumOperands())
940 return false;
941 if (isa<Constant>(Val: GEPI->getOperand(i_nocapture: Idx)))
942 return false;
943
944 SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
945 Type *SourceElementType = GEPI->getSourceElementType();
946 // Size information about scalable vectors is not available, so we cannot
947 // deduce whether indexing at n is undefined behaviour or not. Bail out.
948 if (SourceElementType->isScalableTy())
949 return false;
950
951 Type *AllocTy = GetElementPtrInst::getIndexedType(Ty: SourceElementType, IdxList: Ops);
952 if (!AllocTy || !AllocTy->isSized())
953 return false;
954 const DataLayout &DL = IC.getDataLayout();
955 uint64_t TyAllocSize = DL.getTypeAllocSize(Ty: AllocTy).getFixedValue();
956
957 // If there are more indices after the one we might replace with a zero, make
958 // sure they're all non-negative. If any of them are negative, the overall
959 // address being computed might be before the base address determined by the
960 // first non-zero index.
961 auto IsAllNonNegative = [&]() {
962 for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
963 KnownBits Known = IC.computeKnownBits(V: GEPI->getOperand(i_nocapture: i), CxtI: MemI);
964 if (Known.isNonNegative())
965 continue;
966 return false;
967 }
968
969 return true;
970 };
971
972 // FIXME: If the GEP is not inbounds, and there are extra indices after the
973 // one we'll replace, those could cause the address computation to wrap
974 // (rendering the IsAllNonNegative() check below insufficient). We can do
975 // better, ignoring zero indices (and other indices we can prove small
976 // enough not to wrap).
977 if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
978 return false;
979
980 // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
981 // also known to be dereferenceable.
982 return isObjectSizeLessThanOrEq(V: GEPI->getOperand(i_nocapture: 0), MaxSize: TyAllocSize, DL) &&
983 IsAllNonNegative();
984}
985
986// If we're indexing into an object with a variable index for the memory
987// access, but the object has only one element, we can assume that the index
988// will always be zero. If we replace the GEP, return it.
989static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
990 Instruction &MemI) {
991 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Val: Ptr)) {
992 unsigned Idx;
993 if (canReplaceGEPIdxWithZero(IC, GEPI, MemI: &MemI, Idx)) {
994 Instruction *NewGEPI = GEPI->clone();
995 NewGEPI->setOperand(i: Idx,
996 Val: ConstantInt::get(Ty: GEPI->getOperand(i_nocapture: Idx)->getType(), V: 0));
997 IC.InsertNewInstBefore(New: NewGEPI, Old: GEPI->getIterator());
998 return NewGEPI;
999 }
1000 }
1001
1002 return nullptr;
1003}
1004
1005static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
1006 if (NullPointerIsDefined(F: SI.getFunction(), AS: SI.getPointerAddressSpace()))
1007 return false;
1008
1009 auto *Ptr = SI.getPointerOperand();
1010 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Val: Ptr))
1011 Ptr = GEPI->getOperand(i_nocapture: 0);
1012 return (isa<ConstantPointerNull>(Val: Ptr) &&
1013 !NullPointerIsDefined(F: SI.getFunction(), AS: SI.getPointerAddressSpace()));
1014}
1015
1016static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
1017 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Val: Op)) {
1018 const Value *GEPI0 = GEPI->getOperand(i_nocapture: 0);
1019 if (isa<ConstantPointerNull>(Val: GEPI0) &&
1020 !NullPointerIsDefined(F: LI.getFunction(), AS: GEPI->getPointerAddressSpace()))
1021 return true;
1022 }
1023 if (isa<UndefValue>(Val: Op) ||
1024 (isa<ConstantPointerNull>(Val: Op) &&
1025 !NullPointerIsDefined(F: LI.getFunction(), AS: LI.getPointerAddressSpace())))
1026 return true;
1027 return false;
1028}
1029
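/// Try to simplify a pointer operand \p V that is known to be non-null at its
/// point of use (for example, the pointer operand of a memory access in an
/// address space where null is not defined, or one carrying a dereferenceable
/// guarantee). A minimal illustration: select(%c, null, %p) used as such an
/// operand simplifies to %p.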
1030Value *InstCombinerImpl::simplifyNonNullOperand(Value *V,
1031 bool HasDereferenceable,
1032 unsigned Depth) {
1033 if (auto *Sel = dyn_cast<SelectInst>(Val: V)) {
1034 if (isa<ConstantPointerNull>(Val: Sel->getOperand(i_nocapture: 1)))
1035 return Sel->getOperand(i_nocapture: 2);
1036
1037 if (isa<ConstantPointerNull>(Val: Sel->getOperand(i_nocapture: 2)))
1038 return Sel->getOperand(i_nocapture: 1);
1039 }
1040
1041 if (!V->hasOneUse())
1042 return nullptr;
1043
1044 constexpr unsigned RecursionLimit = 3;
1045 if (Depth == RecursionLimit)
1046 return nullptr;
1047
1048 if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: V)) {
1049 if (HasDereferenceable || GEP->isInBounds()) {
1050 if (auto *Res = simplifyNonNullOperand(V: GEP->getPointerOperand(),
1051 HasDereferenceable, Depth: Depth + 1)) {
1052 replaceOperand(I&: *GEP, OpNum: 0, V: Res);
1053 addToWorklist(I: GEP);
1054 return nullptr;
1055 }
1056 }
1057 }
1058
1059 if (auto *PHI = dyn_cast<PHINode>(Val: V)) {
1060 bool Changed = false;
1061 for (Use &U : PHI->incoming_values()) {
1062 // We set Depth to RecursionLimit to avoid expensive recursion.
1063 if (auto *Res = simplifyNonNullOperand(V: U.get(), HasDereferenceable,
1064 Depth: RecursionLimit)) {
1065 replaceUse(U, NewValue: Res);
1066 Changed = true;
1067 }
1068 }
1069 if (Changed)
1070 addToWorklist(I: PHI);
1071 return nullptr;
1072 }
1073
1074 return nullptr;
1075}
1076
1077Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
1078 Value *Op = LI.getOperand(i_nocapture: 0);
1079 if (Value *Res = simplifyLoadInst(LI: &LI, PtrOp: Op, Q: SQ.getWithInstruction(I: &LI)))
1080 return replaceInstUsesWith(I&: LI, V: Res);
1081
1082 // Try to canonicalize the loaded type.
1083 if (Instruction *Res = combineLoadToOperationType(IC&: *this, Load&: LI))
1084 return Res;
1085
1086 // Replace GEP indices if possible.
1087 if (Instruction *NewGEPI = replaceGEPIdxWithZero(IC&: *this, Ptr: Op, MemI&: LI))
1088 return replaceOperand(I&: LI, OpNum: 0, V: NewGEPI);
1089
1090 if (Instruction *Res = unpackLoadToAggregate(IC&: *this, LI))
1091 return Res;
1092
1093 // Do really simple store-to-load forwarding and load CSE, to catch cases
1094 // where there are several consecutive memory accesses to the same location,
1095 // separated by a few arithmetic operations.
1096 bool IsLoadCSE = false;
1097 BatchAAResults BatchAA(*AA);
1098 if (Value *AvailableVal = FindAvailableLoadedValue(Load: &LI, AA&: BatchAA, IsLoadCSE: &IsLoadCSE)) {
1099 if (IsLoadCSE)
1100 combineMetadataForCSE(K: cast<LoadInst>(Val: AvailableVal), J: &LI, DoesKMove: false);
1101
1102 return replaceInstUsesWith(
1103 I&: LI, V: Builder.CreateBitOrPointerCast(V: AvailableVal, DestTy: LI.getType(),
1104 Name: LI.getName() + ".cast"));
1105 }
1106
1107 // None of the following transforms are legal for volatile/ordered atomic
1108 // loads. Most of them do apply for unordered atomics.
1109 if (!LI.isUnordered()) return nullptr;
1110
1111 // load(gep null, ...) -> unreachable
1112 // load null/undef -> unreachable
1113 // TODO: Consider a target hook for valid address spaces for these transforms.
1114 if (canSimplifyNullLoadOrGEP(LI, Op)) {
1115 CreateNonTerminatorUnreachable(InsertAt: &LI);
1116 return replaceInstUsesWith(I&: LI, V: PoisonValue::get(T: LI.getType()));
1117 }
1118
1119 if (Op->hasOneUse()) {
1120 // Change select and PHI nodes to select values instead of addresses: this
1121 // helps alias analysis out a lot, allows many other simplifications, and
1122 // exposes redundancy in the code.
1123 //
1124 // Note that we cannot do the transformation unless we know that the
1125 // introduced loads cannot trap! Something like this is valid as long as
1126 // the condition is always false: load (select bool %C, int* null, int* %G),
1127 // but it would not be valid if we transformed it to load from null
1128 // unconditionally.
1129 //
1130
1131 AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(Val: Op);
1132 Value *SelectOp = Op;
1133 if (ASC && ASC->getOperand(i_nocapture: 0)->hasOneUse())
1134 SelectOp = ASC->getOperand(i_nocapture: 0);
1135 if (SelectInst *SI = dyn_cast<SelectInst>(Val: SelectOp)) {
1136 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
1137 // or
1138 // load (addrspacecast(select (Cond, &V1, &V2))) -->
1139 // select(Cond, load (addrspacecast(&V1)), load (addrspacecast(&V2))).
1140 Align Alignment = LI.getAlign();
1141 if (isSafeToLoadUnconditionally(V: SI->getOperand(i_nocapture: 1), Ty: LI.getType(),
1142 Alignment, DL, ScanFrom: SI) &&
1143 isSafeToLoadUnconditionally(V: SI->getOperand(i_nocapture: 2), Ty: LI.getType(),
1144 Alignment, DL, ScanFrom: SI)) {
1145
1146 auto MaybeCastedLoadOperand = [&](Value *Op) {
1147 if (ASC)
1148 return Builder.CreateAddrSpaceCast(V: Op, DestTy: ASC->getType(),
1149 Name: Op->getName() + ".cast");
1150 return Op;
1151 };
1152 Value *LoadOp1 = MaybeCastedLoadOperand(SI->getOperand(i_nocapture: 1));
1153 LoadInst *V1 = Builder.CreateLoad(Ty: LI.getType(), Ptr: LoadOp1,
1154 Name: LoadOp1->getName() + ".val");
1155
1156 Value *LoadOp2 = MaybeCastedLoadOperand(SI->getOperand(i_nocapture: 2));
1157 LoadInst *V2 = Builder.CreateLoad(Ty: LI.getType(), Ptr: LoadOp2,
1158 Name: LoadOp2->getName() + ".val");
1159 assert(LI.isUnordered() && "implied by above");
1160 V1->setAlignment(Alignment);
1161 V1->setAtomic(Ordering: LI.getOrdering(), SSID: LI.getSyncScopeID());
1162 V2->setAlignment(Alignment);
1163 V2->setAtomic(Ordering: LI.getOrdering(), SSID: LI.getSyncScopeID());
1164 // It is safe to copy any metadata that does not trigger UB. Copy any
1165 // poison-generating metadata.
1166 V1->copyMetadata(SrcInst: LI, WL: Metadata::PoisonGeneratingIDs);
1167 V2->copyMetadata(SrcInst: LI, WL: Metadata::PoisonGeneratingIDs);
1168 return SelectInst::Create(C: SI->getCondition(), S1: V1, S2: V2, NameStr: "", InsertBefore: nullptr,
1169 MDFrom: ProfcheckDisableMetadataFixes ? nullptr : SI);
1170 }
1171 }
1172 }
1173
1174 if (!NullPointerIsDefined(F: LI.getFunction(), AS: LI.getPointerAddressSpace()))
1175 if (Value *V = simplifyNonNullOperand(V: Op, /*HasDereferenceable=*/true))
1176 return replaceOperand(I&: LI, OpNum: 0, V);
1177
1178 return nullptr;
1179}
1180
1181/// Look for extractelement/insertvalue sequence that acts like a bitcast.
1182///
1183/// \returns underlying value that was "cast", or nullptr otherwise.
1184///
1185/// For example, if we have:
1186///
1187/// %E0 = extractelement <2 x double> %U, i32 0
1188/// %V0 = insertvalue [2 x double] undef, double %E0, 0
1189/// %E1 = extractelement <2 x double> %U, i32 1
1190/// %V1 = insertvalue [2 x double] %V0, double %E1, 1
1191///
1192/// and the layout of a <2 x double> is isomorphic to a [2 x double],
1193/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
1194/// Note that %U may contain non-undef values where %V1 has undef.
1195static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
1196 Value *U = nullptr;
1197 while (auto *IV = dyn_cast<InsertValueInst>(Val: V)) {
1198 auto *E = dyn_cast<ExtractElementInst>(Val: IV->getInsertedValueOperand());
1199 if (!E)
1200 return nullptr;
1201 auto *W = E->getVectorOperand();
1202 if (!U)
1203 U = W;
1204 else if (U != W)
1205 return nullptr;
1206 auto *CI = dyn_cast<ConstantInt>(Val: E->getIndexOperand());
1207 if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
1208 return nullptr;
1209 V = IV->getAggregateOperand();
1210 }
1211 if (!match(V, P: m_Undef()) || !U)
1212 return nullptr;
1213
1214 auto *UT = cast<VectorType>(Val: U->getType());
1215 auto *VT = V->getType();
1216 // Check that types UT and VT are bitwise isomorphic.
1217 const auto &DL = IC.getDataLayout();
1218 if (DL.getTypeStoreSizeInBits(Ty: UT) != DL.getTypeStoreSizeInBits(Ty: VT)) {
1219 return nullptr;
1220 }
1221 if (auto *AT = dyn_cast<ArrayType>(Val: VT)) {
1222 if (AT->getNumElements() != cast<FixedVectorType>(Val: UT)->getNumElements())
1223 return nullptr;
1224 } else {
1225 auto *ST = cast<StructType>(Val: VT);
1226 if (ST->getNumElements() != cast<FixedVectorType>(Val: UT)->getNumElements())
1227 return nullptr;
1228 for (const auto *EltT : ST->elements()) {
1229 if (EltT != UT->getElementType())
1230 return nullptr;
1231 }
1232 }
1233 return U;
1234}
1235
1236/// Combine stores to match the type of value being stored.
1237///
1238/// The core idea here is that the memory does not have any intrinsic type and,
1239/// where we can, we should match the type of a store to the type of the value
1240/// being stored.
1241///
1242/// However, this routine must never change the width of a store or the number of
1243/// stores as that would introduce a semantic change. This combine is expected to
1244/// be a semantic no-op which just allows stores to more closely model the types
1245/// of their incoming values.
1246///
1247/// Currently, we also refuse to change the precise type used for an atomic or
1248/// volatile store. This is debatable, and might be reasonable to change later.
1249/// However, it is risky in case some backend or other part of LLVM is relying
1250/// on the exact type stored to select appropriate atomic operations.
1251///
1252/// \returns true if the store was successfully combined away. This indicates
1253/// the caller must erase the store instruction. We have to let the caller erase
1254/// the store instruction as otherwise there is no way to signal whether it was
1255/// combined or not: IC.EraseInstFromFunction returns a null pointer.
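///
/// For example (illustrative IR), a store of a bitcast value
///   %b = bitcast <4 x i32> %v to <2 x i64>
///   store <2 x i64> %b, ptr %p
/// is, in effect, rewritten to store the original value type:
///   store <4 x i32> %v, ptr %p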
1256static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
1257 // FIXME: We could probably with some care handle both volatile and ordered
1258 // atomic stores here but it isn't clear that this is important.
1259 if (!SI.isUnordered())
1260 return false;
1261
1262 // swifterror values can't be bitcasted.
1263 if (SI.getPointerOperand()->isSwiftError())
1264 return false;
1265
1266 Value *V = SI.getValueOperand();
1267
1268 // Fold away bit casts of the stored value by storing the original type.
1269 if (auto *BC = dyn_cast<BitCastInst>(Val: V)) {
1270 assert(!BC->getType()->isX86_AMXTy() &&
1271 "store to x86_amx* should not happen!");
1272 V = BC->getOperand(i_nocapture: 0);
1273 // Don't transform when the type is x86_amx; this keeps the pass that lowers
1274 // the x86_amx type happy.
1275 if (V->getType()->isX86_AMXTy())
1276 return false;
1277 if (!SI.isAtomic() || isSupportedAtomicType(Ty: V->getType())) {
1278 combineStoreToNewValue(IC, SI, V);
1279 return true;
1280 }
1281 }
1282
1283 if (Value *U = likeBitCastFromVector(IC, V))
1284 if (!SI.isAtomic() || isSupportedAtomicType(Ty: U->getType())) {
1285 combineStoreToNewValue(IC, SI, V: U);
1286 return true;
1287 }
1288
1289 // FIXME: We should also canonicalize stores of vectors when their elements
1290 // are cast to other types.
1291 return false;
1292}
1293
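/// Unpack a store of an aggregate (a struct without padding, or a small array)
/// into stores of its elements. For example (illustrative IR, names and
/// folding approximate):
///   store { i32, i32 } %v, ptr %p
/// becomes, roughly:
///   %v.elt = extractvalue { i32, i32 } %v, 0
///   store i32 %v.elt, ptr %p
///   %p.repack = getelementptr inbounds i8, ptr %p, i64 4
///   %v.elt1 = extractvalue { i32, i32 } %v, 1
///   store i32 %v.elt1, ptr %p.repack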
1294static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
1295 // FIXME: We could probably with some care handle both volatile and atomic
1296 // stores here but it isn't clear that this is important.
1297 if (!SI.isSimple())
1298 return false;
1299
1300 Value *V = SI.getValueOperand();
1301 Type *T = V->getType();
1302
1303 if (!T->isAggregateType())
1304 return false;
1305
1306 if (auto *ST = dyn_cast<StructType>(Val: T)) {
1307 // If the struct has only one element, we unpack it.
1308 unsigned Count = ST->getNumElements();
1309 if (Count == 1) {
1310 V = IC.Builder.CreateExtractValue(Agg: V, Idxs: 0);
1311 combineStoreToNewValue(IC, SI, V);
1312 return true;
1313 }
1314
1315 // We don't want to break stores with padding here, as we'd lose
1316 // the knowledge that padding exists for the rest of the pipeline.
1317 const DataLayout &DL = IC.getDataLayout();
1318 auto *SL = DL.getStructLayout(Ty: ST);
1319
1320 if (SL->hasPadding())
1321 return false;
1322
1323 const auto Align = SI.getAlign();
1324
1325 SmallString<16> EltName = V->getName();
1326 EltName += ".elt";
1327 auto *Addr = SI.getPointerOperand();
1328 SmallString<16> AddrName = Addr->getName();
1329 AddrName += ".repack";
1330
1331 auto *IdxType = DL.getIndexType(PtrTy: Addr->getType());
1332 for (unsigned i = 0; i < Count; i++) {
1333 auto *Ptr = IC.Builder.CreateInBoundsPtrAdd(
1334 Ptr: Addr, Offset: IC.Builder.CreateTypeSize(Ty: IdxType, Size: SL->getElementOffset(Idx: i)),
1335 Name: AddrName);
1336 auto *Val = IC.Builder.CreateExtractValue(Agg: V, Idxs: i, Name: EltName);
1337 auto EltAlign =
1338 commonAlignment(A: Align, Offset: SL->getElementOffset(Idx: i).getKnownMinValue());
1339 llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, Align: EltAlign);
1340 NS->setAAMetadata(SI.getAAMetadata());
1341 }
1342
1343 return true;
1344 }
1345
1346 if (auto *AT = dyn_cast<ArrayType>(Val: T)) {
1347 // If the array has only one element, we unpack it.
1348 auto NumElements = AT->getNumElements();
1349 if (NumElements == 1) {
1350 V = IC.Builder.CreateExtractValue(Agg: V, Idxs: 0);
1351 combineStoreToNewValue(IC, SI, V);
1352 return true;
1353 }
1354
1355 // Bail out if the array is too large. Ideally we would like to optimize
1356 // arrays of arbitrary size but this has a terrible impact on compile time.
1357 // The threshold here is chosen arbitrarily, maybe needs a little bit of
1358 // tuning.
1359 if (NumElements > IC.MaxArraySizeForCombine)
1360 return false;
1361
1362 const DataLayout &DL = IC.getDataLayout();
1363 TypeSize EltSize = DL.getTypeAllocSize(Ty: AT->getElementType());
1364 const auto Align = SI.getAlign();
1365
1366 SmallString<16> EltName = V->getName();
1367 EltName += ".elt";
1368 auto *Addr = SI.getPointerOperand();
1369 SmallString<16> AddrName = Addr->getName();
1370 AddrName += ".repack";
1371
1372 auto *IdxType = Type::getInt64Ty(C&: T->getContext());
1373 auto *Zero = ConstantInt::get(Ty: IdxType, V: 0);
1374
1375 TypeSize Offset = TypeSize::getZero();
1376 for (uint64_t i = 0; i < NumElements; i++) {
1377 Value *Indices[2] = {
1378 Zero,
1379 ConstantInt::get(Ty: IdxType, V: i),
1380 };
1381 auto *Ptr =
1382 IC.Builder.CreateInBoundsGEP(Ty: AT, Ptr: Addr, IdxList: ArrayRef(Indices), Name: AddrName);
1383 auto *Val = IC.Builder.CreateExtractValue(Agg: V, Idxs: i, Name: EltName);
1384 auto EltAlign = commonAlignment(A: Align, Offset: Offset.getKnownMinValue());
1385 Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, Align: EltAlign);
1386 NS->setAAMetadata(SI.getAAMetadata());
1387 Offset += EltSize;
1388 }
1389
1390 return true;
1391 }
1392
1393 return false;
1394}
1395
1396/// equivalentAddressValues - Test if A and B will obviously have the same
1397/// value. This includes recognizing that %t0 and %t1 will have the same
1398/// value in code like this:
1399/// %t0 = getelementptr \@a, 0, 3
1400/// store i32 0, i32* %t0
1401/// %t1 = getelementptr \@a, 0, 3
1402/// %t2 = load i32* %t1
1403///
1404static bool equivalentAddressValues(Value *A, Value *B) {
1405 // Test if the values are trivially equivalent.
1406 if (A == B) return true;
1407
1408 // Test if the values come from identical arithmetic instructions.
1409 // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
1410 // it's only used to compare two uses within the same basic block, which
1411 // means that they'll always either have the same value or one of them
1412 // will have an undefined value.
1413 if (isa<BinaryOperator>(Val: A) ||
1414 isa<CastInst>(Val: A) ||
1415 isa<PHINode>(Val: A) ||
1416 isa<GetElementPtrInst>(Val: A))
1417 if (Instruction *BI = dyn_cast<Instruction>(Val: B))
1418 if (cast<Instruction>(Val: A)->isIdenticalToWhenDefined(I: BI))
1419 return true;
1420
1421 // Otherwise they may not be equivalent.
1422 return false;
1423}
1424
1425Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
1426 Value *Val = SI.getOperand(i_nocapture: 0);
1427 Value *Ptr = SI.getOperand(i_nocapture: 1);
1428
1429 // Try to canonicalize the stored type.
1430 if (combineStoreToValueType(IC&: *this, SI))
1431 return eraseInstFromFunction(I&: SI);
1432
1433 // Try to unpack aggregate stores into stores of their elements.
1434 if (unpackStoreToAggregate(IC&: *this, SI))
1435 return eraseInstFromFunction(I&: SI);
1436
1437 // Replace GEP indices if possible.
1438 if (Instruction *NewGEPI = replaceGEPIdxWithZero(IC&: *this, Ptr, MemI&: SI))
1439 return replaceOperand(I&: SI, OpNum: 1, V: NewGEPI);
1440
1441 // Don't hack volatile/ordered stores.
1442 // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1443 if (!SI.isUnordered()) return nullptr;
1444
1445 // If the pointer operand is an alloca used only by this store, or a single-use
1446 // GEP that is an alloca's only use, delete the store, making the alloca dead.
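// For example (illustrative only):
//   %a = alloca i32
//   store i32 0, ptr %a   ; %a has no other users, so this store can be erased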
1447 if (Ptr->hasOneUse()) {
1448 if (isa<AllocaInst>(Val: Ptr))
1449 return eraseInstFromFunction(I&: SI);
1450 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Val: Ptr)) {
1451 if (isa<AllocaInst>(Val: GEP->getOperand(i_nocapture: 0))) {
1452 if (GEP->getOperand(i_nocapture: 0)->hasOneUse())
1453 return eraseInstFromFunction(I&: SI);
1454 }
1455 }
1456 }
1457
1458 // If we have a store to a location which is known constant, we can conclude
1459 // that the store must be storing the constant value (else the memory
1460 // wouldn't be constant), and this must be a noop.
1461 if (!isModSet(MRI: AA->getModRefInfoMask(P: Ptr)))
1462 return eraseInstFromFunction(I&: SI);
1463
1464 // Do really simple DSE, to catch cases where there are several consecutive
1465 // stores to the same location, separated by a few arithmetic operations. This
1466 // situation often occurs with bitfield accesses.
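// Illustrative example (not from the original source): in
//   store i32 %x, ptr %p
//   %y = or i32 %x, 128
//   store i32 %y, ptr %p
// the first store is dead, and the backwards scan below deletes it when
// visiting the second one.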
1467 BasicBlock::iterator BBI(SI);
1468 for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
1469 --ScanInsts) {
1470 --BBI;
1471 // Don't count debug or pseudo instructions against the scan limit, lest
1472 // their presence affect codegen.
1473 if (BBI->isDebugOrPseudoInst()) {
1474 ScanInsts++;
1475 continue;
1476 }
1477
1478 if (StoreInst *PrevSI = dyn_cast<StoreInst>(Val&: BBI)) {
1479 // Is the previous store unordered, to the same address, and of the same type?
1480 if (PrevSI->isUnordered() &&
1481 equivalentAddressValues(A: PrevSI->getOperand(i_nocapture: 1), B: SI.getOperand(i_nocapture: 1)) &&
1482 PrevSI->getValueOperand()->getType() ==
1483 SI.getValueOperand()->getType()) {
1484 ++NumDeadStore;
1485 // Manually add back the original store to the worklist now, so it will
1486 // be processed after the operands of the removed store, as this may
1487 // expose additional DSE opportunities.
1488 Worklist.push(I: &SI);
1489 eraseInstFromFunction(I&: *PrevSI);
1490 return nullptr;
1491 }
1492 break;
1493 }
1494
1495 // If this is a load, we have to stop. However, if the load is from the
1496 // pointer we are storing to and it produces the value being stored, then
1497 // *this* store is dead (X = load P; store X -> P).
1498 if (LoadInst *LI = dyn_cast<LoadInst>(Val&: BBI)) {
1499 if (LI == Val && equivalentAddressValues(A: LI->getOperand(i_nocapture: 0), B: Ptr)) {
1500 assert(SI.isUnordered() && "can't eliminate ordering operation");
1501 return eraseInstFromFunction(I&: SI);
1502 }
1503
1504 // Otherwise, this is a load from some other location. Stores before it
1505 // may not be dead.
1506 break;
1507 }
1508
1509 // Don't skip over loads, throws or things that can modify memory.
1510 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
1511 break;
1512 }
1513
1514 // store X, null -> turns into 'unreachable' in SimplifyCFG
1515 // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
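// Illustrative example (not from the original source): in the default address
// space, 'store i32 %x, ptr null' is undefined behavior, so the stored value
// is canonicalized to poison here and SimplifyCFG later turns the store into
// 'unreachable'.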
1516 if (canSimplifyNullStoreOrGEP(SI)) {
1517 if (!isa<PoisonValue>(Val))
1518 return replaceOperand(I&: SI, OpNum: 0, V: PoisonValue::get(T: Val->getType()));
1519 return nullptr; // Do not modify these!
1520 }
1521
1522 // A store to undef is a non-terminator unreachable marker. Don't remove it.
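// Illustrative example (not from the original source):
//   store i32 0, ptr undef
// acts as such a marker; the calls below clean up instructions around it and
// prune any blocks that become unreachable as a result.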
1523 if (isa<UndefValue>(Val: Ptr)) {
1524 // Remove guaranteed-to-transfer instructions before the marker.
1525 removeInstructionsBeforeUnreachable(I&: SI);
1526
1527 // Remove all instructions after the marker and handle dead blocks this
1528 // implies.
1529 SmallVector<BasicBlock *> Worklist;
1530 handleUnreachableFrom(I: SI.getNextNode(), Worklist);
1531 handlePotentiallyDeadBlocks(Worklist);
1532 return nullptr;
1533 }
1534
1535 // store undef, Ptr -> noop
1536 // FIXME: This is technically incorrect because it might overwrite a poison
1537 // value. Change to PoisonValue once #52930 is resolved.
1538 if (isa<UndefValue>(Val))
1539 return eraseInstFromFunction(I&: SI);
1540
1541 if (!NullPointerIsDefined(F: SI.getFunction(), AS: SI.getPointerAddressSpace()))
1542 if (Value *V = simplifyNonNullOperand(V: Ptr, /*HasDereferenceable=*/true))
1543 return replaceOperand(I&: SI, OpNum: 1, V);
1544
1545 return nullptr;
1546}
1547
1548/// Try to transform:
1549///   if () { *P = v1; } else { *P = v2; }
1550/// or:
1551/// *P = v1; if () { *P = v2; }
1552/// into a phi node with a store in the successor.
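///
/// A sketch of the triangle shape handled below, using illustrative names
/// that are not taken from the source:
///   OtherBB:
///     store i32 %v1, ptr %p
///     br i1 %c, label %StoreBB, label %DestBB
///   StoreBB:                                      ; contains SI
///     store i32 %v2, ptr %p
///     br label %DestBB
/// is rewritten so that DestBB instead starts with
///   %storemerge = phi i32 [ %v1, %OtherBB ], [ %v2, %StoreBB ]
///   store i32 %storemerge, ptr %p
/// and both original stores are erased.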
1553bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
1554 if (!SI.isUnordered())
1555 return false; // This code has not been audited for volatile/ordered case.
1556
1557 // Check if the successor block has exactly 2 incoming edges.
1558 BasicBlock *StoreBB = SI.getParent();
1559 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(Idx: 0);
1560 if (!DestBB->hasNPredecessors(N: 2))
1561 return false;
1562
1563 // Capture the other block (the block that doesn't contain our store).
1564 pred_iterator PredIter = pred_begin(BB: DestBB);
1565 if (*PredIter == StoreBB)
1566 ++PredIter;
1567 BasicBlock *OtherBB = *PredIter;
1568
1569 // Bail out if the relevant blocks are not all distinct. This can happen,
1570 // for example, if SI is in an infinite loop.
1571 if (StoreBB == DestBB || OtherBB == DestBB)
1572 return false;
1573
1574 // Verify that the other block ends in a branch and is not otherwise empty.
1575 BasicBlock::iterator BBI(OtherBB->getTerminator());
1576 BranchInst *OtherBr = dyn_cast<BranchInst>(Val&: BBI);
1577 if (!OtherBr || BBI == OtherBB->begin())
1578 return false;
1579
1580 auto OtherStoreIsMergeable = [&](StoreInst *OtherStore) -> bool {
1581 if (!OtherStore ||
1582 OtherStore->getPointerOperand() != SI.getPointerOperand())
1583 return false;
1584
1585 auto *SIVTy = SI.getValueOperand()->getType();
1586 auto *OSVTy = OtherStore->getValueOperand()->getType();
1587 return CastInst::isBitOrNoopPointerCastable(SrcTy: OSVTy, DestTy: SIVTy, DL) &&
1588 SI.hasSameSpecialState(I2: OtherStore);
1589 };
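// For example (illustrative only), a 'store float' and a 'store i32' to the
// same pointer can be merged, assuming they otherwise share the same special
// state (alignment, ordering, volatility): float and i32 are bitcastable, and
// the other block's value is reconciled with a cast when the phi is built
// below.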
1590
1591 // If the other block ends in an unconditional branch, check for the 'if then
1592 // else' case. We already know there is an instruction before the branch.
1593 StoreInst *OtherStore = nullptr;
1594 if (OtherBr->isUnconditional()) {
1595 --BBI;
1596 // Skip over debugging info and pseudo probes.
1597 while (BBI->isDebugOrPseudoInst()) {
1598 if (BBI==OtherBB->begin())
1599 return false;
1600 --BBI;
1601 }
1602 // If this isn't a store to the same location that we can merge with SI,
1603 // bail out.
1604 OtherStore = dyn_cast<StoreInst>(Val&: BBI);
1605 if (!OtherStoreIsMergeable(OtherStore))
1606 return false;
1607 } else {
1608 // Otherwise, the other block ended with a conditional branch. If one of the
1609 // destinations is StoreBB, then we have the if/then case.
1610 if (OtherBr->getSuccessor(i: 0) != StoreBB &&
1611 OtherBr->getSuccessor(i: 1) != StoreBB)
1612 return false;
1613
1614 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
1615 // if/then triangle. See if there is a store to the same ptr as SI that
1616 // lives in OtherBB.
1617 for (;; --BBI) {
1618 // Check to see if we find the matching store.
1619 OtherStore = dyn_cast<StoreInst>(Val&: BBI);
1620 if (OtherStoreIsMergeable(OtherStore))
1621 break;
1622
1623 // If we find something that may be using or overwriting the stored
1624 // value, or if we run out of instructions, we can't do the transform.
1625 if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
1626 BBI->mayWriteToMemory() || BBI == OtherBB->begin())
1627 return false;
1628 }
1629
1630 // In order to eliminate the store in OtherBr, we have to make sure nothing
1631 // reads or overwrites the stored value in StoreBB.
1632 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
1633 // FIXME: This should really be AA driven.
1634 if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
1635 return false;
1636 }
1637 }
1638
1639 // Insert a PHI node now if we need it.
1640 Value *MergedVal = OtherStore->getValueOperand();
1641 // The debug locations of the original instructions might differ. Merge them.
1642 DebugLoc MergedLoc =
1643 DebugLoc::getMergedLocation(LocA: SI.getDebugLoc(), LocB: OtherStore->getDebugLoc());
1644 if (MergedVal != SI.getValueOperand()) {
1645 PHINode *PN =
1646 PHINode::Create(Ty: SI.getValueOperand()->getType(), NumReservedValues: 2, NameStr: "storemerge");
1647 PN->addIncoming(V: SI.getValueOperand(), BB: SI.getParent());
1648 Builder.SetInsertPoint(OtherStore);
1649 PN->addIncoming(V: Builder.CreateBitOrPointerCast(V: MergedVal, DestTy: PN->getType()),
1650 BB: OtherBB);
1651 MergedVal = InsertNewInstBefore(New: PN, Old: DestBB->begin());
1652 PN->setDebugLoc(MergedLoc);
1653 }
1654
1655 // Advance to a place where it is safe to insert the new store and insert it.
1656 BBI = DestBB->getFirstInsertionPt();
1657 StoreInst *NewSI =
1658 new StoreInst(MergedVal, SI.getOperand(i_nocapture: 1), SI.isVolatile(), SI.getAlign(),
1659 SI.getOrdering(), SI.getSyncScopeID());
1660 InsertNewInstBefore(New: NewSI, Old: BBI);
1661 NewSI->setDebugLoc(MergedLoc);
1662 NewSI->mergeDIAssignID(SourceInstructions: {&SI, OtherStore});
1663
1664 // If the two stores had AA tags, merge them.
1665 AAMDNodes AATags = SI.getAAMetadata();
1666 if (AATags)
1667 NewSI->setAAMetadata(AATags.merge(Other: OtherStore->getAAMetadata()));
1668
1669 // Nuke the old stores.
1670 eraseInstFromFunction(I&: SI);
1671 eraseInstFromFunction(I&: *OtherStore);
1672 return true;
1673}
1674