//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CheckedArithmetic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;

static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));

//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

std::optional<TypeSize>
AllocaInst::getAllocationSize(const DataLayout &DL) const {
  TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
  // Zero-sized types can return early since 0 * N = 0 for any array size N.
  if (Size.isZero())
    return Size;
  if (isArrayAllocation()) {
    auto *C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return std::nullopt;
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
    auto CheckedProd =
        checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
    if (!CheckedProd)
      return std::nullopt;
    return TypeSize::getFixed(*CheckedProd);
  }
  return Size;
}

std::optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  std::optional<TypeSize> Size = getAllocationSize(DL);
  if (!Size)
    return std::nullopt;
  auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
                                        static_cast<TypeSize::ScalarTy>(8));
  if (!CheckedProd)
    return std::nullopt;
  return TypeSize::get(*CheckedProd, Size->isScalable());
}
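
// A minimal usage sketch, assuming an `AllocaInst *AI` and the module's
// `DataLayout DL` are in scope:
//
//   if (std::optional<TypeSize> Bits = AI->getAllocationSizeInBits(DL)) {
//     // For scalable allocations this is only the known minimum size; the
//     // actual size is an unknown runtime multiple of it.
//     uint64_t MinBits = Bits->getKnownMinValue();
//     (void)MinBits;
//   }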

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
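
// A minimal sketch of the intended call pattern, assuming `Cond`, `TVal`,
// `FVal` (all `Value *`) and an insertion position `InsertPt` are in scope:
//
//   if (const char *Err = SelectInst::areInvalidOperands(Cond, TVal, FVal))
//     report_fatal_error(Twine("invalid select operands: ") + Err);
//   SelectInst *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);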

//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
      ReservedSpace(PN.getNumOperands()) {
  NumUserOperands = PN.getNumOperands();
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);
  // Swap with the end of the list.
  unsigned Last = getNumOperands() - 1;
  if (Idx != Last) {
    setIncomingValue(Idx, getIncomingValue(Last));
    setIncomingBlock(Idx, getIncomingBlock(Last));
  }

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
                                    bool DeletePHIIfEmpty) {
  unsigned NumOps = getNumIncomingValues();

  // Loop backwards in case the predicate is purely index based.
  for (unsigned Idx = NumOps; Idx-- > 0;) {
    if (Predicate(Idx)) {
      unsigned LastIdx = NumOps - 1;
      if (Idx != LastIdx) {
        setIncomingValue(Idx, getIncomingValue(LastIdx));
        setIncomingBlock(Idx, getIncomingBlock(LastIdx));
      }
      getOperandUse(LastIdx).set(nullptr);
      NumOps--;
    }
  }

  setNumHungOffUseOperands(NumOps);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation, increasing the number of operands by about 1.5x.
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return PoisonValue::get(getType());
  return ConstantValue;
}
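
// For illustration: in the IR below the phi always merges the same value, so
// hasConstantValue() returns %v (the self-reference through %p is ignored):
//
//   loop:
//     %p = phi i32 [ %v, %entry ], [ %p, %loop ]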

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
      ReservedSpace(LP.getNumOperands()) {
  NumUserOperands = LP.getNumOperands();
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       InsertPosition InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation, doubling the reserved space.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//

CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           InsertPosition InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
                           InsertPosition InsertPt) {
  SmallVector<OperandBundleDef, 2> OpDefs;
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = CI->getOperandBundleAt(i);
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return CallBase::Create(CI, OpDefs, InsertPt);
}
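
// A minimal sketch, assuming `CB` is an existing `CallBase *` and `DeoptArgs`
// is a `std::vector<Value *>` of deopt state: this builds a replacement call
// whose "deopt" bundle is the new one; erasing the old instruction and
// transferring its uses is left to the caller.
//
//   OperandBundleDef DeoptOB("deopt", DeoptArgs);
//   CallBase *NewCB = CallBase::Create(CB, DeoptOB, CB->getIterator());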

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = dyn_cast_or_null<Function>(getCalledOperand()))
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

FPClassTest CallBase::getRetNoFPClass() const {
  FPClassTest Mask = Attrs.getRetNoFPClass();

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getRetNoFPClass();
  return Mask;
}

FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
  FPClassTest Mask = Attrs.getParamNoFPClass(i);

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getParamNoFPClass(i);
  return Mask;
}

std::optional<ConstantRange> CallBase::getRange() const {
  Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
  Attribute FnAttr;
  if (const Function *F = getCalledFunction())
    FnAttr = F->getRetAttribute(Attribute::Range);

  if (CallAttr.isValid() && FnAttr.isValid())
    return CallAttr.getRange().intersectWith(FnAttr.getRange());
  if (CallAttr.isValid())
    return CallAttr.getRange();
  if (FnAttr.isValid())
    return FnAttr.getRange();
  return std::nullopt;
}
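
// For illustration: if the call site returns `range(i8 0, 10)` and the callee
// is declared with `range(i8 2, 12)` on its return, the result is the
// intersection [2, 10); with only one attribute present that range is
// returned as-is, and with neither, std::nullopt.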

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getRetDereferenceableBytes() > 0 &&
      !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Kind, &Index))
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.
  switch (Kind) {
  case Attribute::ReadNone:
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
  case Attribute::ReadOnly:
    return !hasClobberingOperandBundles();
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}

bool CallBase::paramHasNonNullAttr(unsigned ArgNo,
                                   bool AllowUndefOrPoison) const {
  assert(getArgOperand(ArgNo)->getType()->isPointerTy() &&
         "Argument must be a pointer");
  if (paramHasAttr(ArgNo, Attribute::NonNull) &&
      (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
    return true;

  if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
      !NullPointerIsDefined(
          getCaller(),
          getArgOperand(ArgNo)->getType()->getPointerAddressSpace()))
    return true;

  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

template LLVM_ABI Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template LLVM_ABI Attribute
CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;

template <typename AK>
Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                                 AK Kind) const {
  Value *V = getCalledOperand();

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getParamAttr(ArgNo, Kind);

  return Attribute();
}
template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
    unsigned ArgNo, Attribute::AttrKind Kind) const;
template LLVM_ABI Attribute
CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;

void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}
CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  // When there aren't many bundles, do a simple linear search. Otherwise,
  // fall back to a binary search that exploits the fact that bundles usually
  // have a similar number of arguments to converge faster.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  // We need a fractional value below; to avoid floating point arithmetic we
  // use an integral value scaled by this constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}
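
// A worked example of the scaled interpolation above, assuming three bundles
// covering operand indices [10,14), [14,18) and [18,22) with OpIdx == 19:
//   ScaledOperandPerBundle = 1024 * (22 - 10) / 3 = 4096
//   Current = Begin + ((19 - 10) * 1024) / 4096 = Begin + 2
// which is exactly the bundle [18,22) containing OpIdx, so the loop
// terminates after a single iteration.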

CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     InsertPosition InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}

CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
                                        InsertPosition InsertPt) {
  SmallVector<OperandBundleDef, 1> Bundles;
  bool CreateNew = false;

  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CB->getOperandBundleAt(I);
    if (Bundle.getTagID() == ID) {
      CreateNew = true;
      continue;
    }
    Bundles.emplace_back(Bundle);
  }

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
}

bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth) forces a callsite to be at least readonly.
  return hasOperandBundlesOtherThan({LLVMContext::OB_ptrauth,
                                     LLVMContext::OB_kcfi,
                                     LLVMContext::OB_convergencectrl,
                                     LLVMContext::OB_deactivation_symbol}) &&
         getIntrinsicID() != Intrinsic::assume;
}

bool CallBase::hasClobberingOperandBundles() const {
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi,
              LLVMContext::OB_convergencectrl,
              LLVMContext::OB_deactivation_symbol}) &&
         getIntrinsicID() != Intrinsic::assume;
}

MemoryEffects CallBase::getMemoryEffects() const {
  MemoryEffects ME = getAttributes().getMemoryEffects();
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
    MemoryEffects FnME = Fn->getMemoryEffects();
    if (hasOperandBundles()) {
      // TODO: Add a method to get memory effects for operand bundles instead.
      if (hasReadingOperandBundles())
        FnME |= MemoryEffects::readOnly();
      if (hasClobberingOperandBundles())
        FnME |= MemoryEffects::writeOnly();
    }
    if (isVolatile()) {
      // Volatile operations also access inaccessible memory.
      FnME |= MemoryEffects::inaccessibleMemOnly();
    }
    ME &= FnME;
  }
  return ME;
}
void CallBase::setMemoryEffects(MemoryEffects ME) {
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
}
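
// For illustration: a call site annotated `memory(read)` calling a function
// annotated `memory(argmem: readwrite)` intersects to "may only read argument
// memory":
//
//   MemoryEffects ME =
//       MemoryEffects::readOnly() & MemoryEffects::argMemOnly();
//   // Both ME.onlyReadsMemory() and ME.onlyAccessesArgPointees() hold.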

/// Determine if the function does not access memory.
bool CallBase::doesNotAccessMemory() const {
  return getMemoryEffects().doesNotAccessMemory();
}
void CallBase::setDoesNotAccessMemory() {
  setMemoryEffects(MemoryEffects::none());
}

/// Determine if the function does not access or only reads memory.
bool CallBase::onlyReadsMemory() const {
  return getMemoryEffects().onlyReadsMemory();
}
void CallBase::setOnlyReadsMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
}

/// Determine if the function does not access or only writes memory.
bool CallBase::onlyWritesMemory() const {
  return getMemoryEffects().onlyWritesMemory();
}
void CallBase::setOnlyWritesMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
}

/// Determine if the call can access memory only using pointers based
/// on its arguments.
bool CallBase::onlyAccessesArgMemory() const {
  return getMemoryEffects().onlyAccessesArgPointees();
}
void CallBase::setOnlyAccessesArgMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
}

/// Determine if the function may only access memory that is
/// inaccessible from the IR.
bool CallBase::onlyAccessesInaccessibleMemory() const {
  return getMemoryEffects().onlyAccessesInaccessibleMem();
}
void CallBase::setOnlyAccessesInaccessibleMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
}

/// Determine if the function may only access memory that is
/// either inaccessible from the IR or pointed to by its arguments.
bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
}
void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
  setMemoryEffects(getMemoryEffects() &
                   MemoryEffects::inaccessibleOrArgMemOnly());
}

CaptureInfo CallBase::getCaptureInfo(unsigned OpNo) const {
  if (OpNo < arg_size()) {
    // If the argument is passed byval, the callee does not have access to the
    // original pointer and thus cannot capture it.
    if (isByValArgument(OpNo))
      return CaptureInfo::none();

    CaptureInfo CI = getParamAttributes(OpNo).getCaptureInfo();
    if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
      CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
    return CI;
  }

  // Bundles on assumes are captures(none).
  if (getIntrinsicID() == Intrinsic::assume)
    return CaptureInfo::none();

  // deopt operand bundles are captures(none)
  auto &BOI = getBundleOpInfoForOperand(OpNo);
  auto OBU = operandBundleFromBundleOpInfo(BOI);
  return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
}

bool CallBase::hasArgumentWithAdditionalReturnCaptureComponents() const {
  for (unsigned I = 0, E = arg_size(); I < E; ++I) {
    if (!getArgOperand(I)->getType()->isPointerTy())
      continue;

    CaptureInfo CI = getParamAttributes(I).getCaptureInfo();
    if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
      CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
    if (capturesAnything(CI.getRetComponents() & ~CI.getOtherComponents()))
      return true;
  }
  return false;
}

//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   AllocInfo AllocInfo, InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
               InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
  assert(getNumOperands() == CI.getNumOperands() &&
         "Wrong number of operands allocated");
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           InsertPosition InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update the profile weight for this call instruction by scaling it by the
// ratio S/T. For call instructions, "branch_weights" metadata is repurposed
// to represent a call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}
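
// For illustration: a call carrying !prof !{!"branch_weights", i32 2000} in a
// function whose entry count is scaled from T = 1000 down to S = 250 ends up
// with a count of 2000 * 250 / 1000 = 500 after scaleProfData.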

//===----------------------------------------------------------------------===//
// InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert(getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
  assert(getNumOperands() == II.getNumOperands() &&
         "Wrong number of operands allocated");
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               InsertPosition InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
}

void InvokeInst::updateProfWeight(uint64_t S, uint64_t T) {
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}

//===----------------------------------------------------------------------===//
// CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert(getNumOperands() == ComputeNumOperands(Args.size(),
                                                IndirectDests.size(),
                                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               AllocInfo) {
  assert(getNumOperands() == CBI.getNumOperands() &&
         "Wrong number of operands allocated");
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               InsertPosition InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
// ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  AllocInfo) {
  assert(getNumOperands() == RI.getNumOperands() &&
         "Wrong number of operands allocated");
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  AllocMarker) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  AllocMarker, InsertBefore) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
// CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
                                     AllocInfo AllocInfo)
    : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
  assert(getNumOperands() == CRI.getNumOperands() &&
         "Wrong number of operands allocated");
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     AllocInfo AllocInfo,
                                     InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet, AllocInfo, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  AllocMarker) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  AllocMarker, InsertBefore) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
  NumUserOperands = CSI.NumUserOperands;
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation, doubling the reserved space.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
    : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
  assert(getNumOperands() == FPI.getNumOperands() &&
         "Wrong number of operands allocated");
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, AllocInfo AllocInfo,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
                  AllocMarker, InsertBefore) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       AllocInfo AllocInfo, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  AllocInfo) {
  assert(getNumOperands() == BI.getNumOperands() &&
         "Wrong number of operands allocated");
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos) {
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       InsertPosition InsertBefore)
    : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}
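
// For illustration: in the IR below %buf is a static alloca (entry block,
// constant size), while %vla is not because its size is a runtime value:
//
//   entry:
//     %buf = alloca [16 x i8]
//     %vla = alloca i8, i64 %n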
1308
1309//===----------------------------------------------------------------------===//
1310// LoadInst Implementation
1311//===----------------------------------------------------------------------===//
1312
1313void LoadInst::AssertOK() {
1314 assert(getOperand(0)->getType()->isPointerTy() &&
1315 "Ptr must have pointer type.");
1316}
1317
1318static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos) {
1319 assert(Pos.isValid() &&
1320 "Insertion position cannot be null when alignment not provided!");
1321 BasicBlock *BB = Pos.getBasicBlock();
1322 assert(BB->getParent() &&
1323 "BB must be in a Function when alignment not provided!");
1324 const DataLayout &DL = BB->getDataLayout();
1325 return DL.getABITypeAlign(Ty);
1326}
1327
1328LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1329 InsertPosition InsertBef)
1330 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1331
1332LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1333 InsertPosition InsertBef)
1334 : LoadInst(Ty, Ptr, Name, isVolatile,
1335 computeLoadStoreDefaultAlign(Ty, Pos: InsertBef), InsertBef) {}
1336
1337LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1338 Align Align, InsertPosition InsertBef)
1339 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1340 SyncScope::System, InsertBef) {}
1341
1342LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1343 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
1344 InsertPosition InsertBef)
1345 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1346 setVolatile(isVolatile);
1347 setAlignment(Align);
1348 setAtomic(Ordering: Order, SSID);
1349 AssertOK();
1350 setName(Name);
1351}
1352
1353//===----------------------------------------------------------------------===//
1354// StoreInst Implementation
1355//===----------------------------------------------------------------------===//
1356
1357void StoreInst::AssertOK() {
1358 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1359 assert(getOperand(1)->getType()->isPointerTy() &&
1360 "Ptr must have pointer type!");
1361}
1362
1363StoreInst::StoreInst(Value *val, Value *addr, InsertPosition InsertBefore)
1364 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1365
1366StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1367 InsertPosition InsertBefore)
1368 : StoreInst(val, addr, isVolatile,
1369 computeLoadStoreDefaultAlign(Ty: val->getType(), Pos: InsertBefore),
1370 InsertBefore) {}
1371
1372StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1373 InsertPosition InsertBefore)
1374 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1375 SyncScope::System, InsertBefore) {}
1376
1377StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1378 AtomicOrdering Order, SyncScope::ID SSID,
1379 InsertPosition InsertBefore)
1380 : Instruction(Type::getVoidTy(C&: val->getContext()), Store, AllocMarker,
1381 InsertBefore) {
1382 Op<0>() = val;
1383 Op<1>() = addr;
1384 setVolatile(isVolatile);
1385 setAlignment(Align);
1386 setAtomic(Ordering: Order, SSID);
1387 AssertOK();
1388}
1389
1390//===----------------------------------------------------------------------===//
1391// AtomicCmpXchgInst Implementation
1392//===----------------------------------------------------------------------===//
1393
1394void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1395 Align Alignment, AtomicOrdering SuccessOrdering,
1396 AtomicOrdering FailureOrdering,
1397 SyncScope::ID SSID) {
1398 Op<0>() = Ptr;
1399 Op<1>() = Cmp;
1400 Op<2>() = NewVal;
1401 setSuccessOrdering(SuccessOrdering);
1402 setFailureOrdering(FailureOrdering);
1403 setSyncScopeID(SSID);
1404 setAlignment(Alignment);
1405
1406 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1407 "All operands must be non-null!");
1408 assert(getOperand(0)->getType()->isPointerTy() &&
1409 "Ptr must have pointer type!");
1410 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1411 "Cmp type and NewVal type must be same!");
1412}
1413
1414AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1415 Align Alignment,
1416 AtomicOrdering SuccessOrdering,
1417 AtomicOrdering FailureOrdering,
1418 SyncScope::ID SSID,
1419 InsertPosition InsertBefore)
1420 : Instruction(
1421 StructType::get(elt1: Cmp->getType(), elts: Type::getInt1Ty(C&: Cmp->getContext())),
1422 AtomicCmpXchg, AllocMarker, InsertBefore) {
1423 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1424}
1425
1426//===----------------------------------------------------------------------===//
1427// AtomicRMWInst Implementation
1428//===----------------------------------------------------------------------===//
1429
1430void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1431 Align Alignment, AtomicOrdering Ordering,
1432 SyncScope::ID SSID) {
1433 assert(Ordering != AtomicOrdering::NotAtomic &&
1434 "atomicrmw instructions can only be atomic.");
1435 assert(Ordering != AtomicOrdering::Unordered &&
1436 "atomicrmw instructions cannot be unordered.");
1437 Op<0>() = Ptr;
1438 Op<1>() = Val;
1439 setOperation(Operation);
1440 setOrdering(Ordering);
1441 setSyncScopeID(SSID);
1442 setAlignment(Alignment);
1443
1444 assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
1445 assert(getOperand(0)->getType()->isPointerTy() &&
1446 "Ptr must have pointer type!");
1447 assert(Ordering != AtomicOrdering::NotAtomic &&
1448 "AtomicRMW instructions must be atomic!");
1449}
1450
1451AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1452 Align Alignment, AtomicOrdering Ordering,
1453 SyncScope::ID SSID, InsertPosition InsertBefore)
1454 : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
1455 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1456}
1457
1458StringRef AtomicRMWInst::getOperationName(BinOp Op) {
1459 switch (Op) {
1460 case AtomicRMWInst::Xchg:
1461 return "xchg";
1462 case AtomicRMWInst::Add:
1463 return "add";
1464 case AtomicRMWInst::Sub:
1465 return "sub";
1466 case AtomicRMWInst::And:
1467 return "and";
1468 case AtomicRMWInst::Nand:
1469 return "nand";
1470 case AtomicRMWInst::Or:
1471 return "or";
1472 case AtomicRMWInst::Xor:
1473 return "xor";
1474 case AtomicRMWInst::Max:
1475 return "max";
1476 case AtomicRMWInst::Min:
1477 return "min";
1478 case AtomicRMWInst::UMax:
1479 return "umax";
1480 case AtomicRMWInst::UMin:
1481 return "umin";
1482 case AtomicRMWInst::FAdd:
1483 return "fadd";
1484 case AtomicRMWInst::FSub:
1485 return "fsub";
1486 case AtomicRMWInst::FMax:
1487 return "fmax";
1488 case AtomicRMWInst::FMin:
1489 return "fmin";
1490 case AtomicRMWInst::FMaximum:
1491 return "fmaximum";
1492 case AtomicRMWInst::FMinimum:
1493 return "fminimum";
1494 case AtomicRMWInst::UIncWrap:
1495 return "uinc_wrap";
1496 case AtomicRMWInst::UDecWrap:
1497 return "udec_wrap";
1498 case AtomicRMWInst::USubCond:
1499 return "usub_cond";
1500 case AtomicRMWInst::USubSat:
1501 return "usub_sat";
1502 case AtomicRMWInst::BAD_BINOP:
1503 return "<invalid operation>";
1504 }
1505
1506 llvm_unreachable("invalid atomicrmw operation");
1507}
1508
1509//===----------------------------------------------------------------------===//
1510// FenceInst Implementation
1511//===----------------------------------------------------------------------===//
1512
1513FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1514 SyncScope::ID SSID, InsertPosition InsertBefore)
1515 : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
1516 setOrdering(Ordering);
1517 setSyncScopeID(SSID);
1518}
1519
1520//===----------------------------------------------------------------------===//
1521// GetElementPtrInst Implementation
1522//===----------------------------------------------------------------------===//
1523
1524void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1525 const Twine &Name) {
1526 assert(getNumOperands() == 1 + IdxList.size() &&
1527 "NumOperands not initialized?");
1528 Op<0>() = Ptr;
1529 llvm::copy(Range&: IdxList, Out: op_begin() + 1);
1530 setName(Name);
1531}
1532
1533GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
1534 AllocInfo AllocInfo)
1535 : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
1536 SourceElementType(GEPI.SourceElementType),
1537 ResultElementType(GEPI.ResultElementType) {
1538 assert(getNumOperands() == GEPI.getNumOperands() &&
1539 "Wrong number of operands allocated");
1540 std::copy(first: GEPI.op_begin(), last: GEPI.op_end(), result: op_begin());
1541 SubclassOptionalData = GEPI.SubclassOptionalData;
1542}
1543
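/// Return the type produced by applying one index operand to \p Ty: struct
/// types are indexed by a constant field index, while array and vector types
/// yield their element type. Returns null if the index is invalid for \p Ty.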
1544Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
1545 if (auto *Struct = dyn_cast<StructType>(Val: Ty)) {
1546 if (!Struct->indexValid(V: Idx))
1547 return nullptr;
1548 return Struct->getTypeAtIndex(V: Idx);
1549 }
1550 if (!Idx->getType()->isIntOrIntVectorTy())
1551 return nullptr;
1552 if (auto *Array = dyn_cast<ArrayType>(Val: Ty))
1553 return Array->getElementType();
1554 if (auto *Vector = dyn_cast<VectorType>(Val: Ty))
1555 return Vector->getElementType();
1556 return nullptr;
1557}
1558
1559Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
1560 if (auto *Struct = dyn_cast<StructType>(Val: Ty)) {
1561 if (Idx >= Struct->getNumElements())
1562 return nullptr;
1563 return Struct->getElementType(N: Idx);
1564 }
1565 if (auto *Array = dyn_cast<ArrayType>(Val: Ty))
1566 return Array->getElementType();
1567 if (auto *Vector = dyn_cast<VectorType>(Val: Ty))
1568 return Vector->getElementType();
1569 return nullptr;
1570}
1571
1572template <typename IndexTy>
1573static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
1574 if (IdxList.empty())
1575 return Ty;
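  // The first index steps over the pointer operand without changing the
  // indexed type, so only the remaining indices are walked through the type.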
1576 for (IndexTy V : IdxList.slice(1)) {
1577 Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
1578 if (!Ty)
1579 return Ty;
1580 }
1581 return Ty;
1582}
1583
1584Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
1585 return getIndexedTypeInternal(Ty, IdxList);
1586}
1587
1588Type *GetElementPtrInst::getIndexedType(Type *Ty,
1589 ArrayRef<Constant *> IdxList) {
1590 return getIndexedTypeInternal(Ty, IdxList);
1591}
1592
1593Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
1594 return getIndexedTypeInternal(Ty, IdxList);
1595}
1596
1597/// hasAllZeroIndices - Return true if all of the indices of this GEP are
1598/// zeros. If so, the result pointer and the first operand have the same
1599/// value, just potentially different types.
1600bool GetElementPtrInst::hasAllZeroIndices() const {
1601 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1602 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val: getOperand(i_nocapture: i))) {
1603 if (!CI->isZero()) return false;
1604 } else {
1605 return false;
1606 }
1607 }
1608 return true;
1609}
1610
1611/// hasAllConstantIndices - Return true if all of the indices of this GEP are
1612/// constant integers. If so, the result pointer and the first operand have
1613/// a constant offset between them.
1614bool GetElementPtrInst::hasAllConstantIndices() const {
1615 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1616 if (!isa<ConstantInt>(Val: getOperand(i_nocapture: i)))
1617 return false;
1618 }
1619 return true;
1620}
1621
1622void GetElementPtrInst::setNoWrapFlags(GEPNoWrapFlags NW) {
1623 SubclassOptionalData = NW.getRaw();
1624}
1625
1626void GetElementPtrInst::setIsInBounds(bool B) {
1627 GEPNoWrapFlags NW = cast<GEPOperator>(Val: this)->getNoWrapFlags();
1628 if (B)
1629 NW |= GEPNoWrapFlags::inBounds();
1630 else
1631 NW = NW.withoutInBounds();
1632 setNoWrapFlags(NW);
1633}
1634
1635GEPNoWrapFlags GetElementPtrInst::getNoWrapFlags() const {
1636 return cast<GEPOperator>(Val: this)->getNoWrapFlags();
1637}
1638
1639bool GetElementPtrInst::isInBounds() const {
1640 return cast<GEPOperator>(Val: this)->isInBounds();
1641}
1642
1643bool GetElementPtrInst::hasNoUnsignedSignedWrap() const {
1644 return cast<GEPOperator>(Val: this)->hasNoUnsignedSignedWrap();
1645}
1646
1647bool GetElementPtrInst::hasNoUnsignedWrap() const {
1648 return cast<GEPOperator>(Val: this)->hasNoUnsignedWrap();
1649}
1650
1651bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
1652 APInt &Offset) const {
1653 // Delegate to the generic GEPOperator implementation.
1654 return cast<GEPOperator>(Val: this)->accumulateConstantOffset(DL, Offset);
1655}
1656
1657bool GetElementPtrInst::collectOffset(
1658 const DataLayout &DL, unsigned BitWidth,
1659 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1660 APInt &ConstantOffset) const {
1661 // Delegate to the generic GEPOperator implementation.
1662 return cast<GEPOperator>(Val: this)->collectOffset(DL, BitWidth, VariableOffsets,
1663 ConstantOffset);
1664}
1665
1666//===----------------------------------------------------------------------===//
1667// ExtractElementInst Implementation
1668//===----------------------------------------------------------------------===//
1669
1670ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1671 const Twine &Name,
1672 InsertPosition InsertBef)
1673 : Instruction(cast<VectorType>(Val: Val->getType())->getElementType(),
1674 ExtractElement, AllocMarker, InsertBef) {
1675 assert(isValidOperands(Val, Index) &&
1676 "Invalid extractelement instruction operands!");
1677 Op<0>() = Val;
1678 Op<1>() = Index;
1679 setName(Name);
1680}
1681
1682bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1683 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1684 return false;
1685 return true;
1686}
1687
1688//===----------------------------------------------------------------------===//
1689// InsertElementInst Implementation
1690//===----------------------------------------------------------------------===//
1691
1692InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1693 const Twine &Name,
1694 InsertPosition InsertBef)
1695 : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
1696 assert(isValidOperands(Vec, Elt, Index) &&
1697 "Invalid insertelement instruction operands!");
1698 Op<0>() = Vec;
1699 Op<1>() = Elt;
1700 Op<2>() = Index;
1701 setName(Name);
1702}
1703
1704bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
1705 const Value *Index) {
1706 if (!Vec->getType()->isVectorTy())
1707 return false; // First operand of insertelement must be vector type.
1708
  if (Elt->getType() != cast<VectorType>(Val: Vec->getType())->getElementType())
    return false; // Second operand of insertelement must match the vector
                  // element type.

  if (!Index->getType()->isIntegerTy())
    return false; // Third operand of insertelement must be an integer.
1714 return true;
1715}
1716
1717//===----------------------------------------------------------------------===//
1718// ShuffleVectorInst Implementation
1719//===----------------------------------------------------------------------===//
1720
1721static Value *createPlaceholderForShuffleVector(Value *V) {
1722 assert(V && "Cannot create placeholder of nullptr V");
1723 return PoisonValue::get(T: V->getType());
1724}
1725
1726ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
1727 InsertPosition InsertBefore)
1728 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V: V1), Mask, Name,
1729 InsertBefore) {}
1730
1731ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
1732 const Twine &Name,
1733 InsertPosition InsertBefore)
1734 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V: V1), Mask, Name,
1735 InsertBefore) {}
1736
1737ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1738 const Twine &Name,
1739 InsertPosition InsertBefore)
1740 : Instruction(
1741 VectorType::get(ElementType: cast<VectorType>(Val: V1->getType())->getElementType(),
1742 EC: cast<VectorType>(Val: Mask->getType())->getElementCount()),
1743 ShuffleVector, AllocMarker, InsertBefore) {
1744 assert(isValidOperands(V1, V2, Mask) &&
1745 "Invalid shuffle vector instruction operands!");
1746
1747 Op<0>() = V1;
1748 Op<1>() = V2;
1749 SmallVector<int, 16> MaskArr;
1750 getShuffleMask(Mask: cast<Constant>(Val: Mask), Result&: MaskArr);
1751 setShuffleMask(MaskArr);
1752 setName(Name);
1753}
1754
1755ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
1756 const Twine &Name,
1757 InsertPosition InsertBefore)
1758 : Instruction(
1759 VectorType::get(ElementType: cast<VectorType>(Val: V1->getType())->getElementType(),
1760 NumElements: Mask.size(), Scalable: isa<ScalableVectorType>(Val: V1->getType())),
1761 ShuffleVector, AllocMarker, InsertBefore) {
1762 assert(isValidOperands(V1, V2, Mask) &&
1763 "Invalid shuffle vector instruction operands!");
1764 Op<0>() = V1;
1765 Op<1>() = V2;
1766 setShuffleMask(Mask);
1767 setName(Name);
1768}
1769
1770void ShuffleVectorInst::commute() {
1771 int NumOpElts = cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
1772 int NumMaskElts = ShuffleMask.size();
1773 SmallVector<int, 16> NewMask(NumMaskElts);
1774 for (int i = 0; i != NumMaskElts; ++i) {
1775 int MaskElt = getMaskValue(Elt: i);
1776 if (MaskElt == PoisonMaskElem) {
1777 NewMask[i] = PoisonMaskElem;
1778 continue;
1779 }
1780 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
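    // Flip the element to select the same lane from the other operand, which
    // is swapped into its place below.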
1781 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1782 NewMask[i] = MaskElt;
1783 }
1784 setShuffleMask(NewMask);
1785 Op<0>().swap(RHS&: Op<1>());
1786}
1787
1788bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
1789 ArrayRef<int> Mask) {
1790 // V1 and V2 must be vectors of the same type.
1791 if (!isa<VectorType>(Val: V1->getType()) || V1->getType() != V2->getType())
1792 return false;
1793
1794 // Make sure the mask elements make sense.
1795 int V1Size =
1796 cast<VectorType>(Val: V1->getType())->getElementCount().getKnownMinValue();
1797 for (int Elem : Mask)
1798 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
1799 return false;
1800
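  // Scalable vectors can only encode a splat of element 0 (a zeroinitializer
  // mask) or an entirely undef/poison mask; arbitrary shuffle masks cannot be
  // expressed for them.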
1801 if (isa<ScalableVectorType>(Val: V1->getType()))
1802 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Range&: Mask))
1803 return false;
1804
1805 return true;
1806}
1807
1808bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
1809 const Value *Mask) {
1810 // V1 and V2 must be vectors of the same type.
1811 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1812 return false;
1813
  // The mask must be a vector of i32 and the same kind of vector (fixed or
  // scalable) as the input vectors.
1816 auto *MaskTy = dyn_cast<VectorType>(Val: Mask->getType());
1817 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(Bitwidth: 32) ||
1818 isa<ScalableVectorType>(Val: MaskTy) != isa<ScalableVectorType>(Val: V1->getType()))
1819 return false;
1820
1821 // Check to see if Mask is valid.
1822 if (isa<UndefValue>(Val: Mask) || isa<ConstantAggregateZero>(Val: Mask))
1823 return true;
1824
1825 // NOTE: Through vector ConstantInt we have the potential to support more
1826 // than just zero splat masks but that requires a LangRef change.
1827 if (isa<ScalableVectorType>(Val: MaskTy))
1828 return false;
1829
1830 unsigned V1Size = cast<FixedVectorType>(Val: V1->getType())->getNumElements();
1831
1832 if (const auto *CI = dyn_cast<ConstantInt>(Val: Mask))
1833 return !CI->uge(Num: V1Size * 2);
1834
1835 if (const auto *MV = dyn_cast<ConstantVector>(Val: Mask)) {
1836 for (Value *Op : MV->operands()) {
1837 if (auto *CI = dyn_cast<ConstantInt>(Val: Op)) {
1838 if (CI->uge(Num: V1Size*2))
1839 return false;
1840 } else if (!isa<UndefValue>(Val: Op)) {
1841 return false;
1842 }
1843 }
1844 return true;
1845 }
1846
1847 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Val: Mask)) {
1848 for (unsigned i = 0, e = cast<FixedVectorType>(Val: MaskTy)->getNumElements();
1849 i != e; ++i)
1850 if (CDS->getElementAsInteger(i) >= V1Size*2)
1851 return false;
1852 return true;
1853 }
1854
1855 return false;
1856}
1857
1858void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
1859 SmallVectorImpl<int> &Result) {
1860 ElementCount EC = cast<VectorType>(Val: Mask->getType())->getElementCount();
1861
1862 if (isa<ConstantAggregateZero>(Val: Mask) || isa<UndefValue>(Val: Mask)) {
1863 int MaskVal = isa<UndefValue>(Val: Mask) ? -1 : 0;
1864 Result.append(NumInputs: EC.getKnownMinValue(), Elt: MaskVal);
1865 return;
1866 }
1867
1868 assert(!EC.isScalable() &&
1869 "Scalable vector shuffle mask must be undef or zeroinitializer");
1870
1871 unsigned NumElts = EC.getFixedValue();
1872
1873 Result.reserve(N: NumElts);
1874
1875 if (auto *CDS = dyn_cast<ConstantDataSequential>(Val: Mask)) {
1876 for (unsigned i = 0; i != NumElts; ++i)
1877 Result.push_back(Elt: CDS->getElementAsInteger(i));
1878 return;
1879 }
1880 for (unsigned i = 0; i != NumElts; ++i) {
1881 Constant *C = Mask->getAggregateElement(Elt: i);
1882 Result.push_back(Elt: isa<UndefValue>(Val: C) ? -1 :
1883 cast<ConstantInt>(Val: C)->getZExtValue());
1884 }
1885}
1886
1887void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
1888 ShuffleMask.assign(in_start: Mask.begin(), in_end: Mask.end());
1889 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, ResultTy: getType());
1890}
1891
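/// Encode the integer shuffle mask as a constant vector of i32 so it can be
/// stored in bitcode, mapping PoisonMaskElem entries to poison elements.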
1892Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
1893 Type *ResultTy) {
1894 Type *Int32Ty = Type::getInt32Ty(C&: ResultTy->getContext());
1895 if (isa<ScalableVectorType>(Val: ResultTy)) {
1896 assert(all_equal(Mask) && "Unexpected shuffle");
1897 Type *VecTy = VectorType::get(ElementType: Int32Ty, NumElements: Mask.size(), Scalable: true);
1898 if (Mask[0] == 0)
1899 return Constant::getNullValue(Ty: VecTy);
1900 return PoisonValue::get(T: VecTy);
1901 }
1902 SmallVector<Constant *, 16> MaskConst;
1903 for (int Elem : Mask) {
1904 if (Elem == PoisonMaskElem)
1905 MaskConst.push_back(Elt: PoisonValue::get(T: Int32Ty));
1906 else
1907 MaskConst.push_back(Elt: ConstantInt::get(Ty: Int32Ty, V: Elem));
1908 }
1909 return ConstantVector::get(V: MaskConst);
1910}
1911
1912static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1913 assert(!Mask.empty() && "Shuffle mask must contain elements");
1914 bool UsesLHS = false;
1915 bool UsesRHS = false;
1916 for (int I : Mask) {
1917 if (I == -1)
1918 continue;
1919 assert(I >= 0 && I < (NumOpElts * 2) &&
1920 "Out-of-bounds shuffle mask element");
1921 UsesLHS |= (I < NumOpElts);
1922 UsesRHS |= (I >= NumOpElts);
1923 if (UsesLHS && UsesRHS)
1924 return false;
1925 }
  // Degenerate case: a completely undef mask uses neither source, so it does
  // not count as a single-source mask.
  return UsesLHS || UsesRHS;
1928}
1929
1930bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts) {
1931 // We don't have vector operand size information, so assume operands are the
1932 // same size as the mask.
1933 return isSingleSourceMaskImpl(Mask, NumOpElts: NumSrcElts);
1934}
1935
1936static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1937 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1938 return false;
1939 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1940 if (Mask[i] == -1)
1941 continue;
1942 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1943 return false;
1944 }
1945 return true;
1946}
1947
1948bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts) {
1949 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1950 return false;
1951 // We don't have vector operand size information, so assume operands are the
1952 // same size as the mask.
1953 return isIdentityMaskImpl(Mask, NumOpElts: NumSrcElts);
1954}
1955
1956bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts) {
1957 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1958 return false;
1959 if (!isSingleSourceMask(Mask, NumSrcElts))
1960 return false;
1961
1962 // The number of elements in the mask must be at least 2.
1963 if (NumSrcElts < 2)
1964 return false;
1965
1966 for (int I = 0, E = Mask.size(); I < E; ++I) {
1967 if (Mask[I] == -1)
1968 continue;
1969 if (Mask[I] != (NumSrcElts - 1 - I) &&
1970 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
1971 return false;
1972 }
1973 return true;
1974}
1975
1976bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts) {
1977 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1978 return false;
1979 if (!isSingleSourceMask(Mask, NumSrcElts))
1980 return false;
1981 for (int I = 0, E = Mask.size(); I < E; ++I) {
1982 if (Mask[I] == -1)
1983 continue;
1984 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
1985 return false;
1986 }
1987 return true;
1988}
1989
1990bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts) {
1991 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1992 return false;
1993 // Select is differentiated from identity. It requires using both sources.
1994 if (isSingleSourceMask(Mask, NumSrcElts))
1995 return false;
1996 for (int I = 0, E = Mask.size(); I < E; ++I) {
1997 if (Mask[I] == -1)
1998 continue;
1999 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2000 return false;
2001 }
2002 return true;
2003}
2004
2005bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts) {
2006 // Example masks that will return true:
2007 // v1 = <a, b, c, d>
2008 // v2 = <e, f, g, h>
2009 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2010 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2011
2012 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2013 return false;
2014 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2015 int Sz = Mask.size();
2016 if (Sz < 2 || !isPowerOf2_32(Value: Sz))
2017 return false;
2018
2019 // 2. The first element of the mask must be either a 0 or a 1.
2020 if (Mask[0] != 0 && Mask[0] != 1)
2021 return false;
2022
  // 3. The difference between the first two elements must be equal to the
  // number of source elements (which here equals the mask length).
  if ((Mask[1] - Mask[0]) != NumSrcElts)
    return false;

  // 4. Consecutive even-numbered elements must differ by 2, and likewise for
  // consecutive odd-numbered elements.
2030 for (int I = 2; I < Sz; ++I) {
2031 int MaskEltVal = Mask[I];
2032 if (MaskEltVal == -1)
2033 return false;
2034 int MaskEltPrevVal = Mask[I - 2];
2035 if (MaskEltVal - MaskEltPrevVal != 2)
2036 return false;
2037 }
2038 return true;
2039}
2040
2041bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,
2042 int &Index) {
2043 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2044 return false;
2045 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2046 int StartIndex = -1;
2047 for (int I = 0, E = Mask.size(); I != E; ++I) {
2048 int MaskEltVal = Mask[I];
2049 if (MaskEltVal == -1)
2050 continue;
2051
2052 if (StartIndex == -1) {
2053 // Don't support a StartIndex that begins in the second input, or if the
2054 // first non-undef index would access below the StartIndex.
2055 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2056 return false;
2057
2058 StartIndex = MaskEltVal - I;
2059 continue;
2060 }
2061
2062 // Splice is sequential starting from StartIndex.
2063 if (MaskEltVal != (StartIndex + I))
2064 return false;
2065 }
2066
2067 if (StartIndex == -1)
2068 return false;
2069
2070 // NOTE: This accepts StartIndex == 0 (COPY).
2071 Index = StartIndex;
2072 return true;
2073}
2074
2075bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
2076 int NumSrcElts, int &Index) {
2077 // Must extract from a single source.
2078 if (!isSingleSourceMaskImpl(Mask, NumOpElts: NumSrcElts))
2079 return false;
2080
2081 // Must be smaller (else this is an Identity shuffle).
2082 if (NumSrcElts <= (int)Mask.size())
2083 return false;
2084
  // Find the start of the extraction, accounting for leading undef elements.
2086 int SubIndex = -1;
2087 for (int i = 0, e = Mask.size(); i != e; ++i) {
2088 int M = Mask[i];
2089 if (M < 0)
2090 continue;
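    // The mask may select from either source; reducing modulo NumSrcElts maps
    // both sources onto the same extraction offset.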
2091 int Offset = (M % NumSrcElts) - i;
2092 if (0 <= SubIndex && SubIndex != Offset)
2093 return false;
2094 SubIndex = Offset;
2095 }
2096
2097 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2098 Index = SubIndex;
2099 return true;
2100 }
2101 return false;
2102}
2103
2104bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
2105 int NumSrcElts, int &NumSubElts,
2106 int &Index) {
2107 int NumMaskElts = Mask.size();
2108
2109 // Don't try to match if we're shuffling to a smaller size.
2110 if (NumMaskElts < NumSrcElts)
2111 return false;
2112
2113 // TODO: We don't recognize self-insertion/widening.
2114 if (isSingleSourceMaskImpl(Mask, NumOpElts: NumSrcElts))
2115 return false;
2116
2117 // Determine which mask elements are attributed to which source.
2118 APInt UndefElts = APInt::getZero(numBits: NumMaskElts);
2119 APInt Src0Elts = APInt::getZero(numBits: NumMaskElts);
2120 APInt Src1Elts = APInt::getZero(numBits: NumMaskElts);
2121 bool Src0Identity = true;
2122 bool Src1Identity = true;
2123
2124 for (int i = 0; i != NumMaskElts; ++i) {
2125 int M = Mask[i];
2126 if (M < 0) {
2127 UndefElts.setBit(i);
2128 continue;
2129 }
2130 if (M < NumSrcElts) {
2131 Src0Elts.setBit(i);
2132 Src0Identity &= (M == i);
2133 continue;
2134 }
2135 Src1Elts.setBit(i);
2136 Src1Identity &= (M == (i + NumSrcElts));
2137 }
2138 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2139 "unknown shuffle elements");
2140 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2141 "2-source shuffle not found");
2142
2143 // Determine lo/hi span ranges.
2144 // TODO: How should we handle undefs at the start of subvector insertions?
2145 int Src0Lo = Src0Elts.countr_zero();
2146 int Src1Lo = Src1Elts.countr_zero();
2147 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2148 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2149
  // If src0 is in place, see if the src1 elements are in place within their
  // own span.
2152 if (Src0Identity) {
2153 int NumSub1Elts = Src1Hi - Src1Lo;
2154 ArrayRef<int> Sub1Mask = Mask.slice(N: Src1Lo, M: NumSub1Elts);
2155 if (isIdentityMaskImpl(Mask: Sub1Mask, NumOpElts: NumSrcElts)) {
2156 NumSubElts = NumSub1Elts;
2157 Index = Src1Lo;
2158 return true;
2159 }
2160 }
2161
  // If src1 is in place, see if the src0 elements are in place within their
  // own span.
2164 if (Src1Identity) {
2165 int NumSub0Elts = Src0Hi - Src0Lo;
2166 ArrayRef<int> Sub0Mask = Mask.slice(N: Src0Lo, M: NumSub0Elts);
2167 if (isIdentityMaskImpl(Mask: Sub0Mask, NumOpElts: NumSrcElts)) {
2168 NumSubElts = NumSub0Elts;
2169 Index = Src0Lo;
2170 return true;
2171 }
2172 }
2173
2174 return false;
2175}
2176
2177bool ShuffleVectorInst::isIdentityWithPadding() const {
2178 // FIXME: Not currently possible to express a shuffle mask for a scalable
2179 // vector for this case.
2180 if (isa<ScalableVectorType>(Val: getType()))
2181 return false;
2182
2183 int NumOpElts = cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2184 int NumMaskElts = cast<FixedVectorType>(Val: getType())->getNumElements();
2185 if (NumMaskElts <= NumOpElts)
2186 return false;
2187
2188 // The first part of the mask must choose elements from exactly 1 source op.
2189 ArrayRef<int> Mask = getShuffleMask();
2190 if (!isIdentityMaskImpl(Mask, NumOpElts))
2191 return false;
2192
2193 // All extending must be with undef elements.
2194 for (int i = NumOpElts; i < NumMaskElts; ++i)
2195 if (Mask[i] != -1)
2196 return false;
2197
2198 return true;
2199}
2200
2201bool ShuffleVectorInst::isIdentityWithExtract() const {
2202 // FIXME: Not currently possible to express a shuffle mask for a scalable
2203 // vector for this case.
2204 if (isa<ScalableVectorType>(Val: getType()))
2205 return false;
2206
2207 int NumOpElts = cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2208 int NumMaskElts = cast<FixedVectorType>(Val: getType())->getNumElements();
2209 if (NumMaskElts >= NumOpElts)
2210 return false;
2211
2212 return isIdentityMaskImpl(Mask: getShuffleMask(), NumOpElts);
2213}
2214
2215bool ShuffleVectorInst::isConcat() const {
2216 // Vector concatenation is differentiated from identity with padding.
2217 if (isa<UndefValue>(Val: Op<0>()) || isa<UndefValue>(Val: Op<1>()))
2218 return false;
2219
2220 // FIXME: Not currently possible to express a shuffle mask for a scalable
2221 // vector for this case.
2222 if (isa<ScalableVectorType>(Val: getType()))
2223 return false;
2224
2225 int NumOpElts = cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2226 int NumMaskElts = cast<FixedVectorType>(Val: getType())->getNumElements();
2227 if (NumMaskElts != NumOpElts * 2)
2228 return false;
2229
  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the
  // inputs, and neither input is an undef vector. If the mask picks
  // consecutive elements from both inputs, then this is a concatenation of
  // the inputs.
2234 return isIdentityMaskImpl(Mask: getShuffleMask(), NumOpElts: NumMaskElts);
2235}
2236
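/// Check whether \p Mask replicates each of \p VF elements
/// \p ReplicationFactor times, e.g. ReplicationFactor = 3 and VF = 2
/// corresponds to the mask <0,0,0,1,1,1>.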
2237static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
2238 int ReplicationFactor, int VF) {
2239 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2240 "Unexpected mask size.");
2241
2242 for (int CurrElt : seq(Size: VF)) {
2243 ArrayRef<int> CurrSubMask = Mask.take_front(N: ReplicationFactor);
2244 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2245 "Run out of mask?");
2246 Mask = Mask.drop_front(N: ReplicationFactor);
2247 if (!all_of(Range&: CurrSubMask, P: [CurrElt](int MaskElt) {
2248 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2249 }))
2250 return false;
2251 }
2252 assert(Mask.empty() && "Did not consume the whole mask?");
2253
2254 return true;
2255}
2256
2257bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
2258 int &ReplicationFactor, int &VF) {
  // The undef-less case is trivial.
2260 if (!llvm::is_contained(Range&: Mask, Element: PoisonMaskElem)) {
2261 ReplicationFactor =
2262 Mask.take_while(Pred: [](int MaskElt) { return MaskElt == 0; }).size();
2263 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2264 return false;
2265 VF = Mask.size() / ReplicationFactor;
2266 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2267 }
2268
  // However, if the mask contains undefs, we have to enumerate the possible
  // tuples and pick one. There are bounds on the replication factor:
  // [1, mask size], where RF = 1 is an identity shuffle and RF = mask size is
  // a broadcast shuffle. Additionally, the mask size must be the replication
  // factor multiplied by the vector size, which further significantly reduces
  // the search space.
2274
2275 // Before doing that, let's perform basic correctness checking first.
2276 int Largest = -1;
2277 for (int MaskElt : Mask) {
2278 if (MaskElt == PoisonMaskElem)
2279 continue;
2280 // Elements must be in non-decreasing order.
2281 if (MaskElt < Largest)
2282 return false;
2283 Largest = std::max(a: Largest, b: MaskElt);
2284 }
2285
  // Prefer the larger replication factor if all else is equal.
2287 for (int PossibleReplicationFactor :
2288 reverse(C: seq_inclusive<unsigned>(Begin: 1, End: Mask.size()))) {
2289 if (Mask.size() % PossibleReplicationFactor != 0)
2290 continue;
2291 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2292 if (!isReplicationMaskWithParams(Mask, ReplicationFactor: PossibleReplicationFactor,
2293 VF: PossibleVF))
2294 continue;
2295 ReplicationFactor = PossibleReplicationFactor;
2296 VF = PossibleVF;
2297 return true;
2298 }
2299
2300 return false;
2301}
2302
2303bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2304 int &VF) const {
2305 // Not possible to express a shuffle mask for a scalable vector for this
2306 // case.
2307 if (isa<ScalableVectorType>(Val: getType()))
2308 return false;
2309
2310 VF = cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2311 if (ShuffleMask.size() % VF != 0)
2312 return false;
2313 ReplicationFactor = ShuffleMask.size() / VF;
2314
2315 return isReplicationMaskWithParams(Mask: ShuffleMask, ReplicationFactor, VF);
2316}
2317
2318bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
2319 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2320 Mask.size() % VF != 0)
2321 return false;
2322 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2323 ArrayRef<int> SubMask = Mask.slice(N: K, M: VF);
2324 if (all_of(Range&: SubMask, P: equal_to(Arg: PoisonMaskElem)))
2325 continue;
2326 SmallBitVector Used(VF, false);
2327 for (int Idx : SubMask) {
2328 if (Idx != PoisonMaskElem && Idx < VF)
2329 Used.set(Idx);
2330 }
2331 if (!Used.all())
2332 return false;
2333 }
2334 return true;
2335}
2336
/// Return true if this shuffle mask is a one-use-single-source ("clustered")
/// mask.
2338bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
2339 // Not possible to express a shuffle mask for a scalable vector for this
2340 // case.
2341 if (isa<ScalableVectorType>(Val: getType()))
2342 return false;
2343 if (!isSingleSourceMask(Mask: ShuffleMask, NumSrcElts: VF))
2344 return false;
2345
2346 return isOneUseSingleSourceMask(Mask: ShuffleMask, VF);
2347}
2348
2349bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2350 FixedVectorType *OpTy = dyn_cast<FixedVectorType>(Val: getOperand(i_nocapture: 0)->getType());
  // shufflevector can only interleave fixed-length vectors; for scalable
  // vectors, see the @llvm.vector.interleave2 intrinsic.
2353 if (!OpTy)
2354 return false;
2355 unsigned OpNumElts = OpTy->getNumElements();
2356
2357 return isInterleaveMask(Mask: ShuffleMask, Factor, NumInputElts: OpNumElts * 2);
2358}
2359
2360bool ShuffleVectorInst::isInterleaveMask(
2361 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2362 SmallVectorImpl<unsigned> &StartIndexes) {
2363 unsigned NumElts = Mask.size();
2364 if (NumElts % Factor)
2365 return false;
2366
2367 unsigned LaneLen = NumElts / Factor;
2368 if (!isPowerOf2_32(Value: LaneLen))
2369 return false;
2370
2371 StartIndexes.resize(N: Factor);
2372
2373 // Check whether each element matches the general interleaved rule.
2374 // Ignore undef elements, as long as the defined elements match the rule.
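  // E.g. for Factor = 3 with lane starts x, y, z, the mask is expected to be
  //   <x, y, z, x+1, y+1, z+1, x+2, y+2, z+2, ...>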
2375 // Outer loop processes all factors (x, y, z in the above example)
2376 unsigned I = 0, J;
2377 for (; I < Factor; I++) {
2378 unsigned SavedLaneValue;
2379 unsigned SavedNoUndefs = 0;
2380
2381 // Inner loop processes consecutive accesses (x, x+1... in the example)
2382 for (J = 0; J < LaneLen - 1; J++) {
2383 // Lane computes x's position in the Mask
2384 unsigned Lane = J * Factor + I;
2385 unsigned NextLane = Lane + Factor;
2386 int LaneValue = Mask[Lane];
2387 int NextLaneValue = Mask[NextLane];
2388
2389 // If both are defined, values must be sequential
2390 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2391 LaneValue + 1 != NextLaneValue)
2392 break;
2393
2394 // If the next value is undef, save the current one as reference
2395 if (LaneValue >= 0 && NextLaneValue < 0) {
2396 SavedLaneValue = LaneValue;
2397 SavedNoUndefs = 1;
2398 }
2399
      // Undefs are allowed, but defined elements must still be consecutive,
      // e.g. x, ..., undef, ..., x + 2, ..., undef, ..., x + 5, ....
      // Verify this by remembering the last non-undef value that was followed
      // by an undef, and checking that the next non-undef mask element is
      // incremented by the corresponding distance.
2405 if (SavedNoUndefs > 0 && LaneValue < 0) {
2406 SavedNoUndefs++;
2407 if (NextLaneValue >= 0 &&
2408 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2409 break;
2410 }
2411 }
2412
2413 if (J < LaneLen - 1)
2414 return false;
2415
    int StartMask = 0;
    if (Mask[I] >= 0) {
      // StartMask defined by the first value in the lane (J = 0).
      StartMask = Mask[I];
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
      // StartMask defined by the last value in the lane.
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
    } else if (SavedNoUndefs > 0) {
      // StartMask defined by the last non-undef value seen in the J loop.
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2426 }
2427 // else StartMask remains set to 0, i.e. all elements are undefs
2428
2429 if (StartMask < 0)
2430 return false;
    // We must stay within the vectors; this case can happen with undefs.
2432 if (StartMask + LaneLen > NumInputElts)
2433 return false;
2434
2435 StartIndexes[I] = StartMask;
2436 }
2437
2438 return true;
2439}
2440
2441/// Check if the mask is a DE-interleave mask of the given factor
2442/// \p Factor like:
2443/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2444bool ShuffleVectorInst::isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,
2445 unsigned Factor,
2446 unsigned &Index) {
2447 // Check all potential start indices from 0 to (Factor - 1).
2448 for (unsigned Idx = 0; Idx < Factor; Idx++) {
2449 unsigned I = 0;
2450
2451 // Check that elements are in ascending order by Factor. Ignore undef
2452 // elements.
2453 for (; I < Mask.size(); I++)
2454 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2455 break;
2456
2457 if (I == Mask.size()) {
2458 Index = Idx;
2459 return true;
2460 }
2461 }
2462
2463 return false;
2464}
2465
/// Try to match a vector shuffle as an element-wise bit rotation.
///
/// Look for a repeated rotation pattern in each subgroup.
/// Returns the per-subgroup left rotation amount (in elements), or -1 if no
/// consistent rotation is found.
2470static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2471 int NumElts = Mask.size();
2472 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2473
2474 int RotateAmt = -1;
2475 for (int i = 0; i != NumElts; i += NumSubElts) {
2476 for (int j = 0; j != NumSubElts; ++j) {
2477 int M = Mask[i + j];
2478 if (M < 0)
2479 continue;
2480 if (M < i || M >= i + NumSubElts)
2481 return -1;
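      // Rotation amount (in elements) implied by this mask element; every
      // defined element in the group must agree on it.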
2482 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2483 if (0 <= RotateAmt && Offset != RotateAmt)
2484 return -1;
2485 RotateAmt = Offset;
2486 }
2487 }
2488 return RotateAmt;
2489}
2490
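/// Check whether the shuffle could instead be performed as a bit-rotate of
/// groups of NumSubElts elements, e.g. the mask <1, 0, 3, 2, 5, 4, 7, 6> on
/// <8 x i8> acts as an 8-bit rotate of each 16-bit group. Power-of-two group
/// sizes from MinSubElts up to MaxSubElts are tried in turn.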
2491bool ShuffleVectorInst::isBitRotateMask(
2492 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2493 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2494 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2495 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2496 if (EltRotateAmt < 0)
2497 continue;
2498 RotateAmt = EltRotateAmt * EltSizeInBits;
2499 return true;
2500 }
2501
2502 return false;
2503}
2504
2505//===----------------------------------------------------------------------===//
2506// InsertValueInst Class
2507//===----------------------------------------------------------------------===//
2508
2509void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2510 const Twine &Name) {
2511 assert(getNumOperands() == 2 && "NumOperands not initialized?");
2512
2513 // There's no fundamental reason why we require at least one index
2514 // (other than weirdness with &*IdxBegin being invalid; see
2515 // getelementptr's init routine for example). But there's no
2516 // present need to support it.
2517 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2518
2519 assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
2520 Val->getType() && "Inserted value must match indexed type!");
2521 Op<0>() = Agg;
2522 Op<1>() = Val;
2523
2524 Indices.append(in_start: Idxs.begin(), in_end: Idxs.end());
2525 setName(Name);
2526}
2527
2528InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2529 : Instruction(IVI.getType(), InsertValue, AllocMarker),
2530 Indices(IVI.Indices) {
2531 Op<0>() = IVI.getOperand(i_nocapture: 0);
2532 Op<1>() = IVI.getOperand(i_nocapture: 1);
2533 SubclassOptionalData = IVI.SubclassOptionalData;
2534}
2535
2536//===----------------------------------------------------------------------===//
2537// ExtractValueInst Class
2538//===----------------------------------------------------------------------===//
2539
2540void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2541 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2542
2543 // There's no fundamental reason why we require at least one index.
2544 // But there's no present need to support it.
2545 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2546
2547 Indices.append(in_start: Idxs.begin(), in_end: Idxs.end());
2548 setName(Name);
2549}
2550
2551ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2552 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(i_nocapture: 0),
2553 (BasicBlock *)nullptr),
2554 Indices(EVI.Indices) {
2555 SubclassOptionalData = EVI.SubclassOptionalData;
2556}
2557
2558// getIndexedType - Returns the type of the element that would be extracted
2559// with an extractvalue instruction with the specified parameters.
2560//
// A null type is returned if the indices are invalid for the specified
// aggregate type.
2563//
2564Type *ExtractValueInst::getIndexedType(Type *Agg,
2565 ArrayRef<unsigned> Idxs) {
2566 for (unsigned Index : Idxs) {
2567 // We can't use CompositeType::indexValid(Index) here.
2568 // indexValid() always returns true for arrays because getelementptr allows
2569 // out-of-bounds indices. Since we don't allow those for extractvalue and
2570 // insertvalue we need to check array indexing manually.
2571 // Since the only other types we can index into are struct types it's just
2572 // as easy to check those manually as well.
2573 if (ArrayType *AT = dyn_cast<ArrayType>(Val: Agg)) {
2574 if (Index >= AT->getNumElements())
2575 return nullptr;
2576 Agg = AT->getElementType();
2577 } else if (StructType *ST = dyn_cast<StructType>(Val: Agg)) {
2578 if (Index >= ST->getNumElements())
2579 return nullptr;
2580 Agg = ST->getElementType(N: Index);
2581 } else {
2582 // Not a valid type to index into.
2583 return nullptr;
2584 }
2585 }
2586 return Agg;
2587}
2588
2589//===----------------------------------------------------------------------===//
2590// UnaryOperator Class
2591//===----------------------------------------------------------------------===//
2592
2593UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
2594 const Twine &Name, InsertPosition InsertBefore)
2595 : UnaryInstruction(Ty, iType, S, InsertBefore) {
2596 Op<0>() = S;
2597 setName(Name);
2598 AssertOK();
2599}
2600
2601UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, const Twine &Name,
2602 InsertPosition InsertBefore) {
2603 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2604}
2605
2606void UnaryOperator::AssertOK() {
2607 Value *LHS = getOperand(i_nocapture: 0);
2608 (void)LHS; // Silence warnings.
2609#ifndef NDEBUG
2610 switch (getOpcode()) {
2611 case FNeg:
2612 assert(getType() == LHS->getType() &&
2613 "Unary operation should return same type as operand!");
2614 assert(getType()->isFPOrFPVectorTy() &&
2615 "Tried to create a floating-point operation on a "
2616 "non-floating-point type!");
2617 break;
2618 default: llvm_unreachable("Invalid opcode provided");
2619 }
2620#endif
2621}
2622
2623//===----------------------------------------------------------------------===//
2624// BinaryOperator Class
2625//===----------------------------------------------------------------------===//
2626
2627BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
2628 const Twine &Name, InsertPosition InsertBefore)
2629 : Instruction(Ty, iType, AllocMarker, InsertBefore) {
2630 Op<0>() = S1;
2631 Op<1>() = S2;
2632 setName(Name);
2633 AssertOK();
2634}
2635
2636void BinaryOperator::AssertOK() {
2637 Value *LHS = getOperand(i_nocapture: 0), *RHS = getOperand(i_nocapture: 1);
2638 (void)LHS; (void)RHS; // Silence warnings.
2639 assert(LHS->getType() == RHS->getType() &&
2640 "Binary operator operand types must match!");
2641#ifndef NDEBUG
2642 switch (getOpcode()) {
2643 case Add: case Sub:
2644 case Mul:
2645 assert(getType() == LHS->getType() &&
2646 "Arithmetic operation should return same type as operands!");
2647 assert(getType()->isIntOrIntVectorTy() &&
2648 "Tried to create an integer operation on a non-integer type!");
2649 break;
2650 case FAdd: case FSub:
2651 case FMul:
2652 assert(getType() == LHS->getType() &&
2653 "Arithmetic operation should return same type as operands!");
2654 assert(getType()->isFPOrFPVectorTy() &&
2655 "Tried to create a floating-point operation on a "
2656 "non-floating-point type!");
2657 break;
2658 case UDiv:
2659 case SDiv:
2660 assert(getType() == LHS->getType() &&
2661 "Arithmetic operation should return same type as operands!");
2662 assert(getType()->isIntOrIntVectorTy() &&
2663 "Incorrect operand type (not integer) for S/UDIV");
2664 break;
2665 case FDiv:
2666 assert(getType() == LHS->getType() &&
2667 "Arithmetic operation should return same type as operands!");
2668 assert(getType()->isFPOrFPVectorTy() &&
2669 "Incorrect operand type (not floating point) for FDIV");
2670 break;
2671 case URem:
2672 case SRem:
2673 assert(getType() == LHS->getType() &&
2674 "Arithmetic operation should return same type as operands!");
2675 assert(getType()->isIntOrIntVectorTy() &&
2676 "Incorrect operand type (not integer) for S/UREM");
2677 break;
2678 case FRem:
2679 assert(getType() == LHS->getType() &&
2680 "Arithmetic operation should return same type as operands!");
2681 assert(getType()->isFPOrFPVectorTy() &&
2682 "Incorrect operand type (not floating point) for FREM");
2683 break;
2684 case Shl:
2685 case LShr:
2686 case AShr:
2687 assert(getType() == LHS->getType() &&
2688 "Shift operation should return same type as operands!");
2689 assert(getType()->isIntOrIntVectorTy() &&
2690 "Tried to create a shift operation on a non-integral type!");
2691 break;
2692 case And: case Or:
2693 case Xor:
2694 assert(getType() == LHS->getType() &&
2695 "Logical operation should return same type as operands!");
2696 assert(getType()->isIntOrIntVectorTy() &&
2697 "Tried to create a logical operation on a non-integral type!");
2698 break;
2699 default: llvm_unreachable("Invalid opcode provided");
2700 }
2701#endif
2702}
2703
2704BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
2705 const Twine &Name,
2706 InsertPosition InsertBefore) {
2707 assert(S1->getType() == S2->getType() &&
2708 "Cannot create binary operator with two operands of differing type!");
2709 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2710}
2711
2712BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
2713 InsertPosition InsertBefore) {
2714 Value *Zero = ConstantInt::get(Ty: Op->getType(), V: 0);
2715 return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
2716 InsertBefore);
2717}
2718
2719BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2720 InsertPosition InsertBefore) {
2721 Value *Zero = ConstantInt::get(Ty: Op->getType(), V: 0);
2722 return BinaryOperator::CreateNSWSub(V1: Zero, V2: Op, Name, InsertBefore);
2723}
2724
2725BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
2726 InsertPosition InsertBefore) {
2727 Constant *C = Constant::getAllOnesValue(Ty: Op->getType());
2728 return new BinaryOperator(Instruction::Xor, Op, C,
2729 Op->getType(), Name, InsertBefore);
2730}
2731
// Exchange the two operands of this instruction. This is safe to use on any
// binary instruction and does not modify the semantics of the instruction.
// Returns true if the operands cannot be swapped because the operation is not
// commutative.
2735bool BinaryOperator::swapOperands() {
2736 if (!isCommutative())
2737 return true; // Can't commute operands
2738 Op<0>().swap(RHS&: Op<1>());
2739 return false;
2740}
2741
2742//===----------------------------------------------------------------------===//
2743// FPMathOperator Class
2744//===----------------------------------------------------------------------===//
2745
2746float FPMathOperator::getFPAccuracy() const {
2747 const MDNode *MD =
2748 cast<Instruction>(Val: this)->getMetadata(KindID: LLVMContext::MD_fpmath);
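  // No !fpmath metadata means an accuracy of 0.0, i.e. the maximally accurate
  // default.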
2749 if (!MD)
2750 return 0.0;
2751 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD: MD->getOperand(I: 0));
2752 return Accuracy->getValueAPF().convertToFloat();
2753}
2754
2755//===----------------------------------------------------------------------===//
2756// CastInst Class
2757//===----------------------------------------------------------------------===//
2758
2759// Just determine if this cast only deals with integral->integral conversion.
2760bool CastInst::isIntegerCast() const {
2761 switch (getOpcode()) {
2762 default: return false;
2763 case Instruction::ZExt:
2764 case Instruction::SExt:
2765 case Instruction::Trunc:
2766 return true;
2767 case Instruction::BitCast:
2768 return getOperand(i_nocapture: 0)->getType()->isIntegerTy() &&
2769 getType()->isIntegerTy();
2770 }
2771}
2772
2773/// This function determines if the CastInst does not require any bits to be
2774/// changed in order to effect the cast. Essentially, it identifies cases where
2775/// no code gen is necessary for the cast, hence the name no-op cast. For
2776/// example, the following are all no-op casts:
2777/// # bitcast i32* %x to i8*
2778/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
2780/// Determine if the described cast is a no-op.
2781bool CastInst::isNoopCast(Instruction::CastOps Opcode,
2782 Type *SrcTy,
2783 Type *DestTy,
2784 const DataLayout &DL) {
2785 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
2786 switch (Opcode) {
2787 default: llvm_unreachable("Invalid CastOp");
2788 case Instruction::Trunc:
2789 case Instruction::ZExt:
2790 case Instruction::SExt:
2791 case Instruction::FPTrunc:
2792 case Instruction::FPExt:
2793 case Instruction::UIToFP:
2794 case Instruction::SIToFP:
2795 case Instruction::FPToUI:
2796 case Instruction::FPToSI:
2797 case Instruction::AddrSpaceCast:
    // TODO: Target information may give a more accurate answer here.
2799 return false;
2800 case Instruction::BitCast:
2801 return true; // BitCast never modifies bits.
2802 case Instruction::PtrToAddr:
2803 case Instruction::PtrToInt:
2804 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2805 DestTy->getScalarSizeInBits();
2806 case Instruction::IntToPtr:
2807 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2808 SrcTy->getScalarSizeInBits();
2809 }
2810}
2811
2812bool CastInst::isNoopCast(const DataLayout &DL) const {
2813 return isNoopCast(Opcode: getOpcode(), SrcTy: getOperand(i_nocapture: 0)->getType(), DestTy: getType(), DL);
2814}
2815
2816/// This function determines if a pair of casts can be eliminated and what
2817/// opcode should be used in the elimination. This assumes that there are two
2818/// instructions like this:
2819/// * %F = firstOpcode SrcTy %x to MidTy
2820/// * %S = secondOpcode MidTy %F to DstTy
2821/// The function returns a resultOpcode so these two casts can be replaced with:
/// * %Replacement = resultOpcode SrcTy %x to DstTy
2823/// If no such cast is permitted, the function returns 0.
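/// For example (illustrative): %F = zext i8 %x to i16 followed by
/// %S = zext i16 %F to i32 can be folded into %Replacement = zext i8 %x to
/// i32, so ZExt is returned.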
2824unsigned CastInst::isEliminableCastPair(Instruction::CastOps firstOp,
2825 Instruction::CastOps secondOp,
2826 Type *SrcTy, Type *MidTy, Type *DstTy,
2827 const DataLayout *DL) {
  // Define the 196 possibilities for these two cast instructions. The values
2829 // in this matrix determine what to do in a given situation and select the
2830 // case in the switch below. The rows correspond to firstOp, the columns
2831 // correspond to secondOp. In looking at the table below, keep in mind
2832 // the following cast properties:
2833 //
2834 // Size Compare Source Destination
2835 // Operator Src ? Size Type Sign Type Sign
2836 // -------- ------------ ------------------- ---------------------
2837 // TRUNC > Integer Any Integral Any
2838 // ZEXT < Integral Unsigned Integer Any
2839 // SEXT < Integral Signed Integer Any
2840 // FPTOUI n/a FloatPt n/a Integral Unsigned
2841 // FPTOSI n/a FloatPt n/a Integral Signed
2842 // UITOFP n/a Integral Unsigned FloatPt n/a
2843 // SITOFP n/a Integral Signed FloatPt n/a
2844 // FPTRUNC > FloatPt n/a FloatPt n/a
2845 // FPEXT < FloatPt n/a FloatPt n/a
2846 // PTRTOINT n/a Pointer n/a Integral Unsigned
2847 // PTRTOADDR n/a Pointer n/a Integral Unsigned
2848 // INTTOPTR n/a Integral Unsigned Pointer n/a
2849 // BITCAST = FirstClass n/a FirstClass n/a
2850 // ADDRSPCST n/a Pointer n/a Pointer n/a
2851 //
2852 // NOTE: some transforms are safe, but we consider them to be non-profitable.
2853 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2854 // into "fptoui double to i64", but this loses information about the range
2855 // of the produced value (we no longer know the top-part is all zeros).
2856 // Further this conversion is often much more expensive for typical hardware,
2857 // and causes issues when building libgcc. We disallow fptosi+sext for the
2858 // same reason.
2859 const unsigned numCastOps =
2860 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2861 // clang-format off
2862 static const uint8_t CastResults[numCastOps][numCastOps] = {
2863 // T F F U S F F P P I B A -+
2864 // R Z S P P I I T P 2 2 N T S |
2865 // U E E 2 2 2 2 R E I A T C C +- secondOp
2866 // N X X U S F F N X N D 2 V V |
2867 // C T T I I P P C T T R P T T -+
2868 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc -+
2869 { 8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt |
2870 { 8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt |
2871 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI |
2872 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI |
2873 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP +- firstOp
2874 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP |
2875 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc |
2876 { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt |
2877 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt |
2878 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr |
2879 { 99,99,99,99,99,99,99,99,99,11,11,99,15, 0}, // IntToPtr |
2880 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast |
2881 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2882 };
2883 // clang-format on
2884
2885 // TODO: This logic could be encoded into the table above and handled in the
2886 // switch below.
  // If either of the casts is a bitcast from scalar to vector, disallow the
  // merging. However, a pair of bitcasts is allowed.
2889 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2890 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2891 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2892
2893 // Check if any of the casts convert scalars <-> vectors.
2894 if ((IsFirstBitcast && isa<VectorType>(Val: SrcTy) != isa<VectorType>(Val: MidTy)) ||
2895 (IsSecondBitcast && isa<VectorType>(Val: MidTy) != isa<VectorType>(Val: DstTy)))
2896 if (!AreBothBitcasts)
2897 return 0;
2898
2899 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2900 [secondOp-Instruction::CastOpsBegin];
2901 switch (ElimCase) {
2902 case 0:
2903 // Categorically disallowed.
2904 return 0;
2905 case 1:
2906 // Allowed, use first cast's opcode.
2907 return firstOp;
2908 case 2:
2909 // Allowed, use second cast's opcode.
2910 return secondOp;
2911 case 3:
2912 // No-op cast in second op implies firstOp as long as the DestTy
2913 // is integer and we are not converting between a vector and a
2914 // non-vector type.
2915 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2916 return firstOp;
2917 return 0;
2918 case 4:
2919 // No-op cast in second op implies firstOp as long as the DestTy
2920 // matches MidTy.
2921 if (DstTy == MidTy)
2922 return firstOp;
2923 return 0;
2924 case 5:
2925 // No-op cast in first op implies secondOp as long as the SrcTy
2926 // is an integer.
2927 if (SrcTy->isIntegerTy())
2928 return secondOp;
2929 return 0;
2930 case 7: {
    // Bail out if the inttoptr/ptrtoint roundtrip optimization is disabled.
2932 if (DisableI2pP2iOpt)
2933 return 0;
2934
    // Cannot simplify if the source and destination pointer types differ,
    // e.g. because they are in different address spaces.
    if (SrcTy != DstTy)
2937 return 0;
2938
2939 // Cannot simplify if the intermediate integer size is smaller than the
2940 // pointer size.
2941 unsigned MidSize = MidTy->getScalarSizeInBits();
2942 if (!DL || MidSize < DL->getPointerTypeSizeInBits(SrcTy))
2943 return 0;
2944
2945 return Instruction::BitCast;
2946 }
2947 case 8: {
2948 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
2949 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2950 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2951 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2952 unsigned DstSize = DstTy->getScalarSizeInBits();
2953 if (SrcTy == DstTy)
2954 return Instruction::BitCast;
2955 if (SrcSize < DstSize)
2956 return firstOp;
2957 if (SrcSize > DstSize)
2958 return secondOp;
2959 return 0;
2960 }
2961 case 9:
2962 // zext, sext -> zext, because sext can't sign extend after zext
2963 return Instruction::ZExt;
2964 case 11: {
2965 // inttoptr, ptrtoint/ptrtoaddr -> integer cast
2966 if (!DL)
2967 return 0;
2968 unsigned MidSize = secondOp == Instruction::PtrToAddr
2969 ? DL->getAddressSizeInBits(Ty: MidTy)
2970 : DL->getPointerTypeSizeInBits(MidTy);
2971 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2972 unsigned DstSize = DstTy->getScalarSizeInBits();
2973 // If the middle size is smaller than both source and destination,
2974 // an additional masking operation would be required.
2975 if (MidSize < SrcSize && MidSize < DstSize)
2976 return 0;
2977 if (DstSize < SrcSize)
2978 return Instruction::Trunc;
2979 if (DstSize > SrcSize)
2980 return Instruction::ZExt;
2981 return Instruction::BitCast;
2982 }
2983 case 12:
2984 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2985 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2986 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2987 return Instruction::AddrSpaceCast;
2988 return Instruction::BitCast;
2989 case 13:
    // FIXME: this state can be merged with (1), but the following assert
    // is useful to check the correctness of the sequence due to the semantic
    // change of bitcast.
2993 assert(
2994 SrcTy->isPtrOrPtrVectorTy() &&
2995 MidTy->isPtrOrPtrVectorTy() &&
2996 DstTy->isPtrOrPtrVectorTy() &&
2997 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
2998 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2999 "Illegal addrspacecast, bitcast sequence!");
3000 // Allowed, use first cast's opcode
3001 return firstOp;
3002 case 14:
3003 // bitcast, addrspacecast -> addrspacecast
3004 return Instruction::AddrSpaceCast;
3005 case 15:
    // FIXME: this state can be merged with (1), but the following assert
    // is useful to check the correctness of the sequence due to the semantic
    // change of bitcast.
3009 assert(
3010 SrcTy->isIntOrIntVectorTy() &&
3011 MidTy->isPtrOrPtrVectorTy() &&
3012 DstTy->isPtrOrPtrVectorTy() &&
3013 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3014 "Illegal inttoptr, bitcast sequence!");
3015 // Allowed, use first cast's opcode
3016 return firstOp;
3017 case 16:
    // FIXME: this state can be merged with (2), but the following assert
    // is useful to check the correctness of the sequence due to the semantic
    // change of bitcast.
3021 assert(
3022 SrcTy->isPtrOrPtrVectorTy() &&
3023 MidTy->isPtrOrPtrVectorTy() &&
3024 DstTy->isIntOrIntVectorTy() &&
3025 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3026 "Illegal bitcast, ptrtoint sequence!");
3027 // Allowed, use second cast's opcode
3028 return secondOp;
3029 case 17:
3030 // (sitofp (zext x)) -> (uitofp x)
3031 return Instruction::UIToFP;
3032 case 99:
3033 // Cast combination can't happen (error in input). This is for all cases
3034 // where the MidTy is not the same for the two cast instructions.
3035 llvm_unreachable("Invalid Cast Combination");
3036 default:
3037 llvm_unreachable("Error in CastResults table!!!");
3038 }
3039}
3040
3041CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3042 const Twine &Name, InsertPosition InsertBefore) {
3043 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3044 // Construct and return the appropriate CastInst subclass
3045 switch (op) {
3046 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3047 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3048 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3049 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3050 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3051 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3052 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3053 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3054 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3055 case PtrToAddr: return new PtrToAddrInst (S, Ty, Name, InsertBefore);
3056 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3057 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3058 case BitCast:
3059 return new BitCastInst(S, Ty, Name, InsertBefore);
3060 case AddrSpaceCast:
3061 return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3062 default:
3063 llvm_unreachable("Invalid opcode provided");
3064 }
3065}
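
// Typical usage of this factory (a hypothetical caller; V, Ctx and IP are
// illustrative names, not defined here):
//   Type *I64 = Type::getInt64Ty(Ctx);
//   CastInst *Ext =
//       CastInst::Create(Instruction::SExt, V /* i32 */, I64, "ext", IP);
// The factory asserts castIsValid() and then constructs the matching
// subclass, here a SExtInst.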
3066
3067CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
3068 InsertPosition InsertBefore) {
3069 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3070 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3071 return Create(op: Instruction::ZExt, S, Ty, Name, InsertBefore);
3072}
3073
3074CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
3075 InsertPosition InsertBefore) {
3076 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3077 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3078 return Create(op: Instruction::SExt, S, Ty, Name, InsertBefore);
3079}
3080
3081CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name,
3082 InsertPosition InsertBefore) {
3083 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3084 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3085 return Create(op: Instruction::Trunc, S, Ty, Name, InsertBefore);
3086}
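
// These three helpers choose between a widening/narrowing cast and a bitcast
// purely by scalar bit width. For example (illustrative):
//   CreateZExtOrBitCast(V /* i32 */, Int32Ty, ...) -> bitcast (same width)
//   CreateZExtOrBitCast(V /* i32 */, Int64Ty, ...) -> zext    (wider result)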
3087
3088/// Create a BitCast or a PtrToInt cast instruction
3089CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, const Twine &Name,
3090 InsertPosition InsertBefore) {
3091 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3092 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3093 "Invalid cast");
3094 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3095 assert((!Ty->isVectorTy() ||
3096 cast<VectorType>(Ty)->getElementCount() ==
3097 cast<VectorType>(S->getType())->getElementCount()) &&
3098 "Invalid cast");
3099
3100 if (Ty->isIntOrIntVectorTy())
3101 return Create(op: Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3102
3103 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3104}
3105
3106CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3107 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3108 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3109 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3110
3111 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3112 return Create(op: Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3113
3114 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3115}
3116
3117CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
3118 const Twine &Name,
3119 InsertPosition InsertBefore) {
3120 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3121 return Create(op: Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3122 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3123 return Create(op: Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3124
3125 return Create(op: Instruction::BitCast, S, Ty, Name, InsertBefore);
3126}
3127
3128CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, bool isSigned,
3129 const Twine &Name,
3130 InsertPosition InsertBefore) {
3131 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3132 "Invalid integer cast");
3133 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3134 unsigned DstBits = Ty->getScalarSizeInBits();
3135 Instruction::CastOps opcode =
3136 (SrcBits == DstBits ? Instruction::BitCast :
3137 (SrcBits > DstBits ? Instruction::Trunc :
3138 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3139 return Create(op: opcode, S: C, Ty, Name, InsertBefore);
3140}
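
// The opcode selection above, spelled out (illustrative widths):
//   SrcBits == DstBits            -> BitCast (same-width no-op)
//   SrcBits >  DstBits            -> Trunc
//   SrcBits <  DstBits, signed    -> SExt
//   SrcBits <  DstBits, unsigned  -> ZExt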
3141
3142CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, const Twine &Name,
3143 InsertPosition InsertBefore) {
3144 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3145 "Invalid cast");
3146 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3147 unsigned DstBits = Ty->getScalarSizeInBits();
3148 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3149 Instruction::CastOps opcode =
3150 (SrcBits == DstBits ? Instruction::BitCast :
3151 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3152 return Create(op: opcode, S: C, Ty, Name, InsertBefore);
3153}
3154
3155bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3156 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3157 return false;
3158
3159 if (SrcTy == DestTy)
3160 return true;
3161
3162 if (VectorType *SrcVecTy = dyn_cast<VectorType>(Val: SrcTy)) {
3163 if (VectorType *DestVecTy = dyn_cast<VectorType>(Val: DestTy)) {
3164 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3165 // An element by element cast. Valid if casting the elements is valid.
3166 SrcTy = SrcVecTy->getElementType();
3167 DestTy = DestVecTy->getElementType();
3168 }
3169 }
3170 }
3171
3172 if (PointerType *DestPtrTy = dyn_cast<PointerType>(Val: DestTy)) {
3173 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(Val: SrcTy)) {
3174 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3175 }
3176 }
3177
3178 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3179 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3180
  // We could still have vectors of pointers if the number of elements doesn't
  // match.
3183 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3184 return false;
3185
3186 if (SrcBits != DestBits)
3187 return false;
3188
3189 return true;
3190}
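
// A few concrete cases (illustrative): <4 x i32> and <2 x i64> are
// bitcastable (both are 128 bits wide); ptr addrspace(0) and ptr
// addrspace(1) are not (bitcast may not change the address space); and
// i64 and ptr are not (that conversion requires inttoptr/ptrtoint).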
3191
3192bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
3193 const DataLayout &DL) {
3194 // ptrtoint and inttoptr are not allowed on non-integral pointers
3195 if (auto *PtrTy = dyn_cast<PointerType>(Val: SrcTy))
3196 if (auto *IntTy = dyn_cast<IntegerType>(Val: DestTy))
3197 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3198 !DL.isNonIntegralPointerType(PT: PtrTy));
3199 if (auto *PtrTy = dyn_cast<PointerType>(Val: DestTy))
3200 if (auto *IntTy = dyn_cast<IntegerType>(Val: SrcTy))
3201 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3202 !DL.isNonIntegralPointerType(PT: PtrTy));
3203
3204 return isBitCastable(SrcTy, DestTy);
3205}
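
// For example (illustrative, assuming 64-bit integral pointers): i64 <-> ptr
// counts as a no-op pointer cast and is accepted, i32 <-> ptr is rejected
// because the widths differ, and any int <-> ptr pair is rejected when the
// pointer's address space is non-integral.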
3206
// Provide a way to get a "cast" where the cast opcode is inferred from the
// types and size of the operand. This essentially parallels the logic in the
// castIsValid function below. The invariant is that
//   castIsValid(getCastOpcode(Val, Ty), Val, Ty)
// never fails; in other words, this always produces a "correct" casting
// opcode for the arguments passed to it.
3213Instruction::CastOps
3214CastInst::getCastOpcode(
3215 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3216 Type *SrcTy = Src->getType();
3217
3218 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3219 "Only first class types are castable!");
3220
3221 if (SrcTy == DestTy)
3222 return BitCast;
3223
3224 // FIXME: Check address space sizes here
3225 if (VectorType *SrcVecTy = dyn_cast<VectorType>(Val: SrcTy))
3226 if (VectorType *DestVecTy = dyn_cast<VectorType>(Val: DestTy))
3227 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3228 // An element by element cast. Find the appropriate opcode based on the
3229 // element types.
3230 SrcTy = SrcVecTy->getElementType();
3231 DestTy = DestVecTy->getElementType();
3232 }
3233
3234 // Get the bit sizes, we'll need these
3235 // FIXME: This doesn't work for scalable vector types with different element
3236 // counts that don't call getElementType above.
3237 unsigned SrcBits =
3238 SrcTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3239 unsigned DestBits =
3240 DestTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3241
3242 // Run through the possibilities ...
3243 if (DestTy->isIntegerTy()) { // Casting to integral
3244 if (SrcTy->isIntegerTy()) { // Casting from integral
3245 if (DestBits < SrcBits)
3246 return Trunc; // int -> smaller int
3247 else if (DestBits > SrcBits) { // its an extension
3248 if (SrcIsSigned)
3249 return SExt; // signed -> SEXT
3250 else
3251 return ZExt; // unsigned -> ZEXT
3252 } else {
3253 return BitCast; // Same size, No-op cast
3254 }
3255 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3256 if (DestIsSigned)
3257 return FPToSI; // FP -> sint
3258 else
3259 return FPToUI; // FP -> uint
3260 } else if (SrcTy->isVectorTy()) {
3261 assert(DestBits == SrcBits &&
3262 "Casting vector to integer of different width");
3263 return BitCast; // Same size, no-op cast
3264 } else {
3265 assert(SrcTy->isPointerTy() &&
3266 "Casting from a value that is not first-class type");
3267 return PtrToInt; // ptr -> int
3268 }
3269 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3270 if (SrcTy->isIntegerTy()) { // Casting from integral
3271 if (SrcIsSigned)
3272 return SIToFP; // sint -> FP
3273 else
3274 return UIToFP; // uint -> FP
3275 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3276 if (DestBits < SrcBits) {
3277 return FPTrunc; // FP -> smaller FP
3278 } else if (DestBits > SrcBits) {
3279 return FPExt; // FP -> larger FP
3280 } else {
3281 return BitCast; // same size, no-op cast
3282 }
3283 } else if (SrcTy->isVectorTy()) {
3284 assert(DestBits == SrcBits &&
3285 "Casting vector to floating point of different width");
3286 return BitCast; // same size, no-op cast
3287 }
3288 llvm_unreachable("Casting pointer or non-first class to float");
3289 } else if (DestTy->isVectorTy()) {
3290 assert(DestBits == SrcBits &&
3291 "Illegal cast to vector (wrong type or size)");
3292 return BitCast;
3293 } else if (DestTy->isPointerTy()) {
3294 if (SrcTy->isPointerTy()) {
3295 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3296 return AddrSpaceCast;
3297 return BitCast; // ptr -> ptr
3298 } else if (SrcTy->isIntegerTy()) {
3299 return IntToPtr; // int -> ptr
3300 }
3301 llvm_unreachable("Casting pointer to other than pointer or int");
3302 }
3303 llvm_unreachable("Casting to type that is not first-class");
3304}
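
// Examples of the inference above (illustrative; V and P are hypothetical
// values of the commented types):
//   getCastOpcode(V /* i32 */, /*SrcIsSigned=*/true, Int64Ty, false) -> SExt
//   getCastOpcode(V /* double */, false, Int32Ty, /*DestIsSigned=*/false)
//       -> FPToUI
//   getCastOpcode(P /* ptr as0 */, false, PtrAS1Ty, false) -> AddrSpaceCast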
3305
3306//===----------------------------------------------------------------------===//
3307// CastInst SubClass Constructors
3308//===----------------------------------------------------------------------===//
3309
3310/// Check that the construction parameters for a CastInst are correct. This
3311/// could be broken out into the separate constructors but it is useful to have
3312/// it in one place and to eliminate the redundant code for getting the sizes
3313/// of the types involved.
3314bool
3315CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
3316 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3317 SrcTy->isAggregateType() || DstTy->isAggregateType())
3318 return false;
3319
  // Get the sizes of the types in bits and note whether we are dealing with
  // vector types; we'll need both later.
3322 bool SrcIsVec = isa<VectorType>(Val: SrcTy);
3323 bool DstIsVec = isa<VectorType>(Val: DstTy);
3324 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3325 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3326
3327 // If these are vector types, get the lengths of the vectors (using zero for
3328 // scalar types means that checking that vector lengths match also checks that
3329 // scalars are not being converted to vectors or vectors to scalars).
3330 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(Val: SrcTy)->getElementCount()
3331 : ElementCount::getFixed(MinVal: 0);
3332 ElementCount DstEC = DstIsVec ? cast<VectorType>(Val: DstTy)->getElementCount()
3333 : ElementCount::getFixed(MinVal: 0);
3334
3335 // Switch on the opcode provided
3336 switch (op) {
3337 default: return false; // This is an input error
3338 case Instruction::Trunc:
3339 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3340 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3341 case Instruction::ZExt:
3342 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3343 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3344 case Instruction::SExt:
3345 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3346 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3347 case Instruction::FPTrunc:
3348 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3349 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3350 case Instruction::FPExt:
3351 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3352 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3353 case Instruction::UIToFP:
3354 case Instruction::SIToFP:
3355 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3356 SrcEC == DstEC;
3357 case Instruction::FPToUI:
3358 case Instruction::FPToSI:
3359 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3360 SrcEC == DstEC;
3361 case Instruction::PtrToAddr:
3362 case Instruction::PtrToInt:
3363 if (SrcEC != DstEC)
3364 return false;
3365 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3366 case Instruction::IntToPtr:
3367 if (SrcEC != DstEC)
3368 return false;
3369 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3370 case Instruction::BitCast: {
3371 PointerType *SrcPtrTy = dyn_cast<PointerType>(Val: SrcTy->getScalarType());
3372 PointerType *DstPtrTy = dyn_cast<PointerType>(Val: DstTy->getScalarType());
3373
3374 // BitCast implies a no-op cast of type only. No bits change.
3375 // However, you can't cast pointers to anything but pointers.
3376 if (!SrcPtrTy != !DstPtrTy)
3377 return false;
3378
3379 // For non-pointer cases, the cast is okay if the source and destination bit
3380 // widths are identical.
3381 if (!SrcPtrTy)
3382 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3383
3384 // If both are pointers then the address spaces must match.
3385 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3386 return false;
3387
3388 // A vector of pointers must have the same number of elements.
3389 if (SrcIsVec && DstIsVec)
3390 return SrcEC == DstEC;
3391 if (SrcIsVec)
3392 return SrcEC == ElementCount::getFixed(MinVal: 1);
3393 if (DstIsVec)
3394 return DstEC == ElementCount::getFixed(MinVal: 1);
3395
3396 return true;
3397 }
3398 case Instruction::AddrSpaceCast: {
3399 PointerType *SrcPtrTy = dyn_cast<PointerType>(Val: SrcTy->getScalarType());
3400 if (!SrcPtrTy)
3401 return false;
3402
3403 PointerType *DstPtrTy = dyn_cast<PointerType>(Val: DstTy->getScalarType());
3404 if (!DstPtrTy)
3405 return false;
3406
3407 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3408 return false;
3409
3410 return SrcEC == DstEC;
3411 }
3412 }
3413}
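
// A few concrete checks (illustrative):
//   castIsValid(Trunc, i64, i32)             -> true  (proper narrowing)
//   castIsValid(Trunc, i32, i64)             -> false (widening, not trunc)
//   castIsValid(ZExt, <2 x i32>, <4 x i64>)  -> false (element counts differ)
//   castIsValid(BitCast, ptr as0, ptr as1)   -> false (use addrspacecast)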
3414
3415TruncInst::TruncInst(Value *S, Type *Ty, const Twine &Name,
3416 InsertPosition InsertBefore)
3417 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3418 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3419}
3420
3421ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
3422 InsertPosition InsertBefore)
3423 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3424 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3425}
3426
3427SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
3428 InsertPosition InsertBefore)
3429 : CastInst(Ty, SExt, S, Name, InsertBefore) {
3430 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3431}
3432
3433FPTruncInst::FPTruncInst(Value *S, Type *Ty, const Twine &Name,
3434 InsertPosition InsertBefore)
3435 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3436 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3437}
3438
3439FPExtInst::FPExtInst(Value *S, Type *Ty, const Twine &Name,
3440 InsertPosition InsertBefore)
3441 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3442 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3443}
3444
3445UIToFPInst::UIToFPInst(Value *S, Type *Ty, const Twine &Name,
3446 InsertPosition InsertBefore)
3447 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3448 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3449}
3450
3451SIToFPInst::SIToFPInst(Value *S, Type *Ty, const Twine &Name,
3452 InsertPosition InsertBefore)
3453 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3454 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3455}
3456
3457FPToUIInst::FPToUIInst(Value *S, Type *Ty, const Twine &Name,
3458 InsertPosition InsertBefore)
3459 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3460 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3461}
3462
3463FPToSIInst::FPToSIInst(Value *S, Type *Ty, const Twine &Name,
3464 InsertPosition InsertBefore)
3465 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3466 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3467}
3468
3469PtrToIntInst::PtrToIntInst(Value *S, Type *Ty, const Twine &Name,
3470 InsertPosition InsertBefore)
3471 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3472 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3473}
3474
3475PtrToAddrInst::PtrToAddrInst(Value *S, Type *Ty, const Twine &Name,
3476 InsertPosition InsertBefore)
3477 : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
3478 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
3479}
3480
3481IntToPtrInst::IntToPtrInst(Value *S, Type *Ty, const Twine &Name,
3482 InsertPosition InsertBefore)
3483 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3484 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3485}
3486
3487BitCastInst::BitCastInst(Value *S, Type *Ty, const Twine &Name,
3488 InsertPosition InsertBefore)
3489 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3490 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3491}
3492
3493AddrSpaceCastInst::AddrSpaceCastInst(Value *S, Type *Ty, const Twine &Name,
3494 InsertPosition InsertBefore)
3495 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3496 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3497}
3498
3499//===----------------------------------------------------------------------===//
3500// CmpInst Classes
3501//===----------------------------------------------------------------------===//
3502
3503CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3504 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3505 Instruction *FlagsSource)
3506 : Instruction(ty, op, AllocMarker, InsertBefore) {
3507 Op<0>() = LHS;
3508 Op<1>() = RHS;
3509 setPredicate(predicate);
3510 setName(Name);
3511 if (FlagsSource)
3512 copyIRFlags(V: FlagsSource);
3513}
3514
3515CmpInst *CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
3516 const Twine &Name, InsertPosition InsertBefore) {
3517 if (Op == Instruction::ICmp) {
3518 if (InsertBefore.isValid())
3519 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3520 S1, S2, Name);
3521 else
3522 return new ICmpInst(CmpInst::Predicate(predicate),
3523 S1, S2, Name);
3524 }
3525
3526 if (InsertBefore.isValid())
3527 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3528 S1, S2, Name);
3529 else
3530 return new FCmpInst(CmpInst::Predicate(predicate),
3531 S1, S2, Name);
3532}
3533
3534CmpInst *CmpInst::CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1,
3535 Value *S2,
3536 const Instruction *FlagsSource,
3537 const Twine &Name,
3538 InsertPosition InsertBefore) {
3539 CmpInst *Inst = Create(Op, predicate: Pred, S1, S2, Name, InsertBefore);
3540 Inst->copyIRFlags(V: FlagsSource);
3541 return Inst;
3542}
3543
3544void CmpInst::swapOperands() {
3545 if (ICmpInst *IC = dyn_cast<ICmpInst>(Val: this))
3546 IC->swapOperands();
3547 else
3548 cast<FCmpInst>(Val: this)->swapOperands();
3549}
3550
3551bool CmpInst::isCommutative() const {
3552 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Val: this))
3553 return IC->isCommutative();
3554 return cast<FCmpInst>(Val: this)->isCommutative();
3555}
3556
3557bool CmpInst::isEquality(Predicate P) {
3558 if (ICmpInst::isIntPredicate(P))
3559 return ICmpInst::isEquality(P);
3560 if (FCmpInst::isFPPredicate(P))
3561 return FCmpInst::isEquality(Pred: P);
3562 llvm_unreachable("Unsupported predicate kind");
3563}
3564
3565// Returns true if either operand of CmpInst is a provably non-zero
3566// floating-point constant.
3567static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3568 auto *LHS = dyn_cast<Constant>(Val: Cmp->getOperand(i_nocapture: 0));
3569 auto *RHS = dyn_cast<Constant>(Val: Cmp->getOperand(i_nocapture: 1));
3570 if (auto *Const = LHS ? LHS : RHS) {
3571 using namespace llvm::PatternMatch;
3572 return match(V: Const, P: m_NonZeroNotDenormalFP());
3573 }
3574 return false;
3575}
3576
3577// Floating-point equality is not an equivalence when comparing +0.0 with
3578// -0.0, when comparing NaN with another value, or when flushing
3579// denormals-to-zero.
3580bool CmpInst::isEquivalence(bool Invert) const {
3581 switch (Invert ? getInversePredicate() : getPredicate()) {
3582 case CmpInst::Predicate::ICMP_EQ:
3583 return true;
3584 case CmpInst::Predicate::FCMP_UEQ:
3585 if (!hasNoNaNs())
3586 return false;
3587 [[fallthrough]];
3588 case CmpInst::Predicate::FCMP_OEQ:
3589 return hasNonZeroFPOperands(Cmp: this);
3590 default:
3591 return false;
3592 }
3593}
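
// For example (illustrative): 'fcmp oeq %x, 1.0' is an equivalence relation
// on %x, because the constant 1.0 is provably non-zero and not a denormal;
// 'fcmp oeq %x, 0.0' is not, because +0.0 and -0.0 compare equal without
// being the same value.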
3594
3595CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
3596 switch (pred) {
3597 default: llvm_unreachable("Unknown cmp predicate!");
3598 case ICMP_EQ: return ICMP_NE;
3599 case ICMP_NE: return ICMP_EQ;
3600 case ICMP_UGT: return ICMP_ULE;
3601 case ICMP_ULT: return ICMP_UGE;
3602 case ICMP_UGE: return ICMP_ULT;
3603 case ICMP_ULE: return ICMP_UGT;
3604 case ICMP_SGT: return ICMP_SLE;
3605 case ICMP_SLT: return ICMP_SGE;
3606 case ICMP_SGE: return ICMP_SLT;
3607 case ICMP_SLE: return ICMP_SGT;
3608
3609 case FCMP_OEQ: return FCMP_UNE;
3610 case FCMP_ONE: return FCMP_UEQ;
3611 case FCMP_OGT: return FCMP_ULE;
3612 case FCMP_OLT: return FCMP_UGE;
3613 case FCMP_OGE: return FCMP_ULT;
3614 case FCMP_OLE: return FCMP_UGT;
3615 case FCMP_UEQ: return FCMP_ONE;
3616 case FCMP_UNE: return FCMP_OEQ;
3617 case FCMP_UGT: return FCMP_OLE;
3618 case FCMP_ULT: return FCMP_OGE;
3619 case FCMP_UGE: return FCMP_OLT;
3620 case FCMP_ULE: return FCMP_OGT;
3621 case FCMP_ORD: return FCMP_UNO;
3622 case FCMP_UNO: return FCMP_ORD;
3623 case FCMP_TRUE: return FCMP_FALSE;
3624 case FCMP_FALSE: return FCMP_TRUE;
3625 }
3626}
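
// Note that inverting an FP predicate flips ordered to unordered and vice
// versa: e.g. the inverse of FCMP_OGT ("ordered and greater") is FCMP_ULE
// ("unordered or less-or-equal"), so for any operands, including NaNs,
// exactly one of the two predicates holds.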
3627
3628StringRef CmpInst::getPredicateName(Predicate Pred) {
3629 switch (Pred) {
3630 default: return "unknown";
3631 case FCmpInst::FCMP_FALSE: return "false";
3632 case FCmpInst::FCMP_OEQ: return "oeq";
3633 case FCmpInst::FCMP_OGT: return "ogt";
3634 case FCmpInst::FCMP_OGE: return "oge";
3635 case FCmpInst::FCMP_OLT: return "olt";
3636 case FCmpInst::FCMP_OLE: return "ole";
3637 case FCmpInst::FCMP_ONE: return "one";
3638 case FCmpInst::FCMP_ORD: return "ord";
3639 case FCmpInst::FCMP_UNO: return "uno";
3640 case FCmpInst::FCMP_UEQ: return "ueq";
3641 case FCmpInst::FCMP_UGT: return "ugt";
3642 case FCmpInst::FCMP_UGE: return "uge";
3643 case FCmpInst::FCMP_ULT: return "ult";
3644 case FCmpInst::FCMP_ULE: return "ule";
3645 case FCmpInst::FCMP_UNE: return "une";
3646 case FCmpInst::FCMP_TRUE: return "true";
3647 case ICmpInst::ICMP_EQ: return "eq";
3648 case ICmpInst::ICMP_NE: return "ne";
3649 case ICmpInst::ICMP_SGT: return "sgt";
3650 case ICmpInst::ICMP_SGE: return "sge";
3651 case ICmpInst::ICMP_SLT: return "slt";
3652 case ICmpInst::ICMP_SLE: return "sle";
3653 case ICmpInst::ICMP_UGT: return "ugt";
3654 case ICmpInst::ICMP_UGE: return "uge";
3655 case ICmpInst::ICMP_ULT: return "ult";
3656 case ICmpInst::ICMP_ULE: return "ule";
3657 }
3658}
3659
3660raw_ostream &llvm::operator<<(raw_ostream &OS, CmpInst::Predicate Pred) {
3661 OS << CmpInst::getPredicateName(Pred);
3662 return OS;
3663}
3664
3665ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
3666 switch (pred) {
3667 default: llvm_unreachable("Unknown icmp predicate!");
3668 case ICMP_EQ: case ICMP_NE:
3669 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3670 return pred;
3671 case ICMP_UGT: return ICMP_SGT;
3672 case ICMP_ULT: return ICMP_SLT;
3673 case ICMP_UGE: return ICMP_SGE;
3674 case ICMP_ULE: return ICMP_SLE;
3675 }
3676}
3677
3678ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
3679 switch (pred) {
3680 default: llvm_unreachable("Unknown icmp predicate!");
3681 case ICMP_EQ: case ICMP_NE:
3682 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3683 return pred;
3684 case ICMP_SGT: return ICMP_UGT;
3685 case ICMP_SLT: return ICMP_ULT;
3686 case ICMP_SGE: return ICMP_UGE;
3687 case ICMP_SLE: return ICMP_ULE;
3688 }
3689}
3690
3691CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
3692 switch (pred) {
3693 default: llvm_unreachable("Unknown cmp predicate!");
3694 case ICMP_EQ: case ICMP_NE:
3695 return pred;
3696 case ICMP_SGT: return ICMP_SLT;
3697 case ICMP_SLT: return ICMP_SGT;
3698 case ICMP_SGE: return ICMP_SLE;
3699 case ICMP_SLE: return ICMP_SGE;
3700 case ICMP_UGT: return ICMP_ULT;
3701 case ICMP_ULT: return ICMP_UGT;
3702 case ICMP_UGE: return ICMP_ULE;
3703 case ICMP_ULE: return ICMP_UGE;
3704
3705 case FCMP_FALSE: case FCMP_TRUE:
3706 case FCMP_OEQ: case FCMP_ONE:
3707 case FCMP_UEQ: case FCMP_UNE:
3708 case FCMP_ORD: case FCMP_UNO:
3709 return pred;
3710 case FCMP_OGT: return FCMP_OLT;
3711 case FCMP_OLT: return FCMP_OGT;
3712 case FCMP_OGE: return FCMP_OLE;
3713 case FCMP_OLE: return FCMP_OGE;
3714 case FCMP_UGT: return FCMP_ULT;
3715 case FCMP_ULT: return FCMP_UGT;
3716 case FCMP_UGE: return FCMP_ULE;
3717 case FCMP_ULE: return FCMP_UGE;
3718 }
3719}
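
// For example (illustrative): 'icmp sgt %a, %b' is the same comparison as
// 'icmp slt %b, %a', so getSwappedPredicate(ICMP_SGT) returns ICMP_SLT,
// while symmetric predicates such as ICMP_EQ map to themselves.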
3720
3721bool CmpInst::isNonStrictPredicate(Predicate pred) {
3722 switch (pred) {
3723 case ICMP_SGE:
3724 case ICMP_SLE:
3725 case ICMP_UGE:
3726 case ICMP_ULE:
3727 case FCMP_OGE:
3728 case FCMP_OLE:
3729 case FCMP_UGE:
3730 case FCMP_ULE:
3731 return true;
3732 default:
3733 return false;
3734 }
3735}
3736
3737bool CmpInst::isStrictPredicate(Predicate pred) {
3738 switch (pred) {
3739 case ICMP_SGT:
3740 case ICMP_SLT:
3741 case ICMP_UGT:
3742 case ICMP_ULT:
3743 case FCMP_OGT:
3744 case FCMP_OLT:
3745 case FCMP_UGT:
3746 case FCMP_ULT:
3747 return true;
3748 default:
3749 return false;
3750 }
3751}
3752
3753CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
3754 switch (pred) {
3755 case ICMP_SGE:
3756 return ICMP_SGT;
3757 case ICMP_SLE:
3758 return ICMP_SLT;
3759 case ICMP_UGE:
3760 return ICMP_UGT;
3761 case ICMP_ULE:
3762 return ICMP_ULT;
3763 case FCMP_OGE:
3764 return FCMP_OGT;
3765 case FCMP_OLE:
3766 return FCMP_OLT;
3767 case FCMP_UGE:
3768 return FCMP_UGT;
3769 case FCMP_ULE:
3770 return FCMP_ULT;
3771 default:
3772 return pred;
3773 }
3774}
3775
3776CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
3777 switch (pred) {
3778 case ICMP_SGT:
3779 return ICMP_SGE;
3780 case ICMP_SLT:
3781 return ICMP_SLE;
3782 case ICMP_UGT:
3783 return ICMP_UGE;
3784 case ICMP_ULT:
3785 return ICMP_ULE;
3786 case FCMP_OGT:
3787 return FCMP_OGE;
3788 case FCMP_OLT:
3789 return FCMP_OLE;
3790 case FCMP_UGT:
3791 return FCMP_UGE;
3792 case FCMP_ULT:
3793 return FCMP_ULE;
3794 default:
3795 return pred;
3796 }
3797}
3798
3799CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
3800 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3801
3802 if (isStrictPredicate(pred))
3803 return getNonStrictPredicate(pred);
3804 if (isNonStrictPredicate(pred))
3805 return getStrictPredicate(pred);
3806
3807 llvm_unreachable("Unknown predicate!");
3808}
3809
3810bool CmpInst::isUnsigned(Predicate predicate) {
3811 switch (predicate) {
3812 default: return false;
3813 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
3814 case ICmpInst::ICMP_UGE: return true;
3815 }
3816}
3817
3818bool CmpInst::isSigned(Predicate predicate) {
3819 switch (predicate) {
3820 default: return false;
3821 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
3822 case ICmpInst::ICMP_SGE: return true;
3823 }
3824}
3825
3826bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3827 ICmpInst::Predicate Pred) {
3828 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3829 switch (Pred) {
3830 case ICmpInst::Predicate::ICMP_EQ:
3831 return LHS.eq(RHS);
3832 case ICmpInst::Predicate::ICMP_NE:
3833 return LHS.ne(RHS);
3834 case ICmpInst::Predicate::ICMP_UGT:
3835 return LHS.ugt(RHS);
3836 case ICmpInst::Predicate::ICMP_UGE:
3837 return LHS.uge(RHS);
3838 case ICmpInst::Predicate::ICMP_ULT:
3839 return LHS.ult(RHS);
3840 case ICmpInst::Predicate::ICMP_ULE:
3841 return LHS.ule(RHS);
3842 case ICmpInst::Predicate::ICMP_SGT:
3843 return LHS.sgt(RHS);
3844 case ICmpInst::Predicate::ICMP_SGE:
3845 return LHS.sge(RHS);
3846 case ICmpInst::Predicate::ICMP_SLT:
3847 return LHS.slt(RHS);
3848 case ICmpInst::Predicate::ICMP_SLE:
3849 return LHS.sle(RHS);
3850 default:
3851 llvm_unreachable("Unexpected non-integer predicate.");
  }
3853}
3854
3855bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3856 FCmpInst::Predicate Pred) {
3857 APFloat::cmpResult R = LHS.compare(RHS);
3858 switch (Pred) {
3859 default:
3860 llvm_unreachable("Invalid FCmp Predicate");
3861 case FCmpInst::FCMP_FALSE:
3862 return false;
3863 case FCmpInst::FCMP_TRUE:
3864 return true;
3865 case FCmpInst::FCMP_UNO:
3866 return R == APFloat::cmpUnordered;
3867 case FCmpInst::FCMP_ORD:
3868 return R != APFloat::cmpUnordered;
3869 case FCmpInst::FCMP_UEQ:
3870 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3871 case FCmpInst::FCMP_OEQ:
3872 return R == APFloat::cmpEqual;
3873 case FCmpInst::FCMP_UNE:
3874 return R != APFloat::cmpEqual;
3875 case FCmpInst::FCMP_ONE:
3876 return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
3877 case FCmpInst::FCMP_ULT:
3878 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3879 case FCmpInst::FCMP_OLT:
3880 return R == APFloat::cmpLessThan;
3881 case FCmpInst::FCMP_UGT:
3882 return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
3883 case FCmpInst::FCMP_OGT:
3884 return R == APFloat::cmpGreaterThan;
3885 case FCmpInst::FCMP_ULE:
3886 return R != APFloat::cmpGreaterThan;
3887 case FCmpInst::FCMP_OLE:
3888 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3889 case FCmpInst::FCMP_UGE:
3890 return R != APFloat::cmpLessThan;
3891 case FCmpInst::FCMP_OGE:
3892 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3893 }
3894}
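
// For example (illustrative): if either operand is a NaN, LHS.compare(RHS)
// returns cmpUnordered, so FCMP_UGT ("unordered or greater") evaluates to
// true while FCMP_OGT ("ordered and greater") evaluates to false.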
3895
3896std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3897 const KnownBits &RHS,
3898 ICmpInst::Predicate Pred) {
3899 switch (Pred) {
3900 case ICmpInst::ICMP_EQ:
3901 return KnownBits::eq(LHS, RHS);
3902 case ICmpInst::ICMP_NE:
3903 return KnownBits::ne(LHS, RHS);
3904 case ICmpInst::ICMP_UGE:
3905 return KnownBits::uge(LHS, RHS);
3906 case ICmpInst::ICMP_UGT:
3907 return KnownBits::ugt(LHS, RHS);
3908 case ICmpInst::ICMP_ULE:
3909 return KnownBits::ule(LHS, RHS);
3910 case ICmpInst::ICMP_ULT:
3911 return KnownBits::ult(LHS, RHS);
3912 case ICmpInst::ICMP_SGE:
3913 return KnownBits::sge(LHS, RHS);
3914 case ICmpInst::ICMP_SGT:
3915 return KnownBits::sgt(LHS, RHS);
3916 case ICmpInst::ICMP_SLE:
3917 return KnownBits::sle(LHS, RHS);
3918 case ICmpInst::ICMP_SLT:
3919 return KnownBits::slt(LHS, RHS);
3920 default:
3921 llvm_unreachable("Unexpected non-integer predicate.");
3922 }
3923}
3924
3925CmpInst::Predicate ICmpInst::getFlippedSignednessPredicate(Predicate pred) {
3926 if (CmpInst::isEquality(P: pred))
3927 return pred;
3928 if (isSigned(predicate: pred))
3929 return getUnsignedPredicate(pred);
3930 if (isUnsigned(predicate: pred))
3931 return getSignedPredicate(pred);
3932
3933 llvm_unreachable("Unknown predicate!");
3934}
3935
3936bool CmpInst::isOrdered(Predicate predicate) {
3937 switch (predicate) {
3938 default: return false;
3939 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
3940 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
3941 case FCmpInst::FCMP_ORD: return true;
3942 }
3943}
3944
3945bool CmpInst::isUnordered(Predicate predicate) {
3946 switch (predicate) {
3947 default: return false;
3948 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
3949 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
3950 case FCmpInst::FCMP_UNO: return true;
3951 }
3952}
3953
3954bool CmpInst::isTrueWhenEqual(Predicate predicate) {
3955 switch(predicate) {
3956 default: return false;
3957 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3958 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3959 }
3960}
3961
3962bool CmpInst::isFalseWhenEqual(Predicate predicate) {
3963 switch(predicate) {
3964 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3965 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3966 default: return false;
3967 }
3968}
3969
3970static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2) {
3971 // If the predicates match, then we know the first condition implies the
3972 // second is true.
3973 if (CmpPredicate::getMatching(A: Pred1, B: Pred2))
3974 return true;
3975
3976 if (Pred1.hasSameSign() && CmpInst::isSigned(predicate: Pred2))
3977 Pred1 = ICmpInst::getFlippedSignednessPredicate(pred: Pred1);
3978 else if (Pred2.hasSameSign() && CmpInst::isSigned(predicate: Pred1))
3979 Pred2 = ICmpInst::getFlippedSignednessPredicate(pred: Pred2);
3980
3981 switch (Pred1) {
3982 default:
3983 break;
3984 case CmpInst::ICMP_EQ:
3985 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3986 return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3987 Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3988 case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3989 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3990 case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3991 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
3992 case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3993 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
3994 case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3995 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
3996 }
3997 return false;
3998}
3999
4000static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1,
4001 CmpPredicate Pred2) {
4002 return isImpliedTrueByMatchingCmp(Pred1,
4003 Pred2: ICmpInst::getInverseCmpPredicate(Pred: Pred2));
4004}
4005
4006std::optional<bool> ICmpInst::isImpliedByMatchingCmp(CmpPredicate Pred1,
4007 CmpPredicate Pred2) {
4008 if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
4009 return true;
4010 if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
4011 return false;
4012 return std::nullopt;
4013}
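
// Examples for a shared operand pair (A, B) (illustrative):
//   Pred1 = ICMP_UGT, Pred2 = ICMP_NE  -> true  (A >u B implies A != B)
//   Pred1 = ICMP_UGT, Pred2 = ICMP_EQ  -> false (A >u B refutes A == B)
//   Pred1 = ICMP_UGT, Pred2 = ICMP_SGT -> std::nullopt (no implication)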
4014
4015//===----------------------------------------------------------------------===//
4016// CmpPredicate Implementation
4017//===----------------------------------------------------------------------===//
4018
4019std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
4020 CmpPredicate B) {
4021 if (A.Pred == B.Pred)
4022 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
4023 if (CmpInst::isFPPredicate(P: A) || CmpInst::isFPPredicate(P: B))
4024 return {};
4025 if (A.HasSameSign &&
4026 A.Pred == ICmpInst::getFlippedSignednessPredicate(pred: B.Pred))
4027 return B.Pred;
4028 if (B.HasSameSign &&
4029 B.Pred == ICmpInst::getFlippedSignednessPredicate(pred: A.Pred))
4030 return A.Pred;
4031 return {};
4032}
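
// For example (illustrative): getMatching(ult + samesign, slt) yields slt,
// since ult with the samesign flag is interchangeable with slt, while plain
// getMatching(ult, slt) yields no match.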
4033
4034CmpInst::Predicate CmpPredicate::getPreferredSignedPredicate() const {
4035 return HasSameSign ? ICmpInst::getSignedPredicate(pred: Pred) : Pred;
4036}
4037
4038CmpPredicate CmpPredicate::get(const CmpInst *Cmp) {
4039 if (auto *ICI = dyn_cast<ICmpInst>(Val: Cmp))
4040 return ICI->getCmpPredicate();
4041 return Cmp->getPredicate();
4042}
4043
4044CmpPredicate CmpPredicate::getSwapped(CmpPredicate P) {
4045 return {CmpInst::getSwappedPredicate(pred: P), P.hasSameSign()};
4046}
4047
4048CmpPredicate CmpPredicate::getSwapped(const CmpInst *Cmp) {
4049 return getSwapped(P: get(Cmp));
4050}
4051
4052//===----------------------------------------------------------------------===//
4053// SwitchInst Implementation
4054//===----------------------------------------------------------------------===//
4055
4056void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4057 assert(Value && Default && NumReserved);
4058 ReservedSpace = NumReserved;
4059 setNumHungOffUseOperands(2);
4060 allocHungoffUses(N: ReservedSpace);
4061
4062 Op<0>() = Value;
4063 Op<1>() = Default;
4064}
4065
4066/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4067/// switch on and a default destination. The number of additional cases can
4068/// be specified here to make memory allocation more efficient. This
4069/// constructor can also autoinsert before another instruction.
4070SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4071 InsertPosition InsertBefore)
4072 : Instruction(Type::getVoidTy(C&: Value->getContext()), Instruction::Switch,
4073 AllocMarker, InsertBefore) {
4074 init(Value, Default, NumReserved: 2 + NumCases);
4075}
4076
4077SwitchInst::SwitchInst(const SwitchInst &SI)
4078 : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
4079 init(Value: SI.getCondition(), Default: SI.getDefaultDest(), NumReserved: SI.getNumOperands());
4080 setNumHungOffUseOperands(SI.getNumOperands());
4081 Use *OL = getOperandList();
4082 ConstantInt **VL = case_values();
4083 const Use *InOL = SI.getOperandList();
4084 ConstantInt *const *InVL = SI.case_values();
4085 for (unsigned i = 2, E = SI.getNumOperands(); i != E; ++i) {
4086 OL[i] = InOL[i];
4087 VL[i - 2] = InVL[i - 2];
4088 }
4089 SubclassOptionalData = SI.SubclassOptionalData;
4090}
4091
4092/// addCase - Add an entry to the switch instruction...
4093///
4094void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
4095 unsigned NewCaseIdx = getNumCases();
4096 unsigned OpNo = getNumOperands();
4097 if (OpNo + 1 > ReservedSpace)
4098 growOperands(); // Get more space!
4099 // Initialize some new operands.
4100 assert(OpNo < ReservedSpace && "Growing didn't work!");
4101 setNumHungOffUseOperands(OpNo + 1);
4102 CaseHandle Case(this, NewCaseIdx);
4103 Case.setValue(OnVal);
4104 Case.setSuccessor(Dest);
4105}
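
// For example (illustrative), building
//   switch i32 %v, label %default [ i32 1, label %bb1
//                                   i32 4, label %bb2 ]
// takes a SwitchInst::Create(V, DefaultBB, /*NumCases=*/2, ...) followed by
// two addCase() calls; reserving the cases up front avoids regrowing the
// hung-off use list.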
4106
4107/// removeCase - This method removes the specified case and its successor
4108/// from the switch instruction.
4109SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
4110 unsigned idx = I->getCaseIndex();
4111
4112 assert(2 + idx < getNumOperands() && "Case index out of range!!!");
4113
4114 unsigned NumOps = getNumOperands();
4115 Use *OL = getOperandList();
4116 ConstantInt **VL = case_values();
4117
4118 // Overwrite this case with the end of the list.
4119 if (2 + idx + 1 != NumOps) {
4120 OL[2 + idx] = OL[NumOps - 1];
4121 VL[idx] = VL[NumOps - 2 - 1];
4122 }
4123
4124 // Nuke the last value.
4125 OL[NumOps - 1].set(nullptr);
4126 VL[NumOps - 2 - 1] = nullptr;
4127 setNumHungOffUseOperands(NumOps - 1);
4128
4129 return CaseIt(this, idx);
4130}
4131
/// growOperands - This grows the operand list in response to a push_back
/// style of operation. It triples the number of operands.
///
4135void SwitchInst::growOperands() {
4136 unsigned e = getNumOperands();
4137 unsigned NumOps = e*3;
4138
4139 ReservedSpace = NumOps;
4140 growHungoffUses(N: ReservedSpace, /*WithExtraValues=*/true);
4141}
4142
4143void SwitchInstProfUpdateWrapper::init() {
4144 MDNode *ProfileData = getBranchWeightMDNode(I: SI);
4145 if (!ProfileData)
4146 return;
4147
4148 if (getNumBranchWeights(ProfileData: *ProfileData) != SI.getNumSuccessors()) {
4149 llvm_unreachable("number of prof branch_weights metadata operands does "
                     "not correspond to number of successors");
4151 }
4152
4153 SmallVector<uint32_t, 8> Weights;
4154 if (!extractBranchWeights(ProfileData, Weights))
4155 return;
4156 this->Weights = std::move(Weights);
4157}
4158
4159SwitchInst::CaseIt
4160SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
4161 if (Weights) {
4162 assert(SI.getNumSuccessors() == Weights->size() &&
4163 "num of prof branch_weights must accord with num of successors");
4164 Changed = true;
4165 // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase(CaseIt)
    // removes cases.
4168 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4169 Weights->pop_back();
4170 }
4171 return SI.removeCase(I);
4172}
4173
4174void SwitchInstProfUpdateWrapper::replaceDefaultDest(SwitchInst::CaseIt I) {
4175 auto *DestBlock = I->getCaseSuccessor();
4176 if (Weights) {
4177 auto Weight = getSuccessorWeight(idx: I->getCaseIndex() + 1);
4178 (*Weights)[0] = Weight.value();
4179 }
4180
4181 SI.setDefaultDest(DestBlock);
4182}
4183
4184void SwitchInstProfUpdateWrapper::addCase(
4185 ConstantInt *OnVal, BasicBlock *Dest,
4186 SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
4187 SI.addCase(OnVal, Dest);
4188
4189 if (!Weights && W && *W) {
4190 Changed = true;
4191 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4192 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4193 } else if (Weights) {
4194 Changed = true;
4195 Weights->push_back(Elt: W.value_or(u: 0));
4196 }
4197 if (Weights)
4198 assert(SI.getNumSuccessors() == Weights->size() &&
4199 "num of prof branch_weights must accord with num of successors");
4200}
4201
4202Instruction::InstListType::iterator
4203SwitchInstProfUpdateWrapper::eraseFromParent() {
4204 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4205 Changed = false;
4206 if (Weights)
4207 Weights->resize(N: 0);
4208 return SI.eraseFromParent();
4209}
4210
4211SwitchInstProfUpdateWrapper::CaseWeightOpt
4212SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
4213 if (!Weights)
4214 return std::nullopt;
4215 return (*Weights)[idx];
4216}
4217
4218void SwitchInstProfUpdateWrapper::setSuccessorWeight(
4219 unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
4220 if (!W)
4221 return;
4222
4223 if (!Weights && *W)
4224 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4225
4226 if (Weights) {
4227 auto &OldW = (*Weights)[idx];
4228 if (*W != OldW) {
4229 Changed = true;
4230 OldW = *W;
4231 }
4232 }
4233}
4234
4235SwitchInstProfUpdateWrapper::CaseWeightOpt
4236SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
4237 unsigned idx) {
4238 if (MDNode *ProfileData = getBranchWeightMDNode(I: SI))
4239 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4240 return mdconst::extract<ConstantInt>(MD: ProfileData->getOperand(I: idx + 1))
4241 ->getValue()
4242 .getZExtValue();
4243
4244 return std::nullopt;
4245}
4246
4247//===----------------------------------------------------------------------===//
4248// IndirectBrInst Implementation
4249//===----------------------------------------------------------------------===//
4250
4251void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4252 assert(Address && Address->getType()->isPointerTy() &&
4253 "Address of indirectbr must be a pointer");
4254 ReservedSpace = 1+NumDests;
4255 setNumHungOffUseOperands(1);
4256 allocHungoffUses(N: ReservedSpace);
4257
4258 Op<0>() = Address;
4259}
4260
4261
/// growOperands - This grows the operand list in response to a push_back
/// style of operation. It doubles the number of operands.
4264///
4265void IndirectBrInst::growOperands() {
4266 unsigned e = getNumOperands();
4267 unsigned NumOps = e*2;
4268
4269 ReservedSpace = NumOps;
4270 growHungoffUses(N: ReservedSpace);
4271}
4272
4273IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4274 InsertPosition InsertBefore)
4275 : Instruction(Type::getVoidTy(C&: Address->getContext()),
4276 Instruction::IndirectBr, AllocMarker, InsertBefore) {
4277 init(Address, NumDests: NumCases);
4278}
4279
4280IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4281 : Instruction(Type::getVoidTy(C&: IBI.getContext()), Instruction::IndirectBr,
4282 AllocMarker) {
4283 NumUserOperands = IBI.NumUserOperands;
4284 allocHungoffUses(N: IBI.getNumOperands());
4285 Use *OL = getOperandList();
4286 const Use *InOL = IBI.getOperandList();
4287 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4288 OL[i] = InOL[i];
4289 SubclassOptionalData = IBI.SubclassOptionalData;
4290}
4291
4292/// addDestination - Add a destination.
4293///
4294void IndirectBrInst::addDestination(BasicBlock *DestBB) {
4295 unsigned OpNo = getNumOperands();
4296 if (OpNo+1 > ReservedSpace)
4297 growOperands(); // Get more space!
4298 // Initialize some new operands.
4299 assert(OpNo < ReservedSpace && "Growing didn't work!");
4300 setNumHungOffUseOperands(OpNo+1);
4301 getOperandList()[OpNo] = DestBB;
4302}
4303
4304/// removeDestination - This method removes the specified successor from the
4305/// indirectbr instruction.
4306void IndirectBrInst::removeDestination(unsigned idx) {
4307 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4308
4309 unsigned NumOps = getNumOperands();
4310 Use *OL = getOperandList();
4311
4312 // Replace this value with the last one.
4313 OL[idx+1] = OL[NumOps-1];
4314
4315 // Nuke the last value.
4316 OL[NumOps-1].set(nullptr);
4317 setNumHungOffUseOperands(NumOps-1);
4318}
4319
4320//===----------------------------------------------------------------------===//
4321// FreezeInst Implementation
4322//===----------------------------------------------------------------------===//
4323
4324FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
4325 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4326 setName(Name);
4327}
4328
4329//===----------------------------------------------------------------------===//
4330// cloneImpl() implementations
4331//===----------------------------------------------------------------------===//
4332
4333// Define these methods here so vtables don't get emitted into every translation
4334// unit that uses these classes.
4335
4336GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
4337 IntrusiveOperandsAllocMarker AllocMarker{.NumOps: getNumOperands()};
4338 return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
4339}
4340
4341UnaryOperator *UnaryOperator::cloneImpl() const {
4342 return Create(Op: getOpcode(), S: Op<0>());
4343}
4344
4345BinaryOperator *BinaryOperator::cloneImpl() const {
4346 return Create(Op: getOpcode(), S1: Op<0>(), S2: Op<1>());
4347}
4348
4349FCmpInst *FCmpInst::cloneImpl() const {
4350 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4351}
4352
4353ICmpInst *ICmpInst::cloneImpl() const {
4354 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4355}
4356
4357ExtractValueInst *ExtractValueInst::cloneImpl() const {
4358 return new ExtractValueInst(*this);
4359}
4360
4361InsertValueInst *InsertValueInst::cloneImpl() const {
4362 return new InsertValueInst(*this);
4363}
4364
4365AllocaInst *AllocaInst::cloneImpl() const {
4366 AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),
4367 getOperand(i_nocapture: 0), getAlign());
4368 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4369 Result->setSwiftError(isSwiftError());
4370 return Result;
4371}
4372
4373LoadInst *LoadInst::cloneImpl() const {
4374 return new LoadInst(getType(), getOperand(i_nocapture: 0), Twine(), isVolatile(),
4375 getAlign(), getOrdering(), getSyncScopeID());
4376}
4377
4378StoreInst *StoreInst::cloneImpl() const {
4379 return new StoreInst(getOperand(i_nocapture: 0), getOperand(i_nocapture: 1), isVolatile(), getAlign(),
4380 getOrdering(), getSyncScopeID());
4381}
4382
4383AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
4384 AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
4385 getOperand(i_nocapture: 0), getOperand(i_nocapture: 1), getOperand(i_nocapture: 2), getAlign(),
4386 getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
4387 Result->setVolatile(isVolatile());
4388 Result->setWeak(isWeak());
4389 return Result;
4390}
4391
4392AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
4393 AtomicRMWInst *Result =
4394 new AtomicRMWInst(getOperation(), getOperand(i_nocapture: 0), getOperand(i_nocapture: 1),
4395 getAlign(), getOrdering(), getSyncScopeID());
4396 Result->setVolatile(isVolatile());
4397 return Result;
4398}
4399
4400FenceInst *FenceInst::cloneImpl() const {
4401 return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
4402}
4403
4404TruncInst *TruncInst::cloneImpl() const {
4405 return new TruncInst(getOperand(i_nocapture: 0), getType());
4406}
4407
4408ZExtInst *ZExtInst::cloneImpl() const {
4409 return new ZExtInst(getOperand(i_nocapture: 0), getType());
4410}
4411
4412SExtInst *SExtInst::cloneImpl() const {
4413 return new SExtInst(getOperand(i_nocapture: 0), getType());
4414}
4415
4416FPTruncInst *FPTruncInst::cloneImpl() const {
4417 return new FPTruncInst(getOperand(i_nocapture: 0), getType());
4418}
4419
4420FPExtInst *FPExtInst::cloneImpl() const {
4421 return new FPExtInst(getOperand(i_nocapture: 0), getType());
4422}
4423
4424UIToFPInst *UIToFPInst::cloneImpl() const {
4425 return new UIToFPInst(getOperand(i_nocapture: 0), getType());
4426}
4427
4428SIToFPInst *SIToFPInst::cloneImpl() const {
4429 return new SIToFPInst(getOperand(i_nocapture: 0), getType());
4430}
4431
4432FPToUIInst *FPToUIInst::cloneImpl() const {
4433 return new FPToUIInst(getOperand(i_nocapture: 0), getType());
4434}
4435
4436FPToSIInst *FPToSIInst::cloneImpl() const {
4437 return new FPToSIInst(getOperand(i_nocapture: 0), getType());
4438}
4439
4440PtrToIntInst *PtrToIntInst::cloneImpl() const {
4441 return new PtrToIntInst(getOperand(i_nocapture: 0), getType());
4442}
4443
4444PtrToAddrInst *PtrToAddrInst::cloneImpl() const {
4445 return new PtrToAddrInst(getOperand(i_nocapture: 0), getType());
4446}
4447
4448IntToPtrInst *IntToPtrInst::cloneImpl() const {
4449 return new IntToPtrInst(getOperand(i_nocapture: 0), getType());
4450}
4451
4452BitCastInst *BitCastInst::cloneImpl() const {
4453 return new BitCastInst(getOperand(i_nocapture: 0), getType());
4454}
4455
4456AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
4457 return new AddrSpaceCastInst(getOperand(i_nocapture: 0), getType());
4458}
4459
4460CallInst *CallInst::cloneImpl() const {
4461 if (hasOperandBundles()) {
4462 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
4463 .NumOps: getNumOperands(),
4464 .DescBytes: getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
4465 return new (AllocMarker) CallInst(*this, AllocMarker);
4466 }
4467 IntrusiveOperandsAllocMarker AllocMarker{.NumOps: getNumOperands()};
4468 return new (AllocMarker) CallInst(*this, AllocMarker);
4469}
4470
4471SelectInst *SelectInst::cloneImpl() const {
4472 return SelectInst::Create(C: getOperand(i_nocapture: 0), S1: getOperand(i_nocapture: 1), S2: getOperand(i_nocapture: 2));
4473}
4474
4475VAArgInst *VAArgInst::cloneImpl() const {
4476 return new VAArgInst(getOperand(i_nocapture: 0), getType());
4477}
4478
4479ExtractElementInst *ExtractElementInst::cloneImpl() const {
4480 return ExtractElementInst::Create(Vec: getOperand(i_nocapture: 0), Idx: getOperand(i_nocapture: 1));
4481}
4482
4483InsertElementInst *InsertElementInst::cloneImpl() const {
4484 return InsertElementInst::Create(Vec: getOperand(i_nocapture: 0), NewElt: getOperand(i_nocapture: 1), Idx: getOperand(i_nocapture: 2));
4485}
4486
4487ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
4488 return new ShuffleVectorInst(getOperand(i_nocapture: 0), getOperand(i_nocapture: 1), getShuffleMask());
4489}
4490
4491PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4492
4493LandingPadInst *LandingPadInst::cloneImpl() const {
4494 return new LandingPadInst(*this);
4495}
4496
4497ReturnInst *ReturnInst::cloneImpl() const {
4498 IntrusiveOperandsAllocMarker AllocMarker{.NumOps: getNumOperands()};
4499 return new (AllocMarker) ReturnInst(*this, AllocMarker);
4500}
4501
4502BranchInst *BranchInst::cloneImpl() const {
4503 IntrusiveOperandsAllocMarker AllocMarker{.NumOps: getNumOperands()};
4504 return new (AllocMarker) BranchInst(*this, AllocMarker);
4505}
4506
4507SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4508
4509IndirectBrInst *IndirectBrInst::cloneImpl() const {
4510 return new IndirectBrInst(*this);
4511}
4512
4513InvokeInst *InvokeInst::cloneImpl() const {
4514 if (hasOperandBundles()) {
4515 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
4516 .NumOps: getNumOperands(),
4517 .DescBytes: getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
4518 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4519 }
4520 IntrusiveOperandsAllocMarker AllocMarker{.NumOps: getNumOperands()};
4521 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4522}
4523
4524CallBrInst *CallBrInst::cloneImpl() const {
4525 if (hasOperandBundles()) {
4526 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
4527 .NumOps: getNumOperands(),
4528 .DescBytes: getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
4529 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4530 }
4531 IntrusiveOperandsAllocMarker AllocMarker{.NumOps: getNumOperands()};
4532 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4533}
4534
4535ResumeInst *ResumeInst::cloneImpl() const {
4536 return new (AllocMarker) ResumeInst(*this);
4537}
4538
4539CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4540 IntrusiveOperandsAllocMarker AllocMarker{.NumOps: getNumOperands()};
4541 return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
4542}
4543
4544CatchReturnInst *CatchReturnInst::cloneImpl() const {
4545 return new (AllocMarker) CatchReturnInst(*this);
4546}
4547
4548CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
4549 return new CatchSwitchInst(*this);
4550}
4551
4552FuncletPadInst *FuncletPadInst::cloneImpl() const {
4553 IntrusiveOperandsAllocMarker AllocMarker{.NumOps: getNumOperands()};
4554 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
4555}
4556
4557UnreachableInst *UnreachableInst::cloneImpl() const {
4558 LLVMContext &Context = getContext();
4559 return new UnreachableInst(Context);
4560}
4561
4562bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
4563 bool NoTrapAfterNoreturn) const {
4564 if (!TrapUnreachable)
4565 return false;
4566
4567 // We may be able to ignore unreachable behind a noreturn call.
4568 if (const CallInst *Call = dyn_cast_or_null<CallInst>(Val: getPrevNode());
4569 Call && Call->doesNotReturn()) {
4570 if (NoTrapAfterNoreturn)
4571 return false;
4572 // Do not emit an additional trap instruction.
4573 if (Call->isNonContinuableTrap())
4574 return false;
4575 }
4576
4577 if (getFunction()->hasFnAttribute(Kind: Attribute::Naked))
4578 return false;
4579
4580 return true;
4581}
4582
4583FreezeInst *FreezeInst::cloneImpl() const {
4584 return new FreezeInst(getOperand(i_nocapture: 0));
4585}
4586