1//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/CodeGen/TargetInstrInfo.h"
14#include "llvm/ADT/SmallSet.h"
15#include "llvm/ADT/StringExtras.h"
16#include "llvm/BinaryFormat/Dwarf.h"
17#include "llvm/CodeGen/MachineCombinerPattern.h"
18#include "llvm/CodeGen/MachineFrameInfo.h"
19#include "llvm/CodeGen/MachineInstrBuilder.h"
20#include "llvm/CodeGen/MachineMemOperand.h"
21#include "llvm/CodeGen/MachineRegisterInfo.h"
22#include "llvm/CodeGen/MachineScheduler.h"
23#include "llvm/CodeGen/MachineTraceMetrics.h"
24#include "llvm/CodeGen/PseudoSourceValue.h"
25#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
26#include "llvm/CodeGen/StackMaps.h"
27#include "llvm/CodeGen/TargetFrameLowering.h"
28#include "llvm/CodeGen/TargetLowering.h"
29#include "llvm/CodeGen/TargetRegisterInfo.h"
30#include "llvm/CodeGen/TargetSchedule.h"
31#include "llvm/IR/DataLayout.h"
32#include "llvm/IR/DebugInfoMetadata.h"
33#include "llvm/MC/MCAsmInfo.h"
34#include "llvm/MC/MCInstrItineraries.h"
35#include "llvm/Support/CommandLine.h"
36#include "llvm/Support/ErrorHandling.h"
37#include "llvm/Support/InterleavedRange.h"
38#include "llvm/Support/raw_ostream.h"
39#include "llvm/Target/TargetMachine.h"
40
41using namespace llvm;
42
43static cl::opt<bool> DisableHazardRecognizer(
44 "disable-sched-hazard", cl::Hidden, cl::init(Val: false),
45 cl::desc("Disable hazard detection during preRA scheduling"));
46
47static cl::opt<bool> EnableAccReassociation(
48 "acc-reassoc", cl::Hidden, cl::init(Val: true),
49 cl::desc("Enable reassociation of accumulation chains"));
50
51static cl::opt<unsigned int>
52 MinAccumulatorDepth("acc-min-depth", cl::Hidden, cl::init(Val: 8),
53 cl::desc("Minimum length of accumulator chains "
54 "required for the optimization to kick in"));
55
56static cl::opt<unsigned int> MaxAccumulatorWidth(
57 "acc-max-width", cl::Hidden, cl::init(Val: 3),
58 cl::desc("Maximum number of branches in the accumulator tree"));
59
60TargetInstrInfo::~TargetInstrInfo() = default;
61
62const TargetRegisterClass *TargetInstrInfo::getRegClass(const MCInstrDesc &MCID,
63 unsigned OpNum) const {
64 if (OpNum >= MCID.getNumOperands())
65 return nullptr;
66
67 const MCOperandInfo &OpInfo = MCID.operands()[OpNum];
68 int16_t RegClass = getOpRegClassID(OpInfo);
69
70 // Instructions like INSERT_SUBREG do not have fixed register classes.
71 if (RegClass < 0)
72 return nullptr;
73
74 // Otherwise just look it up normally.
75 return TRI.getRegClass(i: RegClass);
76}
77
78/// insertNoop - Insert a noop into the instruction stream at the specified
79/// point.
80void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
81 MachineBasicBlock::iterator MI) const {
82 llvm_unreachable("Target didn't implement insertNoop!");
83}
84
85/// insertNoops - Insert noops into the instruction stream at the specified
86/// point.
87void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
88 MachineBasicBlock::iterator MI,
89 unsigned Quantity) const {
90 for (unsigned i = 0; i < Quantity; ++i)
91 insertNoop(MBB, MI);
92}
93
94static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
95 return strncmp(s1: Str, s2: MAI.getCommentString().data(),
96 n: MAI.getCommentString().size()) == 0;
97}
98
99/// Measure the specified inline asm to determine an approximation of its
100/// length.
101/// Comments (which run till the next SeparatorString or newline) do not
102/// count as an instruction.
103/// Any other non-whitespace text is considered an instruction, with
104/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; targets may override
/// this function to do that.
107/// We implement a special case of the .space directive which takes only a
108/// single integer argument in base 10 that is the size in bytes. This is a
109/// restricted form of the GAS directive in that we only interpret
110/// simple--i.e. not a logical or arithmetic expression--size values without
111/// the optional fill value. This is primarily used for creating arbitrary
112/// sized inline asm blocks for testing purposes.
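///
/// For illustration (assuming a target whose maximum instruction length is 4
/// bytes and whose comment string is "#"), the string
///   "add x0, x0, x1\n.space 512\n# a comment\n"
/// is measured as 4 + 512 = 516 bytes: one ordinary instruction, a 512-byte
/// .space block, and a comment that contributes nothing.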
113unsigned TargetInstrInfo::getInlineAsmLength(
114 const char *Str,
115 const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
116 // Count the number of instructions in the asm.
117 bool AtInsnStart = true;
118 unsigned Length = 0;
119 const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
120 for (; *Str; ++Str) {
121 if (*Str == '\n' || strncmp(s1: Str, s2: MAI.getSeparatorString(),
122 n: strlen(s: MAI.getSeparatorString())) == 0) {
123 AtInsnStart = true;
124 } else if (isAsmComment(Str, MAI)) {
125 // Stop counting as an instruction after a comment until the next
126 // separator.
127 AtInsnStart = false;
128 }
129
130 if (AtInsnStart && !isSpace(C: static_cast<unsigned char>(*Str))) {
131 unsigned AddLength = MaxInstLength;
132 if (strncmp(s1: Str, s2: ".space", n: 6) == 0) {
133 char *EStr;
134 int SpaceSize;
135 SpaceSize = strtol(nptr: Str + 6, endptr: &EStr, base: 10);
136 SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
137 while (*EStr != '\n' && isSpace(C: static_cast<unsigned char>(*EStr)))
138 ++EStr;
139 if (*EStr == '\0' || *EStr == '\n' ||
140 isAsmComment(Str: EStr, MAI)) // Successfully parsed .space argument
141 AddLength = SpaceSize;
142 }
143 Length += AddLength;
144 AtInsnStart = false;
145 }
146 }
147
148 return Length;
149}
150
/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything after
/// it, replacing it with an unconditional branch to NewDest.
153void
154TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
155 MachineBasicBlock *NewDest) const {
156 MachineBasicBlock *MBB = Tail->getParent();
157
158 // Remove all the old successors of MBB from the CFG.
159 while (!MBB->succ_empty())
160 MBB->removeSuccessor(I: MBB->succ_begin());
161
162 // Save off the debug loc before erasing the instruction.
163 DebugLoc DL = Tail->getDebugLoc();
164
165 // Update call info and remove all the dead instructions
166 // from the end of MBB.
167 while (Tail != MBB->end()) {
168 auto MI = Tail++;
169 if (MI->shouldUpdateAdditionalCallInfo())
170 MBB->getParent()->eraseAdditionalCallInfo(MI: &*MI);
171 MBB->erase(I: MI);
172 }
173
  // If NewDest is not the block immediately after MBB, insert a branch to it.
175 if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
176 insertBranch(MBB&: *MBB, TBB: NewDest, FBB: nullptr, Cond: SmallVector<MachineOperand, 0>(), DL);
177 MBB->addSuccessor(Succ: NewDest);
178}
179
180MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
181 bool NewMI, unsigned Idx1,
182 unsigned Idx2) const {
183 const MCInstrDesc &MCID = MI.getDesc();
184 bool HasDef = MCID.getNumDefs();
185 if (HasDef && !MI.getOperand(i: 0).isReg())
    // No idea how to commute this instruction. The target should implement its
    // own commuteInstructionImpl().
187 return nullptr;
188
189 unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
190 unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
191 assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
192 CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
193 "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
194 assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
195 "This only knows how to commute register operands so far");
196
197 Register Reg0 = HasDef ? MI.getOperand(i: 0).getReg() : Register();
198 Register Reg1 = MI.getOperand(i: Idx1).getReg();
199 Register Reg2 = MI.getOperand(i: Idx2).getReg();
200 unsigned SubReg0 = HasDef ? MI.getOperand(i: 0).getSubReg() : 0;
201 unsigned SubReg1 = MI.getOperand(i: Idx1).getSubReg();
202 unsigned SubReg2 = MI.getOperand(i: Idx2).getSubReg();
203 bool Reg1IsKill = MI.getOperand(i: Idx1).isKill();
204 bool Reg2IsKill = MI.getOperand(i: Idx2).isKill();
205 bool Reg1IsUndef = MI.getOperand(i: Idx1).isUndef();
206 bool Reg2IsUndef = MI.getOperand(i: Idx2).isUndef();
207 bool Reg1IsInternal = MI.getOperand(i: Idx1).isInternalRead();
208 bool Reg2IsInternal = MI.getOperand(i: Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that the
  // renamable property is only queried/set for physical registers.
211 bool Reg1IsRenamable =
212 Reg1.isPhysical() ? MI.getOperand(i: Idx1).isRenamable() : false;
213 bool Reg2IsRenamable =
214 Reg2.isPhysical() ? MI.getOperand(i: Idx2).isRenamable() : false;
215
216 // For a case like this:
217 // %0.sub = INST %0.sub(tied), %1.sub, implicit-def %0
218 // we need to update the implicit-def after commuting to result in:
219 // %1.sub = INST %1.sub(tied), %0.sub, implicit-def %1
220 SmallVector<unsigned> UpdateImplicitDefIdx;
221 if (HasDef && MI.hasImplicitDef()) {
222 for (auto [OpNo, MO] : llvm::enumerate(First: MI.implicit_operands())) {
223 Register ImplReg = MO.getReg();
224 if ((ImplReg.isVirtual() && ImplReg == Reg0) ||
225 (ImplReg.isPhysical() && Reg0.isPhysical() &&
226 TRI.isSubRegisterEq(RegA: ImplReg, RegB: Reg0)))
227 UpdateImplicitDefIdx.push_back(Elt: OpNo + MI.getNumExplicitOperands());
228 }
229 }
230
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
233 if (HasDef && Reg0 == Reg1 &&
234 MI.getDesc().getOperandConstraint(OpNum: Idx1, Constraint: MCOI::TIED_TO) == 0) {
235 Reg2IsKill = false;
236 Reg0 = Reg2;
237 SubReg0 = SubReg2;
238 } else if (HasDef && Reg0 == Reg2 &&
239 MI.getDesc().getOperandConstraint(OpNum: Idx2, Constraint: MCOI::TIED_TO) == 0) {
240 Reg1IsKill = false;
241 Reg0 = Reg1;
242 SubReg0 = SubReg1;
243 }
244
245 MachineInstr *CommutedMI = nullptr;
246 if (NewMI) {
247 // Create a new instruction.
248 MachineFunction &MF = *MI.getMF();
249 CommutedMI = MF.CloneMachineInstr(Orig: &MI);
250 } else {
251 CommutedMI = &MI;
252 }
253
254 if (HasDef) {
255 CommutedMI->getOperand(i: 0).setReg(Reg0);
256 CommutedMI->getOperand(i: 0).setSubReg(SubReg0);
257 for (unsigned Idx : UpdateImplicitDefIdx)
258 CommutedMI->getOperand(i: Idx).setReg(Reg0);
259 }
260 CommutedMI->getOperand(i: Idx2).setReg(Reg1);
261 CommutedMI->getOperand(i: Idx1).setReg(Reg2);
262 CommutedMI->getOperand(i: Idx2).setSubReg(SubReg1);
263 CommutedMI->getOperand(i: Idx1).setSubReg(SubReg2);
264 CommutedMI->getOperand(i: Idx2).setIsKill(Reg1IsKill);
265 CommutedMI->getOperand(i: Idx1).setIsKill(Reg2IsKill);
266 CommutedMI->getOperand(i: Idx2).setIsUndef(Reg1IsUndef);
267 CommutedMI->getOperand(i: Idx1).setIsUndef(Reg2IsUndef);
268 CommutedMI->getOperand(i: Idx2).setIsInternalRead(Reg1IsInternal);
269 CommutedMI->getOperand(i: Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // the renamable property is only queried/set for physical registers.
272 if (Reg1.isPhysical())
273 CommutedMI->getOperand(i: Idx2).setIsRenamable(Reg1IsRenamable);
274 if (Reg2.isPhysical())
275 CommutedMI->getOperand(i: Idx1).setIsRenamable(Reg2IsRenamable);
276 return CommutedMI;
277}
278
279MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
280 unsigned OpIdx1,
281 unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
285 if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
286 !findCommutedOpIndices(MI, SrcOpIdx1&: OpIdx1, SrcOpIdx2&: OpIdx2)) {
287 assert(MI.isCommutable() &&
288 "Precondition violation: MI must be commutable.");
289 return nullptr;
290 }
291 return commuteInstructionImpl(MI, NewMI, Idx1: OpIdx1, Idx2: OpIdx2);
292}
293
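// For illustration: if the target reports commutable operand indices
// (CommutableOpIdx1, CommutableOpIdx2) = (1, 2), then a caller-supplied pair
// (CommuteAnyOperandIndex, 2) is resolved to (1, 2), the fixed pair (2, 1) is
// accepted as-is, and (1, 3) is rejected because it does not match the
// commutable pair.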
294bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
295 unsigned &ResultIdx2,
296 unsigned CommutableOpIdx1,
297 unsigned CommutableOpIdx2) {
298 if (ResultIdx1 == CommuteAnyOperandIndex &&
299 ResultIdx2 == CommuteAnyOperandIndex) {
300 ResultIdx1 = CommutableOpIdx1;
301 ResultIdx2 = CommutableOpIdx2;
302 } else if (ResultIdx1 == CommuteAnyOperandIndex) {
303 if (ResultIdx2 == CommutableOpIdx1)
304 ResultIdx1 = CommutableOpIdx2;
305 else if (ResultIdx2 == CommutableOpIdx2)
306 ResultIdx1 = CommutableOpIdx1;
307 else
308 return false;
309 } else if (ResultIdx2 == CommuteAnyOperandIndex) {
310 if (ResultIdx1 == CommutableOpIdx1)
311 ResultIdx2 = CommutableOpIdx2;
312 else if (ResultIdx1 == CommutableOpIdx2)
313 ResultIdx2 = CommutableOpIdx1;
314 else
315 return false;
316 } else
317 // Check that the result operand indices match the given commutable
318 // operand indices.
319 return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
320 (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
321
322 return true;
323}
324
325bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
326 unsigned &SrcOpIdx1,
327 unsigned &SrcOpIdx2) const {
328 assert(!MI.isBundle() &&
329 "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
330
331 const MCInstrDesc &MCID = MI.getDesc();
332 if (!MCID.isCommutable())
333 return false;
334
335 // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
336 // is not true, then the target must implement this.
337 unsigned CommutableOpIdx1 = MCID.getNumDefs();
338 unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
339 if (!fixCommutedOpIndices(ResultIdx1&: SrcOpIdx1, ResultIdx2&: SrcOpIdx2,
340 CommutableOpIdx1, CommutableOpIdx2))
341 return false;
342
343 if (!MI.getOperand(i: SrcOpIdx1).isReg() || !MI.getOperand(i: SrcOpIdx2).isReg())
344 // No idea.
345 return false;
346 return true;
347}
348
349bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
350 if (!MI.isTerminator()) return false;
351
352 // Conditional branch is a special case.
353 if (MI.isBranch() && !MI.isBarrier())
354 return true;
355 if (!MI.isPredicable())
356 return true;
357 return !isPredicated(MI);
358}
359
360bool TargetInstrInfo::PredicateInstruction(
361 MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
362 bool MadeChange = false;
363
364 assert(!MI.isBundle() &&
365 "TargetInstrInfo::PredicateInstruction() can't handle bundles");
366
367 const MCInstrDesc &MCID = MI.getDesc();
368 if (!MI.isPredicable())
369 return false;
370
371 for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
372 if (MCID.operands()[i].isPredicate()) {
373 MachineOperand &MO = MI.getOperand(i);
374 if (MO.isReg()) {
375 MO.setReg(Pred[j].getReg());
376 MadeChange = true;
377 } else if (MO.isImm()) {
378 MO.setImm(Pred[j].getImm());
379 MadeChange = true;
380 } else if (MO.isMBB()) {
381 MO.setMBB(Pred[j].getMBB());
382 MadeChange = true;
383 }
384 ++j;
385 }
386 }
387 return MadeChange;
388}
389
390bool TargetInstrInfo::hasLoadFromStackSlot(
391 const MachineInstr &MI,
392 SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
393 size_t StartSize = Accesses.size();
394 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
395 oe = MI.memoperands_end();
396 o != oe; ++o) {
397 if ((*o)->isLoad() &&
398 isa_and_nonnull<FixedStackPseudoSourceValue>(Val: (*o)->getPseudoValue()))
399 Accesses.push_back(Elt: *o);
400 }
401 return Accesses.size() != StartSize;
402}
403
404bool TargetInstrInfo::hasStoreToStackSlot(
405 const MachineInstr &MI,
406 SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
407 size_t StartSize = Accesses.size();
408 for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
409 oe = MI.memoperands_end();
410 o != oe; ++o) {
411 if ((*o)->isStore() &&
412 isa_and_nonnull<FixedStackPseudoSourceValue>(Val: (*o)->getPseudoValue()))
413 Accesses.push_back(Elt: *o);
414 }
415 return Accesses.size() != StartSize;
416}
417
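// For illustration: for a register class with a 16-byte spill slot and a
// sub-register index covering bits [64, 128), getStackSlotRange computes
// Size = 8 and Offset = 8 on a little-endian target; on a big-endian target
// the offset is flipped to 16 - (8 + 8) = 0.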
418bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
419 unsigned SubIdx, unsigned &Size,
420 unsigned &Offset,
421 const MachineFunction &MF) const {
422 if (!SubIdx) {
423 Size = TRI.getSpillSize(RC: *RC);
424 Offset = 0;
425 return true;
426 }
427 unsigned BitSize = TRI.getSubRegIdxSize(Idx: SubIdx);
428 // Convert bit size to byte size.
429 if (BitSize % 8)
430 return false;
431
432 int BitOffset = TRI.getSubRegIdxOffset(Idx: SubIdx);
433 if (BitOffset < 0 || BitOffset % 8)
434 return false;
435
436 Size = BitSize / 8;
437 Offset = (unsigned)BitOffset / 8;
438
439 assert(TRI.getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
440
441 if (!MF.getDataLayout().isLittleEndian()) {
442 Offset = TRI.getSpillSize(RC: *RC) - (Offset + Size);
443 }
444 return true;
445}
446
447void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
448 MachineBasicBlock::iterator I,
449 Register DestReg, unsigned SubIdx,
450 const MachineInstr &Orig) const {
451 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig: &Orig);
452 MI->substituteRegister(FromReg: MI->getOperand(i: 0).getReg(), ToReg: DestReg, SubIdx, RegInfo: TRI);
453 MBB.insert(I, MI);
454}
455
456bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
457 const MachineInstr &MI1,
458 const MachineRegisterInfo *MRI) const {
459 return MI0.isIdenticalTo(Other: MI1, Check: MachineInstr::IgnoreVRegDefs);
460}
461
462MachineInstr &
463TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
464 MachineBasicBlock::iterator InsertBefore,
465 const MachineInstr &Orig) const {
466 MachineFunction &MF = *MBB.getParent();
467 // CFI instructions are marked as non-duplicable, because Darwin compact
468 // unwind info emission can't handle multiple prologue setups.
469 assert((!Orig.isNotDuplicable() ||
470 (!MF.getTarget().getTargetTriple().isOSDarwin() &&
471 Orig.isCFIInstruction())) &&
472 "Instruction cannot be duplicated");
473
474 return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
475}
476
477// If the COPY instruction in MI can be folded to a stack operation, return
478// the register class to use.
479static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
480 const TargetInstrInfo &TII,
481 unsigned FoldIdx) {
482 assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
483 if (MI.getNumOperands() != 2)
484 return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");
486
487 const MachineOperand &FoldOp = MI.getOperand(i: FoldIdx);
488 const MachineOperand &LiveOp = MI.getOperand(i: 1 - FoldIdx);
489
490 if (FoldOp.getSubReg() || LiveOp.getSubReg())
491 return nullptr;
492
493 Register FoldReg = FoldOp.getReg();
494 Register LiveReg = LiveOp.getReg();
495
496 assert(FoldReg.isVirtual() && "Cannot fold physregs");
497
498 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
499 const TargetRegisterClass *RC = MRI.getRegClass(Reg: FoldReg);
500
501 if (LiveOp.getReg().isPhysical())
502 return RC->contains(Reg: LiveOp.getReg()) ? RC : nullptr;
503
504 if (RC->hasSubClassEq(RC: MRI.getRegClass(Reg: LiveReg)))
505 return RC;
506
507 // FIXME: Allow folding when register classes are memory compatible.
508 return nullptr;
509}
510
511MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }
512
/// Try to remove the load by folding it into a register operand at the use.
/// We fold the load if it defines a virtual register, the virtual register is
/// used exactly once in the same BB, and the instructions in between do not
/// load, store, or have side effects.
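///
/// For illustration (an X86-flavoured sketch; the exact opcodes depend on the
/// target's foldMemoryOperandImpl), a pair such as
///   %1:gr32 = MOV32rm %stack.0, ...   ; DefMI, a foldable load
///   %2:gr32 = ADD32rr %0, %1
/// may be folded into a single ADD32rm that reads %stack.0 directly.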
517MachineInstr *TargetInstrInfo::optimizeLoadInstr(MachineInstr &MI,
518 const MachineRegisterInfo *MRI,
519 Register &FoldAsLoadDefReg,
520 MachineInstr *&DefMI) const {
521 // Check whether we can move DefMI here.
522 DefMI = MRI->getVRegDef(Reg: FoldAsLoadDefReg);
523 assert(DefMI);
524 bool SawStore = false;
525 if (!DefMI->isSafeToMove(SawStore))
526 return nullptr;
527
528 // Collect information about virtual register operands of MI.
529 SmallVector<unsigned, 1> SrcOperandIds;
530 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
531 MachineOperand &MO = MI.getOperand(i);
532 if (!MO.isReg())
533 continue;
534 Register Reg = MO.getReg();
535 if (Reg != FoldAsLoadDefReg)
536 continue;
537 // Do not fold if we have a subreg use or a def.
538 if (MO.getSubReg() || MO.isDef())
539 return nullptr;
540 SrcOperandIds.push_back(Elt: i);
541 }
542 if (SrcOperandIds.empty())
543 return nullptr;
544
  // Check whether we can fold the def into one of SrcOperandIds.
546 if (MachineInstr *FoldMI = foldMemoryOperand(MI, Ops: SrcOperandIds, LoadMI&: *DefMI)) {
547 FoldAsLoadDefReg = 0;
548 return FoldMI;
549 }
550
551 return nullptr;
552}
553
554std::pair<unsigned, unsigned>
555TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
556 switch (MI.getOpcode()) {
557 case TargetOpcode::STACKMAP:
558 // StackMapLiveValues are foldable
559 return std::make_pair(x: 0, y: StackMapOpers(&MI).getVarIdx());
560 case TargetOpcode::PATCHPOINT:
561 // For PatchPoint, the call args are not foldable (even if reported in the
562 // stackmap e.g. via anyregcc).
563 return std::make_pair(x: 0, y: PatchPointOpers(&MI).getVarIdx());
564 case TargetOpcode::STATEPOINT:
565 // For statepoints, fold deopt and gc arguments, but not call arguments.
566 return std::make_pair(x: MI.getNumDefs(), y: StatepointOpers(&MI).getVarIdx());
567 default:
568 llvm_unreachable("unexpected stackmap opcode");
569 }
570}
571
572static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
573 ArrayRef<unsigned> Ops, int FrameIndex,
574 const TargetInstrInfo &TII) {
575 unsigned StartIdx = 0;
576 unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange hits llvm_unreachable if MI is not a
  // stackmap, patchpoint, or statepoint.
578 std::tie(args&: NumDefs, args&: StartIdx) = TII.getPatchpointUnfoldableRange(MI);
579
580 unsigned DefToFoldIdx = MI.getNumOperands();
581
  // Return nullptr if any operand requested for folding is not foldable (not
  // part of the stackmap's live values).
584 for (unsigned Op : Ops) {
585 if (Op < NumDefs) {
586 assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
587 DefToFoldIdx = Op;
588 } else if (Op < StartIdx) {
589 return nullptr;
590 }
591 if (MI.getOperand(i: Op).isTied())
592 return nullptr;
593 }
594
595 MachineInstr *NewMI =
596 MF.CreateMachineInstr(MCID: TII.get(Opcode: MI.getOpcode()), DL: MI.getDebugLoc(), NoImplicit: true);
597 MachineInstrBuilder MIB(MF, NewMI);
598
  // No need to fold the return value, the metadata, or the function arguments.
600 for (unsigned i = 0; i < StartIdx; ++i)
601 if (i != DefToFoldIdx)
602 MIB.add(MO: MI.getOperand(i));
603
604 for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
605 MachineOperand &MO = MI.getOperand(i);
606 unsigned TiedTo = e;
607 (void)MI.isRegTiedToDefOperand(UseOpIdx: i, DefOpIdx: &TiedTo);
608
609 if (is_contained(Range&: Ops, Element: i)) {
610 assert(TiedTo == e && "Cannot fold tied operands");
611 unsigned SpillSize;
612 unsigned SpillOffset;
613 // Compute the spill slot size and offset.
614 const TargetRegisterClass *RC =
615 MF.getRegInfo().getRegClass(Reg: MO.getReg());
616 bool Valid =
617 TII.getStackSlotRange(RC, SubIdx: MO.getSubReg(), Size&: SpillSize, Offset&: SpillOffset, MF);
618 if (!Valid)
619 report_fatal_error(reason: "cannot spill patchpoint subregister operand");
620 MIB.addImm(Val: StackMaps::IndirectMemRefOp);
621 MIB.addImm(Val: SpillSize);
622 MIB.addFrameIndex(Idx: FrameIndex);
623 MIB.addImm(Val: SpillOffset);
624 } else {
625 MIB.add(MO);
626 if (TiedTo < e) {
627 assert(TiedTo < NumDefs && "Bad tied operand");
628 if (TiedTo > DefToFoldIdx)
629 --TiedTo;
630 NewMI->tieOperands(DefIdx: TiedTo, UseIdx: NewMI->getNumOperands() - 1);
631 }
632 }
633 }
634 return NewMI;
635}
636
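// For illustration: when a register operand of an INLINEASM is folded to a
// stack slot FI, the single register MachineOperand is replaced by the
// target's frame-index addressing sequence (whatever getFrameIndexOperands
// produces, e.g. base/scale/index/displacement/segment on X86), and the
// preceding flag operand is rewritten as a Mem-kind InlineAsm::Flag with an
// "m" constraint.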
637static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
638 const TargetInstrInfo &TII) {
639 // If the machine operand is tied, untie it first.
640 if (MI->getOperand(i: OpNo).isTied()) {
641 unsigned TiedTo = MI->findTiedOperandIdx(OpIdx: OpNo);
642 MI->untieRegOperand(OpIdx: OpNo);
643 // Intentional recursion!
644 foldInlineAsmMemOperand(MI, OpNo: TiedTo, FI, TII);
645 }
646
647 SmallVector<MachineOperand, 5> NewOps;
648 TII.getFrameIndexOperands(Ops&: NewOps, FI);
649 assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
650 MI->removeOperand(OpNo);
651 MI->insert(InsertBefore: MI->operands_begin() + OpNo, Ops: NewOps);
652
  // Change the preceding flag operand (MD) to a MemKind InlineAsm::Flag whose
  // operand count is the number of operands that now represent the memory
  // operand, i.e. NewOps.size().
656 InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size());
657 F.setMemConstraint(InlineAsm::ConstraintCode::m);
658 MachineOperand &MD = MI->getOperand(i: OpNo - 1);
659 MD.setImm(F);
660}
661
662// Returns nullptr if not possible to fold.
663static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI,
664 ArrayRef<unsigned> Ops, int FI,
665 const TargetInstrInfo &TII) {
666 assert(MI.isInlineAsm() && "wrong opcode");
667 if (Ops.size() > 1)
668 return nullptr;
669 unsigned Op = Ops[0];
670 assert(Op && "should never be first operand");
671 assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");
672
673 if (!MI.mayFoldInlineAsmRegOp(OpId: Op))
674 return nullptr;
675
676 MachineInstr &NewMI = TII.duplicate(MBB&: *MI.getParent(), InsertBefore: MI.getIterator(), Orig: MI);
677
678 foldInlineAsmMemOperand(MI: &NewMI, OpNo: Op, FI, TII);
679
680 // Update mayload/maystore metadata, and memoperands.
681 const VirtRegInfo &RI =
682 AnalyzeVirtRegInBundle(MI, Reg: MI.getOperand(i: Op).getReg());
683 MachineOperand &ExtraMO = NewMI.getOperand(i: InlineAsm::MIOp_ExtraInfo);
684 MachineMemOperand::Flags Flags = MachineMemOperand::MONone;
685 if (RI.Reads) {
686 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
687 Flags |= MachineMemOperand::MOLoad;
688 }
689 if (RI.Writes) {
690 ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
691 Flags |= MachineMemOperand::MOStore;
692 }
693 MachineFunction *MF = NewMI.getMF();
694 const MachineFrameInfo &MFI = MF->getFrameInfo();
695 MachineMemOperand *MMO = MF->getMachineMemOperand(
696 PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), F: Flags, Size: MFI.getObjectSize(ObjectIdx: FI),
697 BaseAlignment: MFI.getObjectAlign(ObjectIdx: FI));
698 NewMI.addMemOperand(MF&: *MF, MO: MMO);
699
700 return &NewMI;
701}
702
703MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
704 ArrayRef<unsigned> Ops, int FI,
705 LiveIntervals *LIS,
706 VirtRegMap *VRM) const {
707 auto Flags = MachineMemOperand::MONone;
708 for (unsigned OpIdx : Ops)
709 Flags |= MI.getOperand(i: OpIdx).isDef() ? MachineMemOperand::MOStore
710 : MachineMemOperand::MOLoad;
711
712 MachineBasicBlock *MBB = MI.getParent();
713 assert(MBB && "foldMemoryOperand needs an inserted instruction");
714 MachineFunction &MF = *MBB->getParent();
715
716 // If we're not folding a load into a subreg, the size of the load is the
717 // size of the spill slot. But if we are, we need to figure out what the
718 // actual load size is.
719 int64_t MemSize = 0;
720 const MachineFrameInfo &MFI = MF.getFrameInfo();
721
722 if (Flags & MachineMemOperand::MOStore) {
723 MemSize = MFI.getObjectSize(ObjectIdx: FI);
724 } else {
725 for (unsigned OpIdx : Ops) {
726 int64_t OpSize = MFI.getObjectSize(ObjectIdx: FI);
727
728 if (auto SubReg = MI.getOperand(i: OpIdx).getSubReg()) {
729 unsigned SubRegSize = TRI.getSubRegIdxSize(Idx: SubReg);
730 if (SubRegSize > 0 && !(SubRegSize % 8))
731 OpSize = SubRegSize / 8;
732 }
733
734 MemSize = std::max(a: MemSize, b: OpSize);
735 }
736 }
737
738 assert(MemSize && "Did not expect a zero-sized stack slot");
739
740 MachineInstr *NewMI = nullptr;
741
742 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
743 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
744 MI.getOpcode() == TargetOpcode::STATEPOINT) {
745 // Fold stackmap/patchpoint.
746 NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex: FI, TII: *this);
747 if (NewMI)
748 MBB->insert(I: MI, MI: NewMI);
749 } else if (MI.isInlineAsm()) {
750 return foldInlineAsmMemOperand(MI, Ops, FI, TII: *this);
751 } else {
752 // Ask the target to do the actual folding.
753 NewMI = foldMemoryOperandImpl(MF, MI, Ops, InsertPt: MI, FrameIndex: FI, LIS, VRM);
754 }
755
756 if (NewMI) {
757 NewMI->setMemRefs(MF, MemRefs: MI.memoperands());
758 // Add a memory operand, foldMemoryOperandImpl doesn't do that.
759 assert((!(Flags & MachineMemOperand::MOStore) ||
760 NewMI->mayStore()) &&
761 "Folded a def to a non-store!");
762 assert((!(Flags & MachineMemOperand::MOLoad) ||
763 NewMI->mayLoad()) &&
764 "Folded a use to a non-load!");
765 assert(MFI.getObjectOffset(FI) != -1);
766 MachineMemOperand *MMO =
767 MF.getMachineMemOperand(PtrInfo: MachinePointerInfo::getFixedStack(MF, FI),
768 F: Flags, Size: MemSize, BaseAlignment: MFI.getObjectAlign(ObjectIdx: FI));
769 NewMI->addMemOperand(MF, MO: MMO);
770
    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy them from the old instruction.
773 NewMI->cloneInstrSymbols(MF, MI);
774
775 return NewMI;
776 }
777
778 // Straight COPY may fold as load/store.
779 if (!isCopyInstr(MI) || Ops.size() != 1)
780 return nullptr;
781
782 const TargetRegisterClass *RC = canFoldCopy(MI, TII: *this, FoldIdx: Ops[0]);
783 if (!RC)
784 return nullptr;
785
786 const MachineOperand &MO = MI.getOperand(i: 1 - Ops[0]);
787 MachineBasicBlock::iterator Pos = MI;
788 if (Flags == MachineMemOperand::MOStore) {
789 if (MO.isUndef()) {
      // If this is an undef copy, we do not need to bother inserting spill
      // code.
792 BuildMI(BB&: *MBB, I: Pos, MIMD: MI.getDebugLoc(), MCID: get(Opcode: TargetOpcode::KILL)).add(MO);
793 } else {
794 storeRegToStackSlot(MBB&: *MBB, MI: Pos, SrcReg: MO.getReg(), isKill: MO.isKill(), FrameIndex: FI, RC,
795 VReg: Register());
796 }
797 } else
798 loadRegFromStackSlot(MBB&: *MBB, MI: Pos, DestReg: MO.getReg(), FrameIndex: FI, RC, VReg: Register());
799
800 return &*--Pos;
801}
802
803MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
804 ArrayRef<unsigned> Ops,
805 MachineInstr &LoadMI,
806 LiveIntervals *LIS) const {
807 assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
808#ifndef NDEBUG
809 for (unsigned OpIdx : Ops)
810 assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
811#endif
812
813 MachineBasicBlock &MBB = *MI.getParent();
814 MachineFunction &MF = *MBB.getParent();
815
816 // Ask the target to do the actual folding.
817 MachineInstr *NewMI = nullptr;
818 int FrameIndex = 0;
819
820 if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
821 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
822 MI.getOpcode() == TargetOpcode::STATEPOINT) &&
823 isLoadFromStackSlot(MI: LoadMI, FrameIndex)) {
824 // Fold stackmap/patchpoint.
825 NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, TII: *this);
826 if (NewMI)
827 NewMI = &*MBB.insert(I: MI, MI: NewMI);
828 } else if (MI.isInlineAsm() && isLoadFromStackSlot(MI: LoadMI, FrameIndex)) {
829 return foldInlineAsmMemOperand(MI, Ops, FI: FrameIndex, TII: *this);
830 } else {
831 // Ask the target to do the actual folding.
832 NewMI = foldMemoryOperandImpl(MF, MI, Ops, InsertPt: MI, LoadMI, LIS);
833 }
834
835 if (!NewMI)
836 return nullptr;
837
838 // Copy the memoperands from the load to the folded instruction.
839 if (MI.memoperands_empty()) {
840 NewMI->setMemRefs(MF, MemRefs: LoadMI.memoperands());
841 } else {
842 // Handle the rare case of folding multiple loads.
843 NewMI->setMemRefs(MF, MemRefs: MI.memoperands());
844 for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
845 E = LoadMI.memoperands_end();
846 I != E; ++I) {
847 NewMI->addMemOperand(MF, MO: *I);
848 }
849 }
850 return NewMI;
851}
852
853/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
854/// replacement instructions immediately precede it. Copy any implicit
855/// operands from MI to the replacement instruction.
856static void transferImplicitOperands(MachineInstr *MI,
857 const TargetRegisterInfo *TRI) {
858 MachineBasicBlock::iterator CopyMI = MI;
859 --CopyMI;
860
861 Register DstReg = MI->getOperand(i: 0).getReg();
862 for (const MachineOperand &MO : MI->implicit_operands()) {
863 CopyMI->addOperand(Op: MO);
864
    // Be conservative about preserving kills when subregister defs are
    // involved. If there was an implicit kill of a super-register overlapping
    // the copy result, we would kill the subregisters that previous copies
    // defined.
868
869 if (MO.isKill() && TRI->regsOverlap(RegA: DstReg, RegB: MO.getReg()))
870 CopyMI->getOperand(i: CopyMI->getNumOperands() - 1).setIsKill(false);
871 }
872}
873
874void TargetInstrInfo::lowerCopy(
875 MachineInstr *MI, const TargetRegisterInfo * /*Remove me*/) const {
876 if (MI->allDefsAreDead()) {
877 MI->setDesc(get(Opcode: TargetOpcode::KILL));
878 return;
879 }
880
881 MachineOperand &DstMO = MI->getOperand(i: 0);
882 MachineOperand &SrcMO = MI->getOperand(i: 1);
883
884 bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
885 if (IdentityCopy || SrcMO.isUndef()) {
886 // No need to insert an identity copy instruction, but replace with a KILL
887 // if liveness is changed.
888 if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
889 // We must make sure the super-register gets killed. Replace the
890 // instruction with KILL.
891 MI->setDesc(get(Opcode: TargetOpcode::KILL));
892 return;
893 }
894 // Vanilla identity copy.
895 MI->eraseFromParent();
896 return;
897 }
898
899 copyPhysReg(MBB&: *MI->getParent(), MI, DL: MI->getDebugLoc(), DestReg: DstMO.getReg(),
900 SrcReg: SrcMO.getReg(), KillSrc: SrcMO.isKill(),
901 RenamableDest: DstMO.getReg().isPhysical() ? DstMO.isRenamable() : false,
902 RenamableSrc: SrcMO.getReg().isPhysical() ? SrcMO.isRenamable() : false);
903
904 if (MI->getNumOperands() > 2)
905 transferImplicitOperands(MI, TRI: &TRI);
906 MI->eraseFromParent();
907}
908
909bool TargetInstrInfo::hasReassociableOperands(
910 const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
911 const MachineOperand &Op1 = Inst.getOperand(i: 1);
912 const MachineOperand &Op2 = Inst.getOperand(i: 2);
913 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
914
915 // We need virtual register definitions for the operands that we will
916 // reassociate.
917 MachineInstr *MI1 = nullptr;
918 MachineInstr *MI2 = nullptr;
919 if (Op1.isReg() && Op1.getReg().isVirtual())
920 MI1 = MRI.getUniqueVRegDef(Reg: Op1.getReg());
921 if (Op2.isReg() && Op2.getReg().isVirtual())
922 MI2 = MRI.getUniqueVRegDef(Reg: Op2.getReg());
923
924 // And at least one operand must be defined in MBB.
925 return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
926}
927
928bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
929 unsigned Opcode2) const {
930 return Opcode1 == Opcode2 || getInverseOpcode(Opcode: Opcode1) == Opcode2;
931}
932
933bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
934 bool &Commuted) const {
935 const MachineBasicBlock *MBB = Inst.getParent();
936 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
937 MachineInstr *MI1 = MRI.getUniqueVRegDef(Reg: Inst.getOperand(i: 1).getReg());
938 MachineInstr *MI2 = MRI.getUniqueVRegDef(Reg: Inst.getOperand(i: 2).getReg());
939 unsigned Opcode = Inst.getOpcode();
940
941 // If only one operand has the same or inverse opcode and it's the second
942 // source operand, the operands must be commuted.
943 Commuted = !areOpcodesEqualOrInverse(Opcode1: Opcode, Opcode2: MI1->getOpcode()) &&
944 areOpcodesEqualOrInverse(Opcode1: Opcode, Opcode2: MI2->getOpcode());
945 if (Commuted)
946 std::swap(a&: MI1, b&: MI2);
947
948 // 1. The previous instruction must be the same type as Inst.
949 // 2. The previous instruction must also be associative/commutative or be the
950 // inverse of such an operation (this can be different even for
951 // instructions with the same opcode if traits like fast-math-flags are
952 // included).
953 // 3. The previous instruction must have virtual register definitions for its
954 // operands in the same basic block as Inst.
955 // 4. The previous instruction's result must only be used by Inst.
956 return areOpcodesEqualOrInverse(Opcode1: Opcode, Opcode2: MI1->getOpcode()) &&
957 (isAssociativeAndCommutative(Inst: *MI1) ||
958 isAssociativeAndCommutative(Inst: *MI1, /* Invert */ true)) &&
959 hasReassociableOperands(Inst: *MI1, MBB) &&
960 MRI.hasOneNonDBGUse(RegNo: MI1->getOperand(i: 0).getReg());
961}
962
963// 1. The operation must be associative and commutative or be the inverse of
964// such an operation.
965// 2. The instruction must have virtual register definitions for its
966// operands in the same basic block.
967// 3. The instruction must have a reassociable sibling.
968bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
969 bool &Commuted) const {
970 return (isAssociativeAndCommutative(Inst) ||
971 isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
972 hasReassociableOperands(Inst, MBB: Inst.getParent()) &&
973 hasReassociableSibling(Inst, Commuted);
974}
975
// Utility routine that checks if \p MO is defined by a \p CombineOpc
// instruction in the basic block \p MBB. If \p CombineOpc is not provided,
// the opcode check will be skipped.
980static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
981 unsigned CombineOpc = 0) {
982 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
983 MachineInstr *MI = nullptr;
984
985 if (MO.isReg() && MO.getReg().isVirtual())
986 MI = MRI.getUniqueVRegDef(Reg: MO.getReg());
987 // And it needs to be in the trace (otherwise, it won't have a depth).
988 if (!MI || MI->getParent() != &MBB ||
989 (MI->getOpcode() != CombineOpc && CombineOpc != 0))
990 return false;
  // It must be used only by the instruction we combine with.
992 if (!MRI.hasOneNonDBGUse(RegNo: MO.getReg()))
993 return false;
994
995 return true;
996}
997
// A chain of accumulation instructions will be selected iff:
// 1. All the accumulation instructions in the chain have the same opcode,
//    besides the first, which has a slightly different opcode because it does
//    not accumulate into a register.
// 2. All the instructions in the chain are combinable (have a single use
//    which itself is part of the chain).
// 3. The chain meets the required minimum length.
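//
// For illustration (using a hypothetical accumulating opcode "MLA" whose chain
// starts with a plain "MUL"):
//   %1 = MUL %a, %b        ; start of the chain, does not accumulate
//   %2 = MLA %1, %c, %d
//   %3 = MLA %2, %e, %f
//   %4 = MLA %3, %g, %h    ; Root
// getAccumulatorChain collects the chain bottom-up as {%4, %3, %2, %1}.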
1005void TargetInstrInfo::getAccumulatorChain(
1006 MachineInstr *CurrentInstr, SmallVectorImpl<Register> &Chain) const {
1007 // Walk up the chain of accumulation instructions and collect them in the
1008 // vector.
1009 MachineBasicBlock &MBB = *CurrentInstr->getParent();
1010 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1011 unsigned AccumulatorOpcode = CurrentInstr->getOpcode();
1012 std::optional<unsigned> ChainStartOpCode =
1013 getAccumulationStartOpcode(Opcode: AccumulatorOpcode);
1014
1015 if (!ChainStartOpCode.has_value())
1016 return;
1017
1018 // Push the first accumulator result to the start of the chain.
1019 Chain.push_back(Elt: CurrentInstr->getOperand(i: 0).getReg());
1020
1021 // Collect the accumulator input register from all instructions in the chain.
1022 while (CurrentInstr &&
1023 canCombine(MBB, MO&: CurrentInstr->getOperand(i: 1), CombineOpc: AccumulatorOpcode)) {
1024 Chain.push_back(Elt: CurrentInstr->getOperand(i: 1).getReg());
1025 CurrentInstr = MRI.getUniqueVRegDef(Reg: CurrentInstr->getOperand(i: 1).getReg());
1026 }
1027
  // Add the result of the instruction at the top of the chain.
1029 if (CurrentInstr->getOpcode() == AccumulatorOpcode &&
1030 canCombine(MBB, MO&: CurrentInstr->getOperand(i: 1)))
1031 Chain.push_back(Elt: CurrentInstr->getOperand(i: 1).getReg());
1032}
1033
1034/// Find chains of accumulations that can be rewritten as a tree for increased
1035/// ILP.
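///
/// For example, a long linear chain that accumulates into a single register
/// can be split into up to MaxAccumulatorWidth independent sub-chains that are
/// summed at the end, shortening the dependency chain through the accumulator.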
1036bool TargetInstrInfo::getAccumulatorReassociationPatterns(
1037 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const {
1038 if (!EnableAccReassociation)
1039 return false;
1040
1041 unsigned Opc = Root.getOpcode();
1042 if (!isAccumulationOpcode(Opcode: Opc))
1043 return false;
1044
1045 // Verify that this is the end of the chain.
1046 MachineBasicBlock &MBB = *Root.getParent();
1047 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1048 if (!MRI.hasOneNonDBGUser(RegNo: Root.getOperand(i: 0).getReg()))
1049 return false;
1050
1051 auto User = MRI.use_instr_begin(RegNo: Root.getOperand(i: 0).getReg());
1052 if (User->getOpcode() == Opc)
1053 return false;
1054
1055 // Walk up the use chain and collect the reduction chain.
1056 SmallVector<Register, 32> Chain;
1057 getAccumulatorChain(CurrentInstr: &Root, Chain);
1058
1059 // Reject chains which are too short to be worth modifying.
1060 if (Chain.size() < MinAccumulatorDepth)
1061 return false;
1062
  // Check if the MBB this instruction is a part of contains any other chains.
  // If so, don't apply the transformation.
1065 SmallSet<Register, 32> ReductionChain(llvm::from_range, Chain);
1066 for (const auto &I : MBB) {
1067 if (I.getOpcode() == Opc &&
1068 !ReductionChain.contains(V: I.getOperand(i: 0).getReg()))
1069 return false;
1070 }
1071
1072 Patterns.push_back(Elt: MachineCombinerPattern::ACC_CHAIN);
1073 return true;
1074}
1075
1076// Reduce branches of the accumulator tree by adding them together.
1077void TargetInstrInfo::reduceAccumulatorTree(
1078 SmallVectorImpl<Register> &RegistersToReduce,
1079 SmallVectorImpl<MachineInstr *> &InsInstrs, MachineFunction &MF,
1080 MachineInstr &Root, MachineRegisterInfo &MRI,
1081 DenseMap<Register, unsigned> &InstrIdxForVirtReg,
1082 Register ResultReg) const {
1083 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
1084 SmallVector<Register, 8> NewRegs;
1085
1086 // Get the opcode for the reduction instruction we will need to build.
1087 // If for some reason it is not defined, early exit and don't apply this.
1088 unsigned ReduceOpCode = getReduceOpcodeForAccumulator(AccumulatorOpCode: Root.getOpcode());
1089
1090 for (unsigned int i = 1; i <= (RegistersToReduce.size() / 2); i += 2) {
1091 auto RHS = RegistersToReduce[i - 1];
1092 auto LHS = RegistersToReduce[i];
1093 Register Dest;
1094 // If we are reducing 2 registers, reuse the original result register.
1095 if (RegistersToReduce.size() == 2)
1096 Dest = ResultReg;
1097 // Otherwise, create a new virtual register to hold the partial sum.
1098 else {
1099 auto NewVR = MRI.createVirtualRegister(
1100 RegClass: MRI.getRegClass(Reg: Root.getOperand(i: 0).getReg()));
1101 Dest = NewVR;
1102 NewRegs.push_back(Elt: Dest);
1103 InstrIdxForVirtReg.insert(KV: std::make_pair(x&: Dest, y: InsInstrs.size()));
1104 }
1105
1106 // Create the new reduction instruction.
1107 MachineInstrBuilder MIB =
1108 BuildMI(MF, MIMD: MIMetadata(Root), MCID: TII->get(Opcode: ReduceOpCode), DestReg: Dest)
1109 .addReg(RegNo: RHS, Flags: getKillRegState(B: true))
1110 .addReg(RegNo: LHS, Flags: getKillRegState(B: true));
1111 // Copy any flags needed from the original instruction.
1112 MIB->setFlags(Root.getFlags());
1113 InsInstrs.push_back(Elt: MIB);
1114 }
1115
1116 // If the number of registers to reduce is odd, add the remaining register to
1117 // the vector of registers to reduce.
1118 if (RegistersToReduce.size() % 2 != 0)
1119 NewRegs.push_back(Elt: RegistersToReduce[RegistersToReduce.size() - 1]);
1120
1121 RegistersToReduce = std::move(NewRegs);
1122}
1123
1124// The concept of the reassociation pass is that these operations can benefit
1125// from this kind of transformation:
1126//
1127// A = ? op ?
1128// B = A op X (Prev)
1129// C = B op Y (Root)
1130// -->
1131// A = ? op ?
1132// B = X op Y
1133// C = A op B
1134//
1135// breaking the dependency between A and B, allowing them to be executed in
1136// parallel (or back-to-back in a pipeline) instead of depending on each other.
1137
1138// FIXME: This has the potential to be expensive (compile time) while not
1139// improving the code at all. Some ways to limit the overhead:
1140// 1. Track successful transforms; bail out if hit rate gets too low.
1141// 2. Only enable at -O3 or some other non-default optimization level.
1142// 3. Pre-screen pattern candidates here: if an operand of the previous
1143// instruction is known to not increase the critical path, then don't match
1144// that pattern.
1145bool TargetInstrInfo::getMachineCombinerPatterns(
1146 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
1147 bool DoRegPressureReduce) const {
1148 bool Commute;
1149 if (isReassociationCandidate(Inst: Root, Commuted&: Commute)) {
1150 // We found a sequence of instructions that may be suitable for a
1151 // reassociation of operands to increase ILP. Specify each commutation
1152 // possibility for the Prev instruction in the sequence and let the
1153 // machine combiner decide if changing the operands is worthwhile.
1154 if (Commute) {
1155 Patterns.push_back(Elt: MachineCombinerPattern::REASSOC_AX_YB);
1156 Patterns.push_back(Elt: MachineCombinerPattern::REASSOC_XA_YB);
1157 } else {
1158 Patterns.push_back(Elt: MachineCombinerPattern::REASSOC_AX_BY);
1159 Patterns.push_back(Elt: MachineCombinerPattern::REASSOC_XA_BY);
1160 }
1161 return true;
1162 }
1163 if (getAccumulatorReassociationPatterns(Root, Patterns))
1164 return true;
1165
1166 return false;
1167}
1168
1169/// Return true when a code sequence can improve loop throughput.
1170bool TargetInstrInfo::isThroughputPattern(unsigned Pattern) const {
1171 return false;
1172}
1173
1174CombinerObjective
1175TargetInstrInfo::getCombinerObjective(unsigned Pattern) const {
1176 switch (Pattern) {
1177 case MachineCombinerPattern::ACC_CHAIN:
1178 return CombinerObjective::MustReduceDepth;
1179 default:
1180 return CombinerObjective::Default;
1181 }
1182}
1183
1184std::pair<unsigned, unsigned>
1185TargetInstrInfo::getReassociationOpcodes(unsigned Pattern,
1186 const MachineInstr &Root,
1187 const MachineInstr &Prev) const {
1188 bool AssocCommutRoot = isAssociativeAndCommutative(Inst: Root);
1189 bool AssocCommutPrev = isAssociativeAndCommutative(Inst: Prev);
1190
  // Early exit if both opcodes are associative and commutative. It's a trivial
  // reassociation when we only change the operand order. In this case the
  // opcodes are not required to have inverse versions.
1194 if (AssocCommutRoot && AssocCommutPrev) {
1195 assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
1196 return std::make_pair(x: Root.getOpcode(), y: Root.getOpcode());
1197 }
1198
  // At least one instruction is not associative or commutative.
  // Since we have matched one of the reassociation patterns, we expect that
  // the instructions' opcodes are equal or one of them is the inverse of the
  // other.
1203 assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
1204 "Incorrectly matched pattern");
1205 unsigned AssocCommutOpcode = Root.getOpcode();
1206 unsigned InverseOpcode = *getInverseOpcode(Opcode: Root.getOpcode());
1207 if (!AssocCommutRoot)
1208 std::swap(a&: AssocCommutOpcode, b&: InverseOpcode);
1209
1210 // The transformation rule (`+` is any associative and commutative binary
1211 // operation, `-` is the inverse):
1212 // REASSOC_AX_BY:
1213 // (A + X) + Y => A + (X + Y)
1214 // (A + X) - Y => A + (X - Y)
1215 // (A - X) + Y => A - (X - Y)
1216 // (A - X) - Y => A - (X + Y)
1217 // REASSOC_XA_BY:
1218 // (X + A) + Y => (X + Y) + A
1219 // (X + A) - Y => (X - Y) + A
1220 // (X - A) + Y => (X + Y) - A
1221 // (X - A) - Y => (X - Y) - A
1222 // REASSOC_AX_YB:
1223 // Y + (A + X) => (Y + X) + A
1224 // Y - (A + X) => (Y - X) - A
1225 // Y + (A - X) => (Y - X) + A
1226 // Y - (A - X) => (Y + X) - A
1227 // REASSOC_XA_YB:
1228 // Y + (X + A) => (Y + X) + A
1229 // Y - (X + A) => (Y - X) - A
1230 // Y + (X - A) => (Y + X) - A
1231 // Y - (X - A) => (Y - X) + A
1232 switch (Pattern) {
1233 default:
1234 llvm_unreachable("Unexpected pattern");
1235 case MachineCombinerPattern::REASSOC_AX_BY:
1236 if (!AssocCommutRoot && AssocCommutPrev)
1237 return {AssocCommutOpcode, InverseOpcode};
1238 if (AssocCommutRoot && !AssocCommutPrev)
1239 return {InverseOpcode, InverseOpcode};
1240 if (!AssocCommutRoot && !AssocCommutPrev)
1241 return {InverseOpcode, AssocCommutOpcode};
1242 break;
1243 case MachineCombinerPattern::REASSOC_XA_BY:
1244 if (!AssocCommutRoot && AssocCommutPrev)
1245 return {AssocCommutOpcode, InverseOpcode};
1246 if (AssocCommutRoot && !AssocCommutPrev)
1247 return {InverseOpcode, AssocCommutOpcode};
1248 if (!AssocCommutRoot && !AssocCommutPrev)
1249 return {InverseOpcode, InverseOpcode};
1250 break;
1251 case MachineCombinerPattern::REASSOC_AX_YB:
1252 if (!AssocCommutRoot && AssocCommutPrev)
1253 return {InverseOpcode, InverseOpcode};
1254 if (AssocCommutRoot && !AssocCommutPrev)
1255 return {AssocCommutOpcode, InverseOpcode};
1256 if (!AssocCommutRoot && !AssocCommutPrev)
1257 return {InverseOpcode, AssocCommutOpcode};
1258 break;
1259 case MachineCombinerPattern::REASSOC_XA_YB:
1260 if (!AssocCommutRoot && AssocCommutPrev)
1261 return {InverseOpcode, InverseOpcode};
1262 if (AssocCommutRoot && !AssocCommutPrev)
1263 return {InverseOpcode, AssocCommutOpcode};
1264 if (!AssocCommutRoot && !AssocCommutPrev)
1265 return {AssocCommutOpcode, InverseOpcode};
1266 break;
1267 }
1268 llvm_unreachable("Unhandled combination");
1269}
1270
1271// Return a pair of boolean flags showing if the new root and new prev operands
1272// must be swapped. See visual example of the rule in
1273// TargetInstrInfo::getReassociationOpcodes.
1274static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
1275 switch (Pattern) {
1276 default:
1277 llvm_unreachable("Unexpected pattern");
1278 case MachineCombinerPattern::REASSOC_AX_BY:
1279 return {false, false};
1280 case MachineCombinerPattern::REASSOC_XA_BY:
1281 return {true, false};
1282 case MachineCombinerPattern::REASSOC_AX_YB:
1283 return {true, true};
1284 case MachineCombinerPattern::REASSOC_XA_YB:
1285 return {true, true};
1286 }
1287}
1288
1289void TargetInstrInfo::getReassociateOperandIndices(
1290 const MachineInstr &Root, unsigned Pattern,
1291 std::array<unsigned, 5> &OperandIndices) const {
1292 switch (Pattern) {
1293 case MachineCombinerPattern::REASSOC_AX_BY:
1294 OperandIndices = {1, 1, 1, 2, 2};
1295 break;
1296 case MachineCombinerPattern::REASSOC_AX_YB:
1297 OperandIndices = {2, 1, 2, 2, 1};
1298 break;
1299 case MachineCombinerPattern::REASSOC_XA_BY:
1300 OperandIndices = {1, 2, 1, 1, 2};
1301 break;
1302 case MachineCombinerPattern::REASSOC_XA_YB:
1303 OperandIndices = {2, 2, 2, 1, 1};
1304 break;
1305 default:
1306 llvm_unreachable("unexpected MachineCombinerPattern");
1307 }
1308}
1309
1310/// Attempt the reassociation transformation to reduce critical path length.
1311/// See the above comments before getMachineCombinerPatterns().
1312void TargetInstrInfo::reassociateOps(
1313 MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
1314 SmallVectorImpl<MachineInstr *> &InsInstrs,
1315 SmallVectorImpl<MachineInstr *> &DelInstrs,
1316 ArrayRef<unsigned> OperandIndices,
1317 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
1318 MachineFunction *MF = Root.getMF();
1319 MachineRegisterInfo &MRI = MF->getRegInfo();
1320 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1321 const TargetRegisterClass *RC = Root.getRegClassConstraint(OpIdx: 0, TII, TRI: &TRI);
1322
1323 MachineOperand &OpA = Prev.getOperand(i: OperandIndices[1]);
1324 MachineOperand &OpB = Root.getOperand(i: OperandIndices[2]);
1325 MachineOperand &OpX = Prev.getOperand(i: OperandIndices[3]);
1326 MachineOperand &OpY = Root.getOperand(i: OperandIndices[4]);
1327 MachineOperand &OpC = Root.getOperand(i: 0);
1328
1329 Register RegA = OpA.getReg();
1330 unsigned SubRegA = OpA.getSubReg();
1331 Register RegB = OpB.getReg();
1332 Register RegX = OpX.getReg();
1333 unsigned SubRegX = OpX.getSubReg();
1334 Register RegY = OpY.getReg();
1335 unsigned SubRegY = OpY.getSubReg();
1336 Register RegC = OpC.getReg();
1337
1338 if (RegA.isVirtual())
1339 MRI.constrainRegClass(Reg: RegA, RC);
1340 if (RegB.isVirtual())
1341 MRI.constrainRegClass(Reg: RegB, RC);
1342 if (RegX.isVirtual())
1343 MRI.constrainRegClass(Reg: RegX, RC);
1344 if (RegY.isVirtual())
1345 MRI.constrainRegClass(Reg: RegY, RC);
1346 if (RegC.isVirtual())
1347 MRI.constrainRegClass(Reg: RegC, RC);
1348
1349 // Create a new virtual register for the result of (X op Y) instead of
1350 // recycling RegB because the MachineCombiner's computation of the critical
1351 // path requires a new register definition rather than an existing one.
1352 Register NewVR = MRI.createVirtualRegister(RegClass: RC);
1353 unsigned SubRegNewVR = 0;
1354 InstrIdxForVirtReg.insert(KV: std::make_pair(x&: NewVR, y: 0));
1355
1356 auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
1357 bool KillA = OpA.isKill();
1358 bool KillX = OpX.isKill();
1359 bool KillY = OpY.isKill();
1360 bool KillNewVR = true;
1361
1362 auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);
1363
1364 if (SwapPrevOperands) {
1365 std::swap(a&: RegX, b&: RegY);
1366 std::swap(a&: SubRegX, b&: SubRegY);
1367 std::swap(a&: KillX, b&: KillY);
1368 }
1369
1370 unsigned PrevFirstOpIdx, PrevSecondOpIdx;
1371 unsigned RootFirstOpIdx, RootSecondOpIdx;
1372 switch (Pattern) {
1373 case MachineCombinerPattern::REASSOC_AX_BY:
1374 PrevFirstOpIdx = OperandIndices[1];
1375 PrevSecondOpIdx = OperandIndices[3];
1376 RootFirstOpIdx = OperandIndices[2];
1377 RootSecondOpIdx = OperandIndices[4];
1378 break;
1379 case MachineCombinerPattern::REASSOC_AX_YB:
1380 PrevFirstOpIdx = OperandIndices[1];
1381 PrevSecondOpIdx = OperandIndices[3];
1382 RootFirstOpIdx = OperandIndices[4];
1383 RootSecondOpIdx = OperandIndices[2];
1384 break;
1385 case MachineCombinerPattern::REASSOC_XA_BY:
1386 PrevFirstOpIdx = OperandIndices[3];
1387 PrevSecondOpIdx = OperandIndices[1];
1388 RootFirstOpIdx = OperandIndices[2];
1389 RootSecondOpIdx = OperandIndices[4];
1390 break;
1391 case MachineCombinerPattern::REASSOC_XA_YB:
1392 PrevFirstOpIdx = OperandIndices[3];
1393 PrevSecondOpIdx = OperandIndices[1];
1394 RootFirstOpIdx = OperandIndices[4];
1395 RootSecondOpIdx = OperandIndices[2];
1396 break;
1397 default:
1398 llvm_unreachable("unexpected MachineCombinerPattern");
1399 }
1400
1401 // Basically BuildMI but doesn't add implicit operands by default.
1402 auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
1403 const MCInstrDesc &MCID, Register DestReg) {
1404 return MachineInstrBuilder(
1405 MF, MF.CreateMachineInstr(MCID, DL: MIMD.getDL(), /*NoImpl=*/NoImplicit: true))
1406 .copyMIMetadata(MIMD)
1407 .addReg(RegNo: DestReg, Flags: RegState::Define);
1408 };
1409
1410 // Create new instructions for insertion.
1411 MachineInstrBuilder MIB1 =
1412 buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(Opcode: NewPrevOpc), NewVR);
1413 for (const auto &MO : Prev.explicit_operands()) {
1414 unsigned Idx = MO.getOperandNo();
1415 // Skip the result operand we'd already added.
1416 if (Idx == 0)
1417 continue;
1418 if (Idx == PrevFirstOpIdx)
1419 MIB1.addReg(RegNo: RegX, Flags: getKillRegState(B: KillX), SubReg: SubRegX);
1420 else if (Idx == PrevSecondOpIdx)
1421 MIB1.addReg(RegNo: RegY, Flags: getKillRegState(B: KillY), SubReg: SubRegY);
1422 else
1423 MIB1.add(MO);
1424 }
1425 MIB1.copyImplicitOps(OtherMI: Prev);
1426
1427 if (SwapRootOperands) {
1428 std::swap(a&: RegA, b&: NewVR);
1429 std::swap(a&: SubRegA, b&: SubRegNewVR);
1430 std::swap(a&: KillA, b&: KillNewVR);
1431 }
1432
1433 MachineInstrBuilder MIB2 =
1434 buildMINoImplicit(*MF, MIMetadata(Root), TII->get(Opcode: NewRootOpc), RegC);
1435 for (const auto &MO : Root.explicit_operands()) {
1436 unsigned Idx = MO.getOperandNo();
1437 // Skip the result operand.
1438 if (Idx == 0)
1439 continue;
1440 if (Idx == RootFirstOpIdx)
1441 MIB2 = MIB2.addReg(RegNo: RegA, Flags: getKillRegState(B: KillA), SubReg: SubRegA);
1442 else if (Idx == RootSecondOpIdx)
1443 MIB2 = MIB2.addReg(RegNo: NewVR, Flags: getKillRegState(B: KillNewVR), SubReg: SubRegNewVR);
1444 else
1445 MIB2 = MIB2.add(MO);
1446 }
1447 MIB2.copyImplicitOps(OtherMI: Root);
1448
1449 // Propagate FP flags from the original instructions.
1450 // But clear poison-generating flags because those may not be valid now.
1451 // TODO: There should be a helper function for copying only fast-math-flags.
1452 uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
1453 MIB1->setFlags(IntersectedFlags);
1454 MIB1->clearFlag(Flag: MachineInstr::MIFlag::NoSWrap);
1455 MIB1->clearFlag(Flag: MachineInstr::MIFlag::NoUWrap);
1456 MIB1->clearFlag(Flag: MachineInstr::MIFlag::IsExact);
1457 MIB1->clearFlag(Flag: MachineInstr::MIFlag::Disjoint);
1458
1459 MIB2->setFlags(IntersectedFlags);
1460 MIB2->clearFlag(Flag: MachineInstr::MIFlag::NoSWrap);
1461 MIB2->clearFlag(Flag: MachineInstr::MIFlag::NoUWrap);
1462 MIB2->clearFlag(Flag: MachineInstr::MIFlag::IsExact);
1463 MIB2->clearFlag(Flag: MachineInstr::MIFlag::Disjoint);
1464
1465 setSpecialOperandAttr(OldMI1&: Root, OldMI2&: Prev, NewMI1&: *MIB1, NewMI2&: *MIB2);
1466
1467 // Record new instructions for insertion and old instructions for deletion.
1468 InsInstrs.push_back(Elt: MIB1);
1469 InsInstrs.push_back(Elt: MIB2);
1470 DelInstrs.push_back(Elt: &Prev);
1471 DelInstrs.push_back(Elt: &Root);
1472
1473 // We transformed:
1474 // B = A op X (Prev)
1475 // C = B op Y (Root)
1476 // Into:
1477 // B = X op Y (MIB1)
1478 // C = A op B (MIB2)
1479 // C has the same value as before, B doesn't; as such, keep the debug number
1480 // of C but not of B.
1481 if (unsigned OldRootNum = Root.peekDebugInstrNum())
1482 MIB2.getInstr()->setDebugInstrNum(OldRootNum);
1483}
1484
1485void TargetInstrInfo::genAlternativeCodeSequence(
1486 MachineInstr &Root, unsigned Pattern,
1487 SmallVectorImpl<MachineInstr *> &InsInstrs,
1488 SmallVectorImpl<MachineInstr *> &DelInstrs,
1489 DenseMap<Register, unsigned> &InstIdxForVirtReg) const {
1490 MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
1491 MachineBasicBlock &MBB = *Root.getParent();
1492 MachineFunction &MF = *MBB.getParent();
1493 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
1494
1495 switch (Pattern) {
1496 case MachineCombinerPattern::REASSOC_AX_BY:
1497 case MachineCombinerPattern::REASSOC_AX_YB:
1498 case MachineCombinerPattern::REASSOC_XA_BY:
1499 case MachineCombinerPattern::REASSOC_XA_YB: {
1500 // Select the previous instruction in the sequence based on the input
1501 // pattern.
1502 std::array<unsigned, 5> OperandIndices;
1503 getReassociateOperandIndices(Root, Pattern, OperandIndices);
1504 MachineInstr *Prev =
1505 MRI.getUniqueVRegDef(Reg: Root.getOperand(i: OperandIndices[0]).getReg());
1506
1507 // Don't reassociate if Prev and Root are in different blocks.
1508 if (Prev->getParent() != Root.getParent())
1509 return;
1510
1511 reassociateOps(Root, Prev&: *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
1512 InstrIdxForVirtReg&: InstIdxForVirtReg);
1513 break;
1514 }
1515 case MachineCombinerPattern::ACC_CHAIN: {
1516 SmallVector<Register, 32> ChainRegs;
1517 getAccumulatorChain(CurrentInstr: &Root, Chain&: ChainRegs);
1518 unsigned int Depth = ChainRegs.size();
1519 assert(MaxAccumulatorWidth > 1 &&
1520 "Max accumulator width set to illegal value");
1521 unsigned int MaxWidth = Log2_32(Value: Depth) < MaxAccumulatorWidth
1522 ? Log2_32(Value: Depth)
1523 : MaxAccumulatorWidth;
1524
1525 // Walk down the chain and rewrite it as a tree.
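// For example, with MaxWidth == 2 a linear accumulation chain
//   acc1 = acc0 op x1; acc2 = acc1 op x2; acc3 = acc2 op x3; acc4 = acc3 op x4
// is split into up to MaxWidth independent sub-chains that accumulate in
// parallel; reduceAccumulatorTree() below sums their partial results into the
// destination register of Root.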
1526 for (auto IndexedReg : llvm::enumerate(First: llvm::reverse(C&: ChainRegs))) {
1527 // No need to rewrite the first node; it is already correct as is.
1528 if (IndexedReg.index() == 0)
1529 continue;
1530
1531 // FIXME: Losing subregisters
1532 MachineInstr *Instr = MRI.getUniqueVRegDef(Reg: IndexedReg.value());
1533 MachineInstrBuilder MIB;
1534 Register AccReg;
1535 if (IndexedReg.index() < MaxWidth) {
1536 // Now we need to create new instructions for the first row.
1537 AccReg = Instr->getOperand(i: 0).getReg();
1538 unsigned OpCode = getAccumulationStartOpcode(Opcode: Root.getOpcode());
1539
1540 MIB = BuildMI(MF, MIMD: MIMetadata(*Instr), MCID: TII->get(Opcode: OpCode), DestReg: AccReg)
1541 .addReg(RegNo: Instr->getOperand(i: 2).getReg(),
1542 Flags: getKillRegState(B: Instr->getOperand(i: 2).isKill()))
1543 .addReg(RegNo: Instr->getOperand(i: 3).getReg(),
1544 Flags: getKillRegState(B: Instr->getOperand(i: 3).isKill()));
1545 } else {
1546 // For the remaining cases, we need to use an output register of one of
1547 // the newly inserted instructions as operand 1.
1548 AccReg = Instr->getOperand(i: 0).getReg() == Root.getOperand(i: 0).getReg()
1549 ? MRI.createVirtualRegister(
1550 RegClass: MRI.getRegClass(Reg: Root.getOperand(i: 0).getReg()))
1551 : Instr->getOperand(i: 0).getReg();
1552 assert(IndexedReg.index() >= MaxWidth);
1553 auto AccumulatorInput =
1554 ChainRegs[Depth - (IndexedReg.index() - MaxWidth) - 1];
1555 MIB = BuildMI(MF, MIMD: MIMetadata(*Instr), MCID: TII->get(Opcode: Instr->getOpcode()),
1556 DestReg: AccReg)
1557 .addReg(RegNo: AccumulatorInput, Flags: getKillRegState(B: true))
1558 .addReg(RegNo: Instr->getOperand(i: 2).getReg(),
1559 Flags: getKillRegState(B: Instr->getOperand(i: 2).isKill()))
1560 .addReg(RegNo: Instr->getOperand(i: 3).getReg(),
1561 Flags: getKillRegState(B: Instr->getOperand(i: 3).isKill()));
1562 }
1563
1564 MIB->setFlags(Instr->getFlags());
1565 InstIdxForVirtReg.insert(KV: std::make_pair(x&: AccReg, y: InsInstrs.size()));
1566 InsInstrs.push_back(Elt: MIB);
1567 DelInstrs.push_back(Elt: Instr);
1568 }
1569
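// The last MaxWidth instructions inserted above hold the live tails of the
// independent sub-chains; collect their results so they can be reduced into
// the destination register of Root.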
1570 SmallVector<Register, 8> RegistersToReduce;
1571 for (unsigned i = (InsInstrs.size() - MaxWidth); i < InsInstrs.size();
1572 ++i) {
1573 auto Reg = InsInstrs[i]->getOperand(i: 0).getReg();
1574 RegistersToReduce.push_back(Elt: Reg);
1575 }
1576
1577 while (RegistersToReduce.size() > 1)
1578 reduceAccumulatorTree(RegistersToReduce, InsInstrs, MF, Root, MRI,
1579 InstrIdxForVirtReg&: InstIdxForVirtReg, ResultReg: Root.getOperand(i: 0).getReg());
1580
1581 break;
1582 }
1583 }
1584}
1585
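// By default, evaluate machine-combiner patterns on the trace that minimizes
// the instruction count. Targets may override this to select a different
// trace strategy.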
1586MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const {
1587 return MachineTraceStrategy::TS_MinInstrCount;
1588}
1589
1590bool TargetInstrInfo::isReMaterializableImpl(
1591 const MachineInstr &MI) const {
1592 const MachineFunction &MF = *MI.getMF();
1593 const MachineRegisterInfo &MRI = MF.getRegInfo();
1594
1595 // Remat clients assume operand 0 is the defined register.
1596 if (!MI.getNumOperands() || !MI.getOperand(i: 0).isReg())
1597 return false;
1598 Register DefReg = MI.getOperand(i: 0).getReg();
1599
1600 // A sub-register definition can only be rematerialized if the instruction
1601 // doesn't read the other parts of the register. Otherwise it is really a
1602 // read-modify-write operation on the full virtual register which cannot be
1603 // moved safely.
1604 if (DefReg.isVirtual() && MI.getOperand(i: 0).getSubReg() &&
1605 MI.readsVirtualRegister(Reg: DefReg))
1606 return false;
1607
1608 // A load from a fixed stack slot can be rematerialized. This may be
1609 // redundant with subsequent checks, but it's target-independent,
1610 // simple, and a common case.
1611 int FrameIdx = 0;
1612 if (isLoadFromStackSlot(MI, FrameIndex&: FrameIdx) &&
1613 MF.getFrameInfo().isImmutableObjectIndex(ObjectIdx: FrameIdx))
1614 return true;
1615
1616 // Avoid instructions obviously unsafe for remat.
1617 if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
1618 MI.hasUnmodeledSideEffects())
1619 return false;
1620
1621 // Don't remat inline asm. We have no idea how expensive it is
1622 // even if it's side effect free.
1623 if (MI.isInlineAsm())
1624 return false;
1625
1626 // Avoid instructions which load from potentially varying memory.
1627 if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
1628 return false;
1629
1630 // If any of the registers accessed are non-constant, conservatively assume
1631 // the instruction is not rematerializable.
1632 for (const MachineOperand &MO : MI.operands()) {
1633 if (!MO.isReg()) continue;
1634 Register Reg = MO.getReg();
1635 if (Reg == 0)
1636 continue;
1637
1638 // Check for a well-behaved physical register.
1639 if (Reg.isPhysical()) {
1640 if (MO.isUse()) {
1641 // If the physreg has no defs anywhere, it's just an ambient register
1642 // and we can freely move its uses. Alternatively, if it's allocatable,
1643 // it could get allocated to something with a def during allocation.
1644 if (!MRI.isConstantPhysReg(PhysReg: Reg))
1645 return false;
1646 } else {
1647 // A physreg def. We can't remat it.
1648 return false;
1649 }
1650 continue;
1651 }
1652
1653 // Only allow one virtual-register def. There may be multiple defs of the
1654 // same virtual register, though.
1655 if (MO.isDef() && Reg != DefReg)
1656 return false;
1657 }
1658
1659 // Everything checked out.
1660 return true;
1661}
1662
1663int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
1664 const MachineFunction *MF = MI.getMF();
1665 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
1666 bool StackGrowsDown =
1667 TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
1668
1669 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
1670 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
1671
1672 if (!isFrameInstr(I: MI))
1673 return 0;
1674
1675 int SPAdj = TFI->alignSPAdjust(SPAdj: getFrameSize(I: MI));
1676
1677 if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
1678 (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
1679 SPAdj = -SPAdj;
1680
1681 return SPAdj;
1682}
1683
1684/// isSchedulingBoundary - Test if the given instruction should be
1685/// considered a scheduling boundary. This primarily includes labels
1686/// and terminators.
1687bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1688 const MachineBasicBlock *MBB,
1689 const MachineFunction &MF) const {
1690 // Terminators and labels can't be scheduled around.
1691 if (MI.isTerminator() || MI.isPosition())
1692 return true;
1693
1694 // INLINEASM_BR can jump to another block
1695 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1696 return true;
1697
1698 // Don't attempt to schedule around any instruction that defines
1699 // a stack-oriented pointer, as it's unlikely to be profitable. This
1700 // saves compile time, because it doesn't require every single
1701 // stack slot reference to depend on the instruction that does the
1702 // modification.
1703 const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
1704 return MI.modifiesRegister(Reg: TLI.getStackPointerRegisterToSaveRestore(), TRI: &TRI);
1705}
1706
1707// Provide a global flag for disabling the PreRA hazard recognizer that targets
1708// may choose to honor.
1709bool TargetInstrInfo::usePreRAHazardRecognizer() const {
1710 return !DisableHazardRecognizer;
1711}
1712
1713// Default implementation of CreateTargetHazardRecognizer.
1714ScheduleHazardRecognizer *TargetInstrInfo::
1715CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1716 const ScheduleDAG *DAG) const {
1717 // Dummy hazard recognizer allows all instructions to issue.
1718 return new ScheduleHazardRecognizer();
1719}
1720
1721// Default implementation of CreateTargetMIHazardRecognizer.
1722ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
1723 const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
1724 return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
1725}
1726
1727// Default implementation of CreateTargetPostRAHazardRecognizer.
1728ScheduleHazardRecognizer *TargetInstrInfo::
1729CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
1730 const ScheduleDAG *DAG) const {
1731 return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
1732}
1733
1734// Default implementation of getMemOperandWithOffset.
1735bool TargetInstrInfo::getMemOperandWithOffset(
1736 const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
1737 bool &OffsetIsScalable, const TargetRegisterInfo * /*RemoveMe*/) const {
1738 SmallVector<const MachineOperand *, 4> BaseOps;
1739 LocationSize Width = LocationSize::precise(Value: 0);
1740 if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
1741 Width, TRI: &TRI) ||
1742 BaseOps.size() != 1)
1743 return false;
1744 BaseOp = BaseOps.front();
1745 return true;
1746}
1747
1748//===----------------------------------------------------------------------===//
1749// SelectionDAG latency interface.
1750//===----------------------------------------------------------------------===//
1751
1752std::optional<unsigned>
1753TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1754 SDNode *DefNode, unsigned DefIdx,
1755 SDNode *UseNode, unsigned UseIdx) const {
1756 if (!ItinData || ItinData->isEmpty())
1757 return std::nullopt;
1758
1759 if (!DefNode->isMachineOpcode())
1760 return std::nullopt;
1761
1762 unsigned DefClass = get(Opcode: DefNode->getMachineOpcode()).getSchedClass();
1763 if (!UseNode->isMachineOpcode())
1764 return ItinData->getOperandCycle(ItinClassIndx: DefClass, OperandIdx: DefIdx);
1765 unsigned UseClass = get(Opcode: UseNode->getMachineOpcode()).getSchedClass();
1766 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1767}
1768
1769unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1770 SDNode *N) const {
1771 if (!ItinData || ItinData->isEmpty())
1772 return 1;
1773
1774 if (!N->isMachineOpcode())
1775 return 1;
1776
1777 return ItinData->getStageLatency(ItinClassIndx: get(Opcode: N->getMachineOpcode()).getSchedClass());
1778}
1779
1780//===----------------------------------------------------------------------===//
1781// MachineInstr latency interface.
1782//===----------------------------------------------------------------------===//
1783
1784unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
1785 const MachineInstr &MI) const {
1786 if (!ItinData || ItinData->isEmpty())
1787 return 1;
1788
1789 unsigned Class = MI.getDesc().getSchedClass();
1790 int UOps = ItinData->Itineraries[Class].NumMicroOps;
1791 if (UOps >= 0)
1792 return UOps;
1793
1794 // The # of u-ops is dynamically determined. The specific target should
1795 // override this function to return the right number.
1796 return 1;
1797}
1798
1799/// Return the default expected latency for a def based on its opcode.
1800unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
1801 const MachineInstr &DefMI) const {
1802 if (DefMI.isTransient())
1803 return 0;
1804 if (DefMI.mayLoad())
1805 return SchedModel.LoadLatency;
1806 if (isHighLatencyDef(opc: DefMI.getOpcode()))
1807 return SchedModel.HighLatency;
1808 return 1;
1809}
1810
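// By default, predication is assumed to add no extra latency. Targets where
// predicated instructions incur additional cycles should override this.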
1811unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
1812 return 0;
1813}
1814
1815unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1816 const MachineInstr &MI,
1817 unsigned *PredCost) const {
1818 // Default to one cycle for no itinerary (two cycles for loads). An "empty"
1819 // itinerary may still have a MinLatency property, which getStageLatency checks.
1820 if (!ItinData)
1821 return MI.mayLoad() ? 2 : 1;
1822
1823 return ItinData->getStageLatency(ItinClassIndx: MI.getDesc().getSchedClass());
1824}
1825
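// A def is considered to have low latency when the itinerary reports an
// operand cycle of at most one for it; without itinerary data, conservatively
// answer no.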
1826bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1827 const MachineInstr &DefMI,
1828 unsigned DefIdx) const {
1829 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1830 if (!ItinData || ItinData->isEmpty())
1831 return false;
1832
1833 unsigned DefClass = DefMI.getDesc().getSchedClass();
1834 std::optional<unsigned> DefCycle =
1835 ItinData->getOperandCycle(ItinClassIndx: DefClass, OperandIdx: DefIdx);
1836 return DefCycle && DefCycle <= 1U;
1837}
1838
1839bool TargetInstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const {
1840 // TODO: We don't split functions where a section attribute has been set
1841 // since the split part may not be placed in a contiguous region. It may also
1842 // be more beneficial to augment the linker to ensure contiguous layout of
1843 // split functions within the same section as specified by the attribute.
1844 if (MF.getFunction().hasSection())
1845 return false;
1846
1847 // We don't want to proceed further for cold functions
1848 // or functions of unknown hotness. Lukewarm functions have no prefix.
1849 std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
1850 if (SectionPrefix &&
1851 (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
1852 return false;
1853 }
1854
1855 return true;
1856}
1857
1858std::optional<ParamLoadedValue>
1859TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
1860 Register Reg) const {
1861 const MachineFunction *MF = MI.getMF();
1862 DIExpression *Expr = DIExpression::get(Context&: MF->getFunction().getContext(), Elements: {});
1863 int64_t Offset;
1864 bool OffsetIsScalable;
1865
1866 // To simplify the sub-register handling, verify that we only need to
1867 // consider physical registers.
1868 assert(MF->getProperties().hasNoVRegs());
1869
1870 if (auto DestSrc = isCopyInstr(MI)) {
1871 Register DestReg = DestSrc->Destination->getReg();
1872
1873 // If the copy destination is the forwarding reg, describe the forwarding
1874 // reg using the copy source as the backup location. Example:
1875 //
1876 // x0 = MOV x7
1877 // call callee(x0) ; x0 described as x7
1878 if (Reg == DestReg)
1879 return ParamLoadedValue(*DestSrc->Source, Expr);
1880
1881 // The copy does not define the register we are describing; give up.
1882 return std::nullopt;
1883 } else if (auto RegImm = isAddImmediate(MI, Reg)) {
1884 Register SrcReg = RegImm->Reg;
1885 Offset = RegImm->Imm;
1886 Expr = DIExpression::prepend(Expr, Flags: DIExpression::ApplyOffset, Offset);
1887 return ParamLoadedValue(MachineOperand::CreateReg(Reg: SrcReg, isDef: false), Expr);
1888 } else if (MI.hasOneMemOperand()) {
1889 // Only describe memory which provably does not escape the function. As
1890 // described in llvm.org/PR43343, escaped memory may be clobbered by the
1891 // callee (or by another thread).
1892 const MachineFrameInfo &MFI = MF->getFrameInfo();
1893 const MachineMemOperand *MMO = MI.memoperands()[0];
1894 const PseudoSourceValue *PSV = MMO->getPseudoValue();
1895
1896 // If the address points to "special" memory (e.g. a spill slot), it's
1897 // sufficient to check that it isn't aliased by any high-level IR value.
1898 if (!PSV || PSV->mayAlias(&MFI))
1899 return std::nullopt;
1900
1901 const MachineOperand *BaseOp;
1902 if (!getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, &TRI))
1903 return std::nullopt;
1904
1905 // FIXME: Scalable offsets are not yet handled in the offset code below.
1906 if (OffsetIsScalable)
1907 return std::nullopt;
1908
1909 // TODO: Can currently only handle mem instructions with a single define.
1910 // An example from the x86 target:
1911 // ...
1912 // DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
1913 // ...
1914 //
1915 if (MI.getNumExplicitDefs() != 1)
1916 return std::nullopt;
1917
1918 // TODO: In what way do we need to take Reg into consideration here?
1919
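// Describe the loaded value as a dereference of the base register plus the
// offset, using the access size from the memory operand (or ~0 if the size is
// unknown).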
1920 SmallVector<uint64_t, 8> Ops;
1921 DIExpression::appendOffset(Ops, Offset);
1922 Ops.push_back(Elt: dwarf::DW_OP_deref_size);
1923 Ops.push_back(Elt: MMO->getSize().hasValue() ? MMO->getSize().getValue()
1924 : ~UINT64_C(0));
1925 Expr = DIExpression::prependOpcodes(Expr, Ops);
1926 return ParamLoadedValue(*BaseOp, Expr);
1927 }
1928
1929 return std::nullopt;
1930}
1931
1932// Get the call frame size just before MI.
1933unsigned TargetInstrInfo::getCallFrameSizeAt(MachineInstr &MI) const {
1934 // Search backwards from MI for the most recent call frame instruction.
1935 MachineBasicBlock *MBB = MI.getParent();
1936 for (auto &AdjI : reverse(C: make_range(x: MBB->instr_begin(), y: MI.getIterator()))) {
1937 if (AdjI.getOpcode() == getCallFrameSetupOpcode())
1938 return getFrameTotalSize(I: AdjI);
1939 if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
1940 return 0;
1941 }
1942
1943 // If none was found, use the call frame size from the start of the basic
1944 // block.
1945 return MBB->getCallFrameSize();
1946}
1947
1948/// Both DefMI and UseMI must be valid. By default, call directly to the
1949/// itinerary. This may be overridden by the target.
1950std::optional<unsigned> TargetInstrInfo::getOperandLatency(
1951 const InstrItineraryData *ItinData, const MachineInstr &DefMI,
1952 unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
1953 unsigned DefClass = DefMI.getDesc().getSchedClass();
1954 unsigned UseClass = UseMI.getDesc().getSchedClass();
1955 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1956}
1957
1958bool TargetInstrInfo::getRegSequenceInputs(
1959 const MachineInstr &MI, unsigned DefIdx,
1960 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1961 assert((MI.isRegSequence() ||
1962 MI.isRegSequenceLike()) && "Instruction does not have the proper type");
1963
1964 if (!MI.isRegSequence())
1965 return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1966
1967 // We are looking at:
1968 // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1969 assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1970 for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1971 OpIdx += 2) {
1972 const MachineOperand &MOReg = MI.getOperand(i: OpIdx);
1973 if (MOReg.isUndef())
1974 continue;
1975 const MachineOperand &MOSubIdx = MI.getOperand(i: OpIdx + 1);
1976 assert(MOSubIdx.isImm() &&
1977 "One of the subindex of the reg_sequence is not an immediate");
1978 // Record Reg:SubReg, SubIdx.
1979 InputRegs.push_back(Elt: RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1980 (unsigned)MOSubIdx.getImm()));
1981 }
1982 return true;
1983}
1984
1985bool TargetInstrInfo::getExtractSubregInputs(
1986 const MachineInstr &MI, unsigned DefIdx,
1987 RegSubRegPairAndIdx &InputReg) const {
1988 assert((MI.isExtractSubreg() ||
1989 MI.isExtractSubregLike()) && "Instruction does not have the proper type");
1990
1991 if (!MI.isExtractSubreg())
1992 return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1993
1994 // We are looking at:
1995 // Def = EXTRACT_SUBREG v0.sub1, sub0.
1996 assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1997 const MachineOperand &MOReg = MI.getOperand(i: 1);
1998 if (MOReg.isUndef())
1999 return false;
2000 const MachineOperand &MOSubIdx = MI.getOperand(i: 2);
2001 assert(MOSubIdx.isImm() &&
2002 "The subindex of the extract_subreg is not an immediate");
2003
2004 InputReg.Reg = MOReg.getReg();
2005 InputReg.SubReg = MOReg.getSubReg();
2006 InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
2007 return true;
2008}
2009
2010bool TargetInstrInfo::getInsertSubregInputs(
2011 const MachineInstr &MI, unsigned DefIdx,
2012 RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
2013 assert((MI.isInsertSubreg() ||
2014 MI.isInsertSubregLike()) && "Instruction does not have the proper type");
2015
2016 if (!MI.isInsertSubreg())
2017 return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
2018
2019 // We are looking at:
2020 // Def = INSERT_SUBREG v0, v1, sub0.
2021 assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
2022 const MachineOperand &MOBaseReg = MI.getOperand(i: 1);
2023 const MachineOperand &MOInsertedReg = MI.getOperand(i: 2);
2024 if (MOInsertedReg.isUndef())
2025 return false;
2026 const MachineOperand &MOSubIdx = MI.getOperand(i: 3);
2027 assert(MOSubIdx.isImm() &&
2028 "One of the subindex of the reg_sequence is not an immediate");
2029 BaseReg.Reg = MOBaseReg.getReg();
2030 BaseReg.SubReg = MOBaseReg.getSubReg();
2031
2032 InsertedReg.Reg = MOInsertedReg.getReg();
2033 InsertedReg.SubReg = MOInsertedReg.getSubReg();
2034 InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
2035 return true;
2036}
2037
2038// Returns a MIRPrinter comment for this machine operand.
2039std::string TargetInstrInfo::createMIROperandComment(
2040 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2041 const TargetRegisterInfo * /*RemoveMe*/) const {
2042
2043 if (!MI.isInlineAsm())
2044 return "";
2045
2046 std::string Flags;
2047 raw_string_ostream OS(Flags);
2048
2049 if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
2050 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
2051 unsigned ExtraInfo = Op.getImm();
2052 OS << interleaved(R: InlineAsm::getExtraInfoNames(ExtraInfo), Separator: " ");
2053 return Flags;
2054 }
2055
2056 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
2057 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
2058 return "";
2059
2060 assert(Op.isImm() && "Expected flag operand to be an immediate");
2061 // Pretty print the inline asm operand descriptor.
2062 unsigned Flag = Op.getImm();
2063 const InlineAsm::Flag F(Flag);
2064 OS << F.getKindName();
2065
2066 unsigned RCID;
2067 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RC&: RCID))
2068 OS << ':' << TRI.getRegClassName(Class: TRI.getRegClass(i: RCID));
2069
2070 if (F.isMemKind()) {
2071 InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
2072 OS << ":" << InlineAsm::getMemConstraintName(C: MCID);
2073 }
2074
2075 unsigned TiedTo;
2076 if (F.isUseOperandTiedToDef(Idx&: TiedTo))
2077 OS << " tiedto:$" << TiedTo;
2078
2079 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
2080 F.getRegMayBeFolded())
2081 OS << " foldable";
2082
2083 return Flags;
2084}
2085
2086TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;
2087
2088void TargetInstrInfo::mergeOutliningCandidateAttributes(
2089 Function &F, std::vector<outliner::Candidate> &Candidates) const {
2090 // Include target features from an arbitrary candidate for the outlined
2091 // function. This makes sure the outlined function knows what kinds of
2092 // instructions are going into it. This is fine, since all parent functions
2093 // must necessarily support the instructions that are in the outlined region.
2094 outliner::Candidate &FirstCand = Candidates.front();
2095 const Function &ParentFn = FirstCand.getMF()->getFunction();
2096 if (ParentFn.hasFnAttribute(Kind: "target-features"))
2097 F.addFnAttr(Attr: ParentFn.getFnAttribute(Kind: "target-features"));
2098 if (ParentFn.hasFnAttribute(Kind: "target-cpu"))
2099 F.addFnAttr(Attr: ParentFn.getFnAttribute(Kind: "target-cpu"));
2100
2101 // Set nounwind, so we don't generate eh_frame.
2102 if (llvm::all_of(Range&: Candidates, P: [](const outliner::Candidate &C) {
2103 return C.getMF()->getFunction().hasFnAttribute(Kind: Attribute::NoUnwind);
2104 }))
2105 F.addFnAttr(Kind: Attribute::NoUnwind);
2106}
2107
2108outliner::InstrType
2109TargetInstrInfo::getOutliningType(const MachineModuleInfo &MMI,
2110 MachineBasicBlock::iterator &MIT,
2111 unsigned Flags) const {
2112 MachineInstr &MI = *MIT;
2113
2114 // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
2115 // have support for outlining those. Special-case that here.
2116 if (MI.isCFIInstruction())
2117 // Just go right to the target implementation.
2118 return getOutliningTypeImpl(MMI, MIT, Flags);
2119
2120 // Be conservative about inline assembly.
2121 if (MI.isInlineAsm())
2122 return outliner::InstrType::Illegal;
2123
2124 // Labels generally can't safely be outlined.
2125 if (MI.isLabel())
2126 return outliner::InstrType::Illegal;
2127
2128 // Don't let debug instructions impact analysis.
2129 if (MI.isDebugInstr())
2130 return outliner::InstrType::Invisible;
2131
2132 // Some other special cases.
2133 switch (MI.getOpcode()) {
2134 case TargetOpcode::IMPLICIT_DEF:
2135 case TargetOpcode::KILL:
2136 case TargetOpcode::LIFETIME_START:
2137 case TargetOpcode::LIFETIME_END:
2138 return outliner::InstrType::Invisible;
2139 default:
2140 break;
2141 }
2142
2143 // Is this a terminator for a basic block?
2144 if (MI.isTerminator()) {
2145 // If this is a branch to another block, we can't outline it.
2146 if (!MI.getParent()->succ_empty())
2147 return outliner::InstrType::Illegal;
2148
2149 // Don't outline if the branch is not unconditional.
2150 if (isPredicated(MI))
2151 return outliner::InstrType::Illegal;
2152 }
2153
2154 // Make sure none of the operands of this instruction do anything that
2155 // might break if they're moved outside their current function.
2156 // This includes MachineBasicBlock references, BlockAddresses,
2157 // constant pool indices and jump table indices.
2158 //
2159 // A quick note on MO_TargetIndex:
2160 // This doesn't seem to be used in any of the architectures that the
2161 // MachineOutliner supports, but it was still filtered out in all of them.
2162 // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
2163 // As such, this check is removed both here and in the target-specific
2164 // implementations. Instead, we assert to make sure this doesn't
2165 // catch anyone off-guard somewhere down the line.
2166 for (const MachineOperand &MOP : MI.operands()) {
2167 // If you hit this assertion, please remove it and adjust
2168 // `getOutliningTypeImpl` for your target appropriately if necessary.
2169 // Adding the assertion back to other supported architectures
2170 // would be nice too :)
2171 assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
2172
2173 // CFI instructions should already have been filtered out at this point.
2174 assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
2175
2176 // PrologEpilogInserter should've already run at this point.
2177 assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
2178
2179 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
2180 return outliner::InstrType::Illegal;
2181 }
2182
2183 // If we don't know, delegate to the target-specific hook.
2184 return getOutliningTypeImpl(MMI, MIT, Flags);
2185}
2186
2187bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2188 unsigned &Flags) const {
2189 // Some instrumentations insert special target opcodes at the start of the
2190 // block which expand to code sequences that must be present.
2191 auto First = MBB.getFirstNonDebugInstr();
2192 if (First == MBB.end())
2193 return true;
2194
2195 if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
2196 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
2197 return false;
2198
2199 // Some instrumentations create special pseudo-instructions at or just before
2200 // the end that must be present.
2201 auto Last = MBB.getLastNonDebugInstr();
2202 if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
2203 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2204 return false;
2205
2206 if (Last != First && Last->isReturn()) {
2207 --Last;
2208 if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
2209 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
2210 return false;
2211 }
2212 return true;
2213}
2214
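// By default, treat calls, instructions with unmodeled side effects, and
// ordered memory references that are not invariant loads as accesses to
// arbitrary (global) memory.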
2215bool TargetInstrInfo::isGlobalMemoryObject(const MachineInstr *MI) const {
2216 return MI->isCall() || MI->hasUnmodeledSideEffects() ||
2217 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad());
2218}
2219