//===-- PPCInstrInfo.cpp - PowerPC Instruction Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the PowerPC implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "PPCInstrInfo.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCHazardRecognizers.h"
#include "PPCInstrBuilder.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "ppc-instr-info"

#define GET_INSTRMAP_INFO
#define GET_INSTRINFO_CTOR_DTOR
#include "PPCGenInstrInfo.inc"

STATISTIC(NumStoreSPILLVSRRCAsVec,
          "Number of spillvsrrc spilled to stack as vec");
STATISTIC(NumStoreSPILLVSRRCAsGpr,
          "Number of spillvsrrc spilled to stack as gpr");
STATISTIC(NumGPRtoVSRSpill, "Number of gpr spills to spillvsrrc");
STATISTIC(CmpIselsConverted,
          "Number of ISELs that depend on comparison of constants converted");
STATISTIC(MissedConvertibleImmediateInstrs,
          "Number of compare-immediate instructions fed by constants");
STATISTIC(NumRcRotatesConvertedToRcAnd,
          "Number of record-form rotates converted to record-form andi");

static cl::opt<bool>
    DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden,
                       cl::desc("Disable analysis for CTR loops"));

static cl::opt<bool>
    DisableCmpOpt("disable-ppc-cmp-opt",
                  cl::desc("Disable compare instruction optimization"),
                  cl::Hidden);

static cl::opt<bool> VSXSelfCopyCrash(
    "crash-on-ppc-vsx-self-copy",
    cl::desc("Causes the backend to crash instead of generating a nop VSX copy"),
    cl::Hidden);

static cl::opt<bool> UseOldLatencyCalc(
    "ppc-old-latency-calc", cl::Hidden,
    cl::desc("Use the old (incorrect) instruction latency calculation"));

static cl::opt<float>
    FMARPFactor("ppc-fma-rp-factor", cl::Hidden, cl::init(1.5),
                cl::desc("Register pressure factor for the transformations."));

static cl::opt<bool> EnableFMARegPressureReduction(
    "ppc-fma-rp-reduction", cl::Hidden, cl::init(true),
    cl::desc("Enable register pressure reduction in the machine combiner pass."));

// Pin the vtable to this file.
void PPCInstrInfo::anchor() {}

PPCInstrInfo::PPCInstrInfo(const PPCSubtarget &STI)
    : PPCGenInstrInfo(STI, RI, PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP,
                      /* CatchRetOpcode */ -1,
                      STI.isPPC64() ? PPC::BLR8 : PPC::BLR),
      Subtarget(STI), RI(STI.getTargetMachine()) {}

/// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
/// this target when scheduling the DAG.
ScheduleHazardRecognizer *
PPCInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                                           const ScheduleDAG *DAG) const {
  unsigned Directive =
      static_cast<const PPCSubtarget *>(STI)->getCPUDirective();
  if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 ||
      Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) {
    const InstrItineraryData *II =
        static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG);
  }

  return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
}

/// CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer
/// to use for this target when scheduling the DAG.
ScheduleHazardRecognizer *
PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                                 const ScheduleDAG *DAG) const {
  unsigned Directive =
      DAG->MF.getSubtarget<PPCSubtarget>().getCPUDirective();

  // FIXME: Leaving this as-is until we have POWER9 scheduling info.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8)
    return new PPCDispatchGroupSBHazardRecognizer(II, DAG);

  // Most subtargets use a PPC970 recognizer.
  if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 &&
      Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) {
    assert(DAG->TII && "No InstrInfo?");

    return new PPCHazardRecognizer970(*DAG);
  }

  return new ScoreboardHazardRecognizer(II, DAG);
}

unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &MI,
                                       unsigned *PredCost) const {
  if (!ItinData || UseOldLatencyCalc)
    return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);

  // The default implementation of getInstrLatency calls getStageLatency, but
  // getStageLatency does not do the right thing for us. While we do have
  // itineraries, most cores are fully pipelined, so the itineraries only
  // express the first part of the pipeline, not every stage. Instead, we need
  // to use the listed output operand cycle number (using operand 0 here, which
  // is an output).
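  // For example, if an itinerary models only the front of the pipeline but
  // records that the result (operand 0) becomes available in cycle 5, we want
  // a latency of 5 rather than the sum of the listed stage cycles that
  // getStageLatency would return. (The numbers here are illustrative, not
  // tied to any particular core.)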

  unsigned Latency = 1;
  unsigned DefClass = MI.getDesc().getSchedClass();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
      continue;

    std::optional<unsigned> Cycle = ItinData->getOperandCycle(DefClass, i);
    if (!Cycle)
      continue;

    Latency = std::max(Latency, *Cycle);
  }

  return Latency;
}

std::optional<unsigned> PPCInstrInfo::getOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
    unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
  std::optional<unsigned> Latency = PPCGenInstrInfo::getOperandLatency(
      ItinData, DefMI, DefIdx, UseMI, UseIdx);

  if (!DefMI.getParent())
    return Latency;

  const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
  Register Reg = DefMO.getReg();

  bool IsRegCR;
  if (Reg.isVirtual()) {
    const MachineRegisterInfo *MRI =
        &DefMI.getParent()->getParent()->getRegInfo();
    IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
              MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
  } else {
    IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
              PPC::CRBITRCRegClass.contains(Reg);
  }

  if (UseMI.isBranch() && IsRegCR) {
    if (!Latency)
      Latency = getInstrLatency(ItinData, DefMI);

    // On some cores, there is an additional delay between writing to a
    // condition register and using it from a branch.
    unsigned Directive = Subtarget.getCPUDirective();
    switch (Directive) {
    default: break;
    case PPC::DIR_7400:
    case PPC::DIR_750:
    case PPC::DIR_970:
    case PPC::DIR_E5500:
    case PPC::DIR_PWR4:
    case PPC::DIR_PWR5:
    case PPC::DIR_PWR5X:
    case PPC::DIR_PWR6:
    case PPC::DIR_PWR6X:
    case PPC::DIR_PWR7:
    case PPC::DIR_PWR8:
      // FIXME: Is this needed for POWER9?
      Latency = *Latency + 2;
      break;
    }
  }

  return Latency;
}

void PPCInstrInfo::setSpecialOperandAttr(MachineInstr &MI,
                                         uint32_t Flags) const {
  MI.setFlags(Flags);
  MI.clearFlag(MachineInstr::MIFlag::NoSWrap);
  MI.clearFlag(MachineInstr::MIFlag::NoUWrap);
  MI.clearFlag(MachineInstr::MIFlag::IsExact);
}

// This function does not list all associative and commutative operations, but
// only those worth feeding through the machine combiner in an attempt to
// reduce the critical path. Mostly, this means floating-point operations,
// because they have high latencies (>= 5 cycles) compared to other operations,
// such as and/or, which are also associative and commutative but have low
// latencies.
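// For example, reassociating "((a + b) + c) + d" into "(a + b) + (c + d)"
// shortens the critical path from three dependent adds to two without adding
// any instructions.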
bool PPCInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                               bool Invert) const {
  if (Invert)
    return false;
  switch (Inst.getOpcode()) {
  // Floating point:
  // FP Add:
  case PPC::FADD:
  case PPC::FADDS:
  // FP Multiply:
  case PPC::FMUL:
  case PPC::FMULS:
  // Altivec Add:
  case PPC::VADDFP:
  // VSX Add:
  case PPC::XSADDDP:
  case PPC::XVADDDP:
  case PPC::XVADDSP:
  case PPC::XSADDSP:
  // VSX Multiply:
  case PPC::XSMULDP:
  case PPC::XVMULDP:
  case PPC::XVMULSP:
  case PPC::XSMULSP:
    return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
           Inst.getFlag(MachineInstr::MIFlag::FmNsz);
  // Fixed point:
  // Multiply:
  case PPC::MULHD:
  case PPC::MULLD:
  case PPC::MULHW:
  case PPC::MULLW:
    return true;
  default:
    return false;
  }
}

#define InfoArrayIdxFMAInst 0
#define InfoArrayIdxFAddInst 1
#define InfoArrayIdxFMULInst 2
#define InfoArrayIdxAddOpIdx 3
#define InfoArrayIdxMULOpIdx 4
#define InfoArrayIdxFSubInst 5
// This array keeps info for FMA instructions:
// Index 0 (InfoArrayIdxFMAInst): the FMA instruction;
// Index 1 (InfoArrayIdxFAddInst): the ADD instruction associated with the FMA;
// Index 2 (InfoArrayIdxFMULInst): the MUL instruction associated with the FMA;
// Index 3 (InfoArrayIdxAddOpIdx): the ADD operand index in the FMA operands;
// Index 4 (InfoArrayIdxMULOpIdx): the first MUL operand index in the FMA
//                                 operands; the second MUL operand index is
//                                 this plus 1;
// Index 5 (InfoArrayIdxFSubInst): the SUB instruction associated with the FMA.
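// For example, the PPC::FMADD row below records that the associated add is
// PPC::FADD, the associated multiply is PPC::FMUL, the FMA's add operand is
// operand 3, and its two multiply operands are operands 1 and 2.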
static const uint16_t FMAOpIdxInfo[][6] = {
    // FIXME: Add more FMA instructions like XSNMADDADP and so on.
    {PPC::XSMADDADP, PPC::XSADDDP, PPC::XSMULDP, 1, 2, PPC::XSSUBDP},
    {PPC::XSMADDASP, PPC::XSADDSP, PPC::XSMULSP, 1, 2, PPC::XSSUBSP},
    {PPC::XVMADDADP, PPC::XVADDDP, PPC::XVMULDP, 1, 2, PPC::XVSUBDP},
    {PPC::XVMADDASP, PPC::XVADDSP, PPC::XVMULSP, 1, 2, PPC::XVSUBSP},
    {PPC::FMADD, PPC::FADD, PPC::FMUL, 3, 1, PPC::FSUB},
    {PPC::FMADDS, PPC::FADDS, PPC::FMULS, 3, 1, PPC::FSUBS}};

// Check if an opcode is an FMA instruction. If it is, return the index in the
// array FMAOpIdxInfo. Otherwise, return -1.
int16_t PPCInstrInfo::getFMAOpIdxInfo(unsigned Opcode) const {
  for (unsigned I = 0; I < std::size(FMAOpIdxInfo); I++)
    if (FMAOpIdxInfo[I][InfoArrayIdxFMAInst] == Opcode)
      return I;
  return -1;
}

// On the PowerPC target, we have two kinds of patterns related to FMA:
// 1: Improve ILP.
//    Try to reassociate FMA chains like below:
//
//    Pattern 1:
//      A = FADD X, Y          (Leaf)
//      B = FMA  A, M21, M22   (Prev)
//      C = FMA  B, M31, M32   (Root)
//    -->
//      A = FMA  X, M21, M22
//      B = FMA  Y, M31, M32
//      C = FADD A, B
//
//    Pattern 2:
//      A = FMA  X, M11, M12   (Leaf)
//      B = FMA  A, M21, M22   (Prev)
//      C = FMA  B, M31, M32   (Root)
//    -->
//      A = FMUL M11, M12
//      B = FMA  X, M21, M22
//      D = FMA  A, M31, M32
//      C = FADD B, D
//
//    This breaks the dependency between A and B, allowing the FMAs to execute
//    in parallel (or back-to-back in a pipeline) instead of depending on each
//    other.
//
// 2: Reduce register pressure.
//    Try to reassociate an FMA with an FSUB and a constant like below, where
//    C is a floating-point constant:
//
//    Pattern 1:
//      A = FSUB X, Y      (Leaf)
//      D = FMA  B, C, A   (Root)
//    -->
//      A = FMA  B, Y, -C
//      D = FMA  A, X, C
//
//    Pattern 2:
//      A = FSUB X, Y      (Leaf)
//      D = FMA  B, A, C   (Root)
//    -->
//      A = FMA  B, Y, -C
//      D = FMA  A, X, C
//
//    Before the transformation, A must be assigned a different hardware
//    register from D. After the transformation, A and D must be assigned the
//    same hardware register due to the TIE attribute of FMA instructions.
//
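//    (In the notation above the first FMA operand is the addend, as with
//    xsmaddadp. In register pressure pattern 1, for instance,
//    D = B + C*A = B + C*(X - Y) before the transformation and
//    D = (B - Y*C) + X*C afterwards, which is the same value.)
//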
bool PPCInstrInfo::getFMAPatterns(MachineInstr &Root,
                                  SmallVectorImpl<unsigned> &Patterns,
                                  bool DoRegPressureReduce) const {
  MachineBasicBlock *MBB = Root.getParent();
  const MachineRegisterInfo *MRI = &MBB->getParent()->getRegInfo();
  const TargetRegisterInfo *TRI = &getRegisterInfo();

  auto IsAllOpsVirtualReg = [](const MachineInstr &Instr) {
    for (const auto &MO : Instr.explicit_operands())
      if (!(MO.isReg() && MO.getReg().isVirtual()))
        return false;
    return true;
  };

  auto IsReassociableAddOrSub = [&](const MachineInstr &Instr,
                                    unsigned OpType) {
    if (Instr.getOpcode() !=
        FMAOpIdxInfo[getFMAOpIdxInfo(Root.getOpcode())][OpType])
      return false;

    // Instruction can be reassociated.
    // Fast math flags may prohibit reassociation.
    if (!(Instr.getFlag(MachineInstr::MIFlag::FmReassoc) &&
          Instr.getFlag(MachineInstr::MIFlag::FmNsz)))
      return false;

    // Instruction operands must be virtual registers for reassociation.
    if (!IsAllOpsVirtualReg(Instr))
      return false;

    // For register pressure reassociation, the FSub must have only one use as
    // we want to delete the sub to save its def.
    if (OpType == InfoArrayIdxFSubInst &&
        !MRI->hasOneNonDBGUse(Instr.getOperand(0).getReg()))
      return false;

    return true;
  };

  auto IsReassociableFMA = [&](const MachineInstr &Instr, int16_t &AddOpIdx,
                               int16_t &MulOpIdx, bool IsLeaf) {
    int16_t Idx = getFMAOpIdxInfo(Instr.getOpcode());
    if (Idx < 0)
      return false;

    // Instruction can be reassociated.
    // Fast math flags may prohibit reassociation.
    if (!(Instr.getFlag(MachineInstr::MIFlag::FmReassoc) &&
          Instr.getFlag(MachineInstr::MIFlag::FmNsz)))
      return false;

    // Instruction operands must be virtual registers for reassociation.
    if (!IsAllOpsVirtualReg(Instr))
      return false;

    MulOpIdx = FMAOpIdxInfo[Idx][InfoArrayIdxMULOpIdx];
    if (IsLeaf)
      return true;

    AddOpIdx = FMAOpIdxInfo[Idx][InfoArrayIdxAddOpIdx];

    const MachineOperand &OpAdd = Instr.getOperand(AddOpIdx);
    MachineInstr *MIAdd = MRI->getUniqueVRegDef(OpAdd.getReg());
    // If the 'add' operand's def is not in the current block, don't do this
    // ILP-related optimization.
    if (!MIAdd || MIAdd->getParent() != MBB)
      return false;

    // If this is not the leaf FMA instruction, its 'add' operand should have
    // only one use, as this FMA will be changed later.
    return MRI->hasOneNonDBGUse(OpAdd.getReg());
  };

  int16_t AddOpIdx = -1;
  int16_t MulOpIdx = -1;

  bool IsUsedOnceL = false;
  bool IsUsedOnceR = false;
  MachineInstr *MULInstrL = nullptr;
  MachineInstr *MULInstrR = nullptr;

  auto IsRPReductionCandidate = [&]() {
    // Currently, we only support float and double.
    // FIXME: add support for other types.
    unsigned Opcode = Root.getOpcode();
    if (Opcode != PPC::XSMADDASP && Opcode != PPC::XSMADDADP)
      return false;

    // Root must be a valid FMA-like instruction.
    // Treat it as a leaf, since we don't care about its add operand.
    if (IsReassociableFMA(Root, AddOpIdx, MulOpIdx, true)) {
      assert((MulOpIdx >= 0) && "mul operand index not right!");
      Register MULRegL = TRI->lookThruSingleUseCopyChain(
          Root.getOperand(MulOpIdx).getReg(), MRI);
      Register MULRegR = TRI->lookThruSingleUseCopyChain(
          Root.getOperand(MulOpIdx + 1).getReg(), MRI);
      if (!MULRegL && !MULRegR)
        return false;

      if (MULRegL && !MULRegR) {
        MULRegR =
            TRI->lookThruCopyLike(Root.getOperand(MulOpIdx + 1).getReg(), MRI);
        IsUsedOnceL = true;
      } else if (!MULRegL && MULRegR) {
        MULRegL =
            TRI->lookThruCopyLike(Root.getOperand(MulOpIdx).getReg(), MRI);
        IsUsedOnceR = true;
      } else {
        IsUsedOnceL = true;
        IsUsedOnceR = true;
      }

      if (!MULRegL.isVirtual() || !MULRegR.isVirtual())
        return false;

      MULInstrL = MRI->getVRegDef(MULRegL);
      MULInstrR = MRI->getVRegDef(MULRegR);
      return true;
    }
    return false;
  };

  // Register pressure FMA reassociation patterns.
  if (DoRegPressureReduce && IsRPReductionCandidate()) {
    assert((MULInstrL && MULInstrR) &&
           "wrong register pressure reduction candidate!");
    // Register pressure pattern 1
    if (isLoadFromConstantPool(MULInstrL) && IsUsedOnceR &&
        IsReassociableAddOrSub(*MULInstrR, InfoArrayIdxFSubInst)) {
      LLVM_DEBUG(dbgs() << "add pattern REASSOC_XY_BCA\n");
      Patterns.push_back(PPCMachineCombinerPattern::REASSOC_XY_BCA);
      return true;
    }

    // Register pressure pattern 2
    if ((isLoadFromConstantPool(MULInstrR) && IsUsedOnceL &&
         IsReassociableAddOrSub(*MULInstrL, InfoArrayIdxFSubInst))) {
      LLVM_DEBUG(dbgs() << "add pattern REASSOC_XY_BAC\n");
      Patterns.push_back(PPCMachineCombinerPattern::REASSOC_XY_BAC);
      return true;
    }
  }

  // ILP FMA reassociation patterns.
  // Root must be a valid FMA-like instruction.
  AddOpIdx = -1;
  if (!IsReassociableFMA(Root, AddOpIdx, MulOpIdx, false))
    return false;

  assert((AddOpIdx >= 0) && "add operand index not right!");

  Register RegB = Root.getOperand(AddOpIdx).getReg();
  MachineInstr *Prev = MRI->getUniqueVRegDef(RegB);

  // Prev must be a valid FMA-like instruction.
  AddOpIdx = -1;
  if (!IsReassociableFMA(*Prev, AddOpIdx, MulOpIdx, false))
    return false;

  assert((AddOpIdx >= 0) && "add operand index not right!");

  Register RegA = Prev->getOperand(AddOpIdx).getReg();
  MachineInstr *Leaf = MRI->getUniqueVRegDef(RegA);
  AddOpIdx = -1;
  if (IsReassociableFMA(*Leaf, AddOpIdx, MulOpIdx, true)) {
    Patterns.push_back(PPCMachineCombinerPattern::REASSOC_XMM_AMM_BMM);
    LLVM_DEBUG(dbgs() << "add pattern REASSOC_XMM_AMM_BMM\n");
    return true;
  }
  if (IsReassociableAddOrSub(*Leaf, InfoArrayIdxFAddInst)) {
    Patterns.push_back(PPCMachineCombinerPattern::REASSOC_XY_AMM_BMM);
    LLVM_DEBUG(dbgs() << "add pattern REASSOC_XY_AMM_BMM\n");
    return true;
  }
  return false;
}

void PPCInstrInfo::finalizeInsInstrs(
    MachineInstr &Root, unsigned &Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs) const {
  assert(!InsInstrs.empty() && "Instructions set to be inserted is empty!");

  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineConstantPool *MCP = MF->getConstantPool();

  int16_t Idx = getFMAOpIdxInfo(Root.getOpcode());
  if (Idx < 0)
    return;

  uint16_t FirstMulOpIdx = FMAOpIdxInfo[Idx][InfoArrayIdxMULOpIdx];

  // For now we only need to fix up the placeholder for the register pressure
  // reduction patterns.
  Register ConstReg = 0;
  switch (Pattern) {
  case PPCMachineCombinerPattern::REASSOC_XY_BCA:
    ConstReg =
        TRI->lookThruCopyLike(Root.getOperand(FirstMulOpIdx).getReg(), MRI);
    break;
  case PPCMachineCombinerPattern::REASSOC_XY_BAC:
    ConstReg =
        TRI->lookThruCopyLike(Root.getOperand(FirstMulOpIdx + 1).getReg(), MRI);
    break;
  default:
    // Not a register pressure reduction pattern.
    return;
  }

  MachineInstr *ConstDefInstr = MRI->getVRegDef(ConstReg);
  // Get the constant value from the constant pool.
  const Constant *C = getConstantFromConstantPool(ConstDefInstr);
  assert(isa<llvm::ConstantFP>(C) && "not a valid constant!");

  // Get the negated fp constant.
  APFloat F1(cast<ConstantFP>(C)->getValueAPF());
  F1.changeSign();
  Constant *NegC = ConstantFP::get(cast<ConstantFP>(C)->getContext(), F1);
  Align Alignment = MF->getDataLayout().getPrefTypeAlign(C->getType());

  // Put the negated fp constant into the constant pool.
  unsigned ConstPoolIdx = MCP->getConstantPoolIndex(NegC, Alignment);

  MachineOperand *Placeholder = nullptr;
  // Record the placeholder PPC::ZERO8 we added in reassociateFMA.
  for (auto *Inst : InsInstrs) {
    for (MachineOperand &Operand : Inst->explicit_operands()) {
      assert(Operand.isReg() && "Invalid instruction in InsInstrs!");
      if (Operand.getReg() == PPC::ZERO8) {
        Placeholder = &Operand;
        break;
      }
    }
  }

  assert(Placeholder && "Placeholder does not exist!");

  // Generate instructions to load the constant fp from the constant pool.
  // We only support PPC64 and the medium code model.
  Register LoadNewConst =
      generateLoadForNewConst(ConstPoolIdx, &Root, C->getType(), InsInstrs);

  // Fill the placeholder with the new load from the constant pool.
  Placeholder->setReg(LoadNewConst);
}

bool PPCInstrInfo::shouldReduceRegisterPressure(
    const MachineBasicBlock *MBB, const RegisterClassInfo *RegClassInfo) const {

  if (!EnableFMARegPressureReduction)
    return false;

  // Currently, we only enable register pressure reduction in the machine
  // combiner for: 1) PPC64; 2) the medium code model; 3) Power9, which also
  // has vector support.
  //
  // So we need the following instructions to access a TOC entry:
  //
  //   %6:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, %const.0
  //   %7:vssrc = DFLOADf32 target-flags(ppc-toc-lo) %const.0,
  //     killed %6:g8rc_and_g8rc_nox0, implicit $x2 :: (load 4 from constant-pool)
  //
  // FIXME: add more supported targets, like the small and large code models,
  // PPC32, AIX.
  if (!(Subtarget.isPPC64() && Subtarget.hasP9Vector() &&
        Subtarget.getTargetMachine().getCodeModel() == CodeModel::Medium))
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  const MachineFunction *MF = MBB->getParent();
  const MachineRegisterInfo *MRI = &MF->getRegInfo();

  auto GetMBBPressure =
      [&](const MachineBasicBlock *MBB) -> std::vector<unsigned> {
    RegionPressure Pressure;
    RegPressureTracker RPTracker(Pressure);

    // Initialize the register pressure tracker.
    RPTracker.init(MBB->getParent(), RegClassInfo, nullptr, MBB, MBB->end(),
                   /*TrackLaneMasks=*/false, /*TrackUntiedDefs=*/true);

    for (const auto &MI : reverse(*MBB)) {
      if (MI.isDebugValue() || MI.isDebugLabel())
        continue;
      RegisterOperands RegOpers;
      RegOpers.collect(MI, *TRI, *MRI, /*TrackLaneMasks=*/false,
                       /*IgnoreDead=*/false);
      RPTracker.recedeSkipDebugValues();
      assert(&*RPTracker.getPos() == &MI && "RPTracker sync error!");
      RPTracker.recede(RegOpers);
    }

    // Close the RPTracker to finalize live-ins.
    RPTracker.closeRegion();

    return RPTracker.getPressure().MaxSetPressure;
  };

  // For now we only care about float and double type FMAs.
  unsigned VSSRCLimit =
      RegClassInfo->getRegPressureSetLimit(PPC::RegisterPressureSets::VSSRC);

  // Only reduce register pressure when pressure is high.
  return GetMBBPressure(MBB)[PPC::RegisterPressureSets::VSSRC] >
         (float)VSSRCLimit * FMARPFactor;
}

bool PPCInstrInfo::isLoadFromConstantPool(MachineInstr *I) const {
  // \p I must have exactly one memory operand, and it must be a load from the
  // constant pool.
  if (!I->hasOneMemOperand())
    return false;

  MachineMemOperand *Op = I->memoperands()[0];
  return Op->isLoad() && Op->getPseudoValue() &&
         Op->getPseudoValue()->kind() == PseudoSourceValue::ConstantPool;
}

Register PPCInstrInfo::generateLoadForNewConst(
    unsigned Idx, MachineInstr *MI, Type *Ty,
    SmallVectorImpl<MachineInstr *> &InsInstrs) const {
  // For now we only support PPC64, the medium code model, and P9 with vector
  // support. We rely on the fixed instruction pattern used to access the
  // constant pool; see shouldReduceRegisterPressure.
  assert((Subtarget.isPPC64() && Subtarget.hasP9Vector() &&
          Subtarget.getTargetMachine().getCodeModel() == CodeModel::Medium) &&
         "Target not supported!\n");

  MachineFunction *MF = MI->getMF();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  // Generate ADDIStocHA8.
  Register VReg1 = MRI->createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
  MachineInstrBuilder TOCOffset =
      BuildMI(*MF, MI->getDebugLoc(), get(PPC::ADDIStocHA8), VReg1)
          .addReg(PPC::X2)
          .addConstantPoolIndex(Idx);

  assert((Ty->isFloatTy() || Ty->isDoubleTy()) &&
         "Only float and double are supported!");

  unsigned LoadOpcode;
  // Should be float type or double type.
  if (Ty->isFloatTy())
    LoadOpcode = PPC::DFLOADf32;
  else
    LoadOpcode = PPC::DFLOADf64;

  const TargetRegisterClass *RC = MRI->getRegClass(MI->getOperand(0).getReg());
  Register VReg2 = MRI->createVirtualRegister(RC);
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getConstantPool(*MF), MachineMemOperand::MOLoad,
      Ty->getScalarSizeInBits() / 8, MF->getDataLayout().getPrefTypeAlign(Ty));

  // Generate the load from the constant pool.
  MachineInstrBuilder Load =
      BuildMI(*MF, MI->getDebugLoc(), get(LoadOpcode), VReg2)
          .addConstantPoolIndex(Idx)
          .addReg(VReg1, getKillRegState(true))
          .addMemOperand(MMO);

  Load->getOperand(1).setTargetFlags(PPCII::MO_TOC_LO);

  // Insert the TOC load instructions into InsInstrs.
  InsInstrs.insert(InsInstrs.begin(), Load);
  InsInstrs.insert(InsInstrs.begin(), TOCOffset);
  return VReg2;
}

// Return the constant from the constant pool if \p I is a load from the
// constant pool.
const Constant *
PPCInstrInfo::getConstantFromConstantPool(MachineInstr *I) const {
  MachineFunction *MF = I->getMF();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineConstantPool *MCP = MF->getConstantPool();
  assert(I->mayLoad() && "Should be a load instruction.\n");
  for (auto MO : I->uses()) {
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (Reg == 0 || !Reg.isVirtual())
      continue;
    // Find the TOC address.
    MachineInstr *DefMI = MRI->getVRegDef(Reg);
    for (auto MO2 : DefMI->uses())
      if (MO2.isCPI())
        return (MCP->getConstants())[MO2.getIndex()].Val.ConstVal;
  }
  return nullptr;
}

CombinerObjective PPCInstrInfo::getCombinerObjective(unsigned Pattern) const {
  switch (Pattern) {
  case PPCMachineCombinerPattern::REASSOC_XY_AMM_BMM:
  case PPCMachineCombinerPattern::REASSOC_XMM_AMM_BMM:
    return CombinerObjective::MustReduceDepth;
  case PPCMachineCombinerPattern::REASSOC_XY_BCA:
  case PPCMachineCombinerPattern::REASSOC_XY_BAC:
    return CombinerObjective::MustReduceRegisterPressure;
  default:
    return TargetInstrInfo::getCombinerObjective(Pattern);
  }
}

bool PPCInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
    bool DoRegPressureReduce) const {
  // Using the machine combiner in this way is potentially expensive, so
  // restrict to when aggressive optimizations are desired.
  if (Subtarget.getTargetMachine().getOptLevel() != CodeGenOptLevel::Aggressive)
    return false;

  if (getFMAPatterns(Root, Patterns, DoRegPressureReduce))
    return true;

  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
                                                     DoRegPressureReduce);
}

void PPCInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
  switch (Pattern) {
  case PPCMachineCombinerPattern::REASSOC_XY_AMM_BMM:
  case PPCMachineCombinerPattern::REASSOC_XMM_AMM_BMM:
  case PPCMachineCombinerPattern::REASSOC_XY_BCA:
  case PPCMachineCombinerPattern::REASSOC_XY_BAC:
    reassociateFMA(Root, Pattern, InsInstrs, DelInstrs, InstrIdxForVirtReg);
    break;
  default:
    // Reassociate default patterns.
    TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
                                                DelInstrs, InstrIdxForVirtReg);
    break;
  }
}

void PPCInstrInfo::reassociateFMA(
    MachineInstr &Root, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineOperand &OpC = Root.getOperand(0);
  Register RegC = OpC.getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(RegC);
  MRI.constrainRegClass(RegC, RC);

  unsigned FmaOp = Root.getOpcode();
  int16_t Idx = getFMAOpIdxInfo(FmaOp);
  assert(Idx >= 0 && "Root must be an FMA instruction");

  bool IsILPReassociate =
      (Pattern == PPCMachineCombinerPattern::REASSOC_XY_AMM_BMM) ||
      (Pattern == PPCMachineCombinerPattern::REASSOC_XMM_AMM_BMM);

  uint16_t AddOpIdx = FMAOpIdxInfo[Idx][InfoArrayIdxAddOpIdx];
  uint16_t FirstMulOpIdx = FMAOpIdxInfo[Idx][InfoArrayIdxMULOpIdx];

  MachineInstr *Prev = nullptr;
  MachineInstr *Leaf = nullptr;
  switch (Pattern) {
  default:
    llvm_unreachable("not recognized pattern!");
  case PPCMachineCombinerPattern::REASSOC_XY_AMM_BMM:
  case PPCMachineCombinerPattern::REASSOC_XMM_AMM_BMM:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(AddOpIdx).getReg());
    Leaf = MRI.getUniqueVRegDef(Prev->getOperand(AddOpIdx).getReg());
    break;
  case PPCMachineCombinerPattern::REASSOC_XY_BAC: {
    Register MULReg =
        TRI->lookThruCopyLike(Root.getOperand(FirstMulOpIdx).getReg(), &MRI);
    Leaf = MRI.getVRegDef(MULReg);
    break;
  }
  case PPCMachineCombinerPattern::REASSOC_XY_BCA: {
    Register MULReg = TRI->lookThruCopyLike(
        Root.getOperand(FirstMulOpIdx + 1).getReg(), &MRI);
    Leaf = MRI.getVRegDef(MULReg);
    break;
  }
  }

  uint32_t IntersectedFlags = 0;
  if (IsILPReassociate)
    IntersectedFlags = Root.getFlags() & Prev->getFlags() & Leaf->getFlags();
  else
    IntersectedFlags = Root.getFlags() & Leaf->getFlags();

  auto GetOperandInfo = [&](const MachineOperand &Operand, Register &Reg,
                            bool &KillFlag) {
    Reg = Operand.getReg();
    MRI.constrainRegClass(Reg, RC);
    KillFlag = Operand.isKill();
  };

  auto GetFMAInstrInfo = [&](const MachineInstr &Instr, Register &MulOp1,
                             Register &MulOp2, Register &AddOp,
                             bool &MulOp1KillFlag, bool &MulOp2KillFlag,
                             bool &AddOpKillFlag) {
    GetOperandInfo(Instr.getOperand(FirstMulOpIdx), MulOp1, MulOp1KillFlag);
    GetOperandInfo(Instr.getOperand(FirstMulOpIdx + 1), MulOp2, MulOp2KillFlag);
    GetOperandInfo(Instr.getOperand(AddOpIdx), AddOp, AddOpKillFlag);
  };

  Register RegM11, RegM12, RegX, RegY, RegM21, RegM22, RegM31, RegM32, RegA11,
      RegA21, RegB;
  bool KillX = false, KillY = false, KillM11 = false, KillM12 = false,
       KillM21 = false, KillM22 = false, KillM31 = false, KillM32 = false,
       KillA11 = false, KillA21 = false, KillB = false;

  GetFMAInstrInfo(Root, RegM31, RegM32, RegB, KillM31, KillM32, KillB);

  if (IsILPReassociate)
    GetFMAInstrInfo(*Prev, RegM21, RegM22, RegA21, KillM21, KillM22, KillA21);

  if (Pattern == PPCMachineCombinerPattern::REASSOC_XMM_AMM_BMM) {
    GetFMAInstrInfo(*Leaf, RegM11, RegM12, RegA11, KillM11, KillM12, KillA11);
    GetOperandInfo(Leaf->getOperand(AddOpIdx), RegX, KillX);
  } else if (Pattern == PPCMachineCombinerPattern::REASSOC_XY_AMM_BMM) {
    GetOperandInfo(Leaf->getOperand(1), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(2), RegY, KillY);
  } else {
    // Get the FSUB instruction info.
    GetOperandInfo(Leaf->getOperand(1), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(2), RegY, KillY);
  }

  // Create new virtual registers for the new results instead of
  // recycling legacy ones because the MachineCombiner's computation of the
  // critical path requires a new register definition rather than an existing
  // one.
  // For register pressure reassociation, we only need to create one virtual
  // register for the new FMA.
  Register NewVRA = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVRA, 0));

  Register NewVRB = 0;
  if (IsILPReassociate) {
    NewVRB = MRI.createVirtualRegister(RC);
    InstrIdxForVirtReg.insert(std::make_pair(NewVRB, 1));
  }

  Register NewVRD = 0;
  if (Pattern == PPCMachineCombinerPattern::REASSOC_XMM_AMM_BMM) {
    NewVRD = MRI.createVirtualRegister(RC);
    InstrIdxForVirtReg.insert(std::make_pair(NewVRD, 2));
  }

  auto AdjustOperandOrder = [&](MachineInstr *MI, Register RegAdd, bool KillAdd,
                                Register RegMul1, bool KillRegMul1,
                                Register RegMul2, bool KillRegMul2) {
    MI->getOperand(AddOpIdx).setReg(RegAdd);
    MI->getOperand(AddOpIdx).setIsKill(KillAdd);
    MI->getOperand(FirstMulOpIdx).setReg(RegMul1);
    MI->getOperand(FirstMulOpIdx).setIsKill(KillRegMul1);
    MI->getOperand(FirstMulOpIdx + 1).setReg(RegMul2);
    MI->getOperand(FirstMulOpIdx + 1).setIsKill(KillRegMul2);
  };

  MachineInstrBuilder NewARegPressure, NewCRegPressure;
  switch (Pattern) {
  default:
    llvm_unreachable("not recognized pattern!");
  case PPCMachineCombinerPattern::REASSOC_XY_AMM_BMM: {
    // Create new instructions for insertion.
    MachineInstrBuilder MINewB =
        BuildMI(*MF, Prev->getDebugLoc(), get(FmaOp), NewVRB)
            .addReg(RegX, getKillRegState(KillX))
            .addReg(RegM21, getKillRegState(KillM21))
            .addReg(RegM22, getKillRegState(KillM22));
    MachineInstrBuilder MINewA =
        BuildMI(*MF, Root.getDebugLoc(), get(FmaOp), NewVRA)
            .addReg(RegY, getKillRegState(KillY))
            .addReg(RegM31, getKillRegState(KillM31))
            .addReg(RegM32, getKillRegState(KillM32));
    // If AddOpIdx is not 1, adjust the order.
    if (AddOpIdx != 1) {
      AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
      AdjustOperandOrder(MINewA, RegY, KillY, RegM31, KillM31, RegM32, KillM32);
    }

    MachineInstrBuilder MINewC =
        BuildMI(*MF, Root.getDebugLoc(),
                get(FMAOpIdxInfo[Idx][InfoArrayIdxFAddInst]), RegC)
            .addReg(NewVRB, getKillRegState(true))
            .addReg(NewVRA, getKillRegState(true));

    // Update flags for the newly created instructions.
    setSpecialOperandAttr(*MINewA, IntersectedFlags);
    setSpecialOperandAttr(*MINewB, IntersectedFlags);
    setSpecialOperandAttr(*MINewC, IntersectedFlags);

    // Record new instructions for insertion.
    InsInstrs.push_back(MINewA);
    InsInstrs.push_back(MINewB);
    InsInstrs.push_back(MINewC);
    break;
  }
  case PPCMachineCombinerPattern::REASSOC_XMM_AMM_BMM: {
    assert(NewVRD && "new FMA register not created!");
    // Create new instructions for insertion.
    MachineInstrBuilder MINewA =
        BuildMI(*MF, Leaf->getDebugLoc(),
                get(FMAOpIdxInfo[Idx][InfoArrayIdxFMULInst]), NewVRA)
            .addReg(RegM11, getKillRegState(KillM11))
            .addReg(RegM12, getKillRegState(KillM12));
    MachineInstrBuilder MINewB =
        BuildMI(*MF, Prev->getDebugLoc(), get(FmaOp), NewVRB)
            .addReg(RegX, getKillRegState(KillX))
            .addReg(RegM21, getKillRegState(KillM21))
            .addReg(RegM22, getKillRegState(KillM22));
    MachineInstrBuilder MINewD =
        BuildMI(*MF, Root.getDebugLoc(), get(FmaOp), NewVRD)
            .addReg(NewVRA, getKillRegState(true))
            .addReg(RegM31, getKillRegState(KillM31))
            .addReg(RegM32, getKillRegState(KillM32));
    // If AddOpIdx is not 1, adjust the order.
    if (AddOpIdx != 1) {
      AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
      AdjustOperandOrder(MINewD, NewVRA, true, RegM31, KillM31, RegM32,
                         KillM32);
    }

    MachineInstrBuilder MINewC =
        BuildMI(*MF, Root.getDebugLoc(),
                get(FMAOpIdxInfo[Idx][InfoArrayIdxFAddInst]), RegC)
            .addReg(NewVRB, getKillRegState(true))
            .addReg(NewVRD, getKillRegState(true));

    // Update flags for the newly created instructions.
    setSpecialOperandAttr(*MINewA, IntersectedFlags);
    setSpecialOperandAttr(*MINewB, IntersectedFlags);
    setSpecialOperandAttr(*MINewD, IntersectedFlags);
    setSpecialOperandAttr(*MINewC, IntersectedFlags);

    // Record new instructions for insertion.
    InsInstrs.push_back(MINewA);
    InsInstrs.push_back(MINewB);
    InsInstrs.push_back(MINewD);
    InsInstrs.push_back(MINewC);
    break;
  }
  case PPCMachineCombinerPattern::REASSOC_XY_BAC:
  case PPCMachineCombinerPattern::REASSOC_XY_BCA: {
    Register VarReg;
    bool KillVarReg = false;
    if (Pattern == PPCMachineCombinerPattern::REASSOC_XY_BCA) {
      VarReg = RegM31;
      KillVarReg = KillM31;
    } else {
      VarReg = RegM32;
      KillVarReg = KillM32;
    }
    // We don't want to fetch the negated constant from the constant pool too
    // early, as the created entry will not be deleted even if it has no users.
    // Since all operands of Leaf and Root are virtual registers, we use the
    // zero register here as a placeholder. When the InsInstrs is selected in
    // MachineCombiner, we call finalizeInsInstrs to replace the zero register
    // with a virtual register which is a load from the constant pool.
    NewARegPressure = BuildMI(*MF, Root.getDebugLoc(), get(FmaOp), NewVRA)
                          .addReg(RegB, getKillRegState(KillB))
                          .addReg(RegY, getKillRegState(KillY))
                          .addReg(PPC::ZERO8);
    NewCRegPressure = BuildMI(*MF, Root.getDebugLoc(), get(FmaOp), RegC)
                          .addReg(NewVRA, getKillRegState(true))
                          .addReg(RegX, getKillRegState(KillX))
                          .addReg(VarReg, getKillRegState(KillVarReg));
    // For now, we only support xsmaddadp/xsmaddasp; their add operands are
    // both at index 1, so there is no need to adjust.
    // FIXME: when adding support for more FMA instructions, like fma/fmas,
    // adjust the operand index here.
    break;
  }
  }

  if (!IsILPReassociate) {
    setSpecialOperandAttr(*NewARegPressure, IntersectedFlags);
    setSpecialOperandAttr(*NewCRegPressure, IntersectedFlags);

    InsInstrs.push_back(NewARegPressure);
    InsInstrs.push_back(NewCRegPressure);
  }

  assert(!InsInstrs.empty() &&
         "Insertion instructions set should not be empty!");

  // Record old instructions for deletion.
  DelInstrs.push_back(Leaf);
  if (IsILPReassociate)
    DelInstrs.push_back(Prev);
  DelInstrs.push_back(&Root);
}

// Detect 32 -> 64-bit extensions where we may reuse the low sub-register.
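// For example, given "%1:g8rc = EXTSW_32_64 %0:gprc", the coalescer may
// rewrite uses of %0 as %1.sub_32, since the low 32 bits of the result equal
// the source. (The register names here are illustrative.)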
bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                         Register &SrcReg, Register &DstReg,
                                         unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: return false;
  case PPC::EXTSW:
  case PPC::EXTSW_32:
  case PPC::EXTSW_32_64:
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = PPC::sub_32;
    return true;
  }
}

Register PPCInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex) const {
  if (llvm::is_contained(getLoadOpcodesForSpillArray(), MI.getOpcode())) {
    // Check for the operands added by addFrameReference (the immediate is the
    // offset, which defaults to 0).
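    // For example, a reload has the form "%0 = LWZ 0, %stack.0" (or one of
    // the other spill opcodes): operand 1 is the immediate offset and
    // operand 2 is the frame index. (The opcode and register names here are
    // illustrative.)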
    if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
        MI.getOperand(2).isFI()) {
      FrameIndex = MI.getOperand(2).getIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return 0;
}

// For opcodes with the ReMaterializable flag set, this function is called to
// verify that the instruction is really rematerializable.
bool PPCInstrInfo::isReMaterializableImpl(
    const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    // Let the base implementation decide.
    break;
  case PPC::LI:
  case PPC::LI8:
  case PPC::PLI:
  case PPC::PLI8:
  case PPC::LIS:
  case PPC::LIS8:
  case PPC::ADDIStocHA:
  case PPC::ADDIStocHA8:
  case PPC::ADDItocL:
  case PPC::ADDItocL8:
  case PPC::LOAD_STACK_GUARD:
  case PPC::PPCLdFixedAddr:
  case PPC::XXLXORz:
  case PPC::XXLXORspz:
  case PPC::XXLXORdpz:
  case PPC::XXLEQVOnes:
  case PPC::XXSPLTI32DX:
  case PPC::XXSPLTIW:
  case PPC::XXSPLTIDP:
  case PPC::V_SET0B:
  case PPC::V_SET0H:
  case PPC::V_SET0:
  case PPC::V_SETALLONESB:
  case PPC::V_SETALLONESH:
  case PPC::V_SETALLONES:
  case PPC::CRSET:
  case PPC::CRUNSET:
  case PPC::XXSETACCZ:
  case PPC::DMXXSETACCZ:
    return true;
  }
  return TargetInstrInfo::isReMaterializableImpl(MI);
}

Register PPCInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (llvm::is_contained(getStoreOpcodesForSpillArray(), MI.getOpcode())) {
    if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
        MI.getOperand(2).isFI()) {
      FrameIndex = MI.getOperand(2).getIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return 0;
}

MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {
  MachineFunction &MF = *MI.getParent()->getParent();

  // Normal instructions can be commuted the obvious way.
  if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMI_rec)
    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  // Note that RLWIMI can be commuted as a 32-bit instruction, but not as a
  // 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because
  // changing the relative order of the mask operands might change what happens
  // to the high bits of the mask (and, thus, the result).

  // Cannot commute if it has a non-zero rotate count.
  if (MI.getOperand(3).getImm() != 0)
    return nullptr;

  // If we have a zero rotate count, we have:
  //   M = mask(MB,ME)
  //   Op0 = (Op1 & ~M) | (Op2 & M)
  // Change this to:
  //   M = mask((ME+1)&31, (MB-1)&31)
  //   Op0 = (Op2 & ~M) | (Op1 & M)
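  // For example, with a zero rotate count and MB=16, ME=31, the mask M is
  // 0x0000FFFF (IBM bit numbering); the commuted form uses
  // mask((31+1)&31, (16-1)&31) = mask(0, 15) = 0xFFFF0000, so each operand
  // now supplies the other half of the result.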

  // Swap op1/op2.
  assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
         "Only operands 1 and 2 can be swapped in RLWIMI/RLWIMI_rec.");
  Register Reg0 = MI.getOperand(0).getReg();
  Register Reg1 = MI.getOperand(1).getReg();
  Register Reg2 = MI.getOperand(2).getReg();
  unsigned SubReg1 = MI.getOperand(1).getSubReg();
  unsigned SubReg2 = MI.getOperand(2).getSubReg();
  bool Reg1IsKill = MI.getOperand(1).isKill();
  bool Reg2IsKill = MI.getOperand(2).isKill();
  bool ChangeReg0 = false;
  // If machine instrs are no longer in two-address form, update the
  // destination register as well.
  if (Reg0 == Reg1) {
    // Must be a two-address instruction (i.e. op1 is tied to op0).
    assert(MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) == 0 &&
           "Expecting a two-address instruction!");
    assert(MI.getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
    Reg2IsKill = false;
    ChangeReg0 = true;
  }

  // Masks.
  unsigned MB = MI.getOperand(4).getImm();
  unsigned ME = MI.getOperand(5).getImm();

  // We can't commute a trivial mask (there is no way to represent an all-zero
  // mask).
  if (MB == 0 && ME == 31)
    return nullptr;

  if (NewMI) {
    // Create a new instruction.
    Register Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
    bool Reg0IsDead = MI.getOperand(0).isDead();
    return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())
        .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill))
        .addImm((ME + 1) & 31)
        .addImm((MB - 1) & 31);
  }

  if (ChangeReg0) {
    MI.getOperand(0).setReg(Reg2);
    MI.getOperand(0).setSubReg(SubReg2);
  }
  MI.getOperand(2).setReg(Reg1);
  MI.getOperand(1).setReg(Reg2);
  MI.getOperand(2).setSubReg(SubReg1);
  MI.getOperand(1).setSubReg(SubReg2);
  MI.getOperand(2).setIsKill(Reg1IsKill);
  MI.getOperand(1).setIsKill(Reg2IsKill);

  // Swap the mask around.
  MI.getOperand(4).setImm((ME + 1) & 31);
  MI.getOperand(5).setImm((MB - 1) & 31);
  return &MI;
}

bool PPCInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                         unsigned &SrcOpIdx1,
                                         unsigned &SrcOpIdx2) const {
  // For VSX A-Type FMA instructions, it is the first two operands that can be
  // commuted; however, because the non-encoded tied input operand is listed
  // first, the operands to swap are actually the second and third.
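  // For example, in "%0 = XSMADDADP %0(tied), %1, %2", the multiplicands %1
  // and %2 occupy operand slots 2 and 3 (slot 1 is the tied accumulator), so
  // those are the indices reported below. (The register names are
  // illustrative.)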

  int AltOpc = PPC::getAltVSXFMAOpcode(MI.getOpcode());
  if (AltOpc == -1)
    return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);

  // The commutable operand indices are 2 and 3. Return them in SrcOpIdx1
  // and SrcOpIdx2.
  return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
}

void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI) const {
  // This function is used for scheduling, and the nop wanted here is the type
  // that terminates dispatch groups on the POWER cores.
  unsigned Directive = Subtarget.getCPUDirective();
  unsigned Opcode;
  switch (Directive) {
  default:            Opcode = PPC::NOP; break;
  case PPC::DIR_PWR6: Opcode = PPC::NOP_GT_PWR6; break;
  case PPC::DIR_PWR7: Opcode = PPC::NOP_GT_PWR7; break;
  // FIXME: Update when the POWER8 instruction scheduling model is ready.
  case PPC::DIR_PWR8: Opcode = PPC::NOP_GT_PWR7; break;
  // FIXME: Update when the POWER9 scheduling model is ready.
  case PPC::DIR_PWR9: Opcode = PPC::NOP_GT_PWR7; break;
  }

  DebugLoc DL;
  BuildMI(MBB, MI, DL, get(Opcode));
}

/// Return the noop instruction to use.
MCInst PPCInstrInfo::getNop() const {
  MCInst Nop;
  Nop.setOpcode(PPC::NOP);
  return Nop;
}

// Branch analysis.
// Note: If the condition register is set to CTR or CTR8, then this is a
// BDNZ (imm == 1) or BDZ (imm == 0) branch.
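// The two-element Cond vector produced below encodes the branch as follows:
// for BCC, Cond[0] is the PPC::Predicate code and Cond[1] the CR register;
// for BC/BCn, Cond[0] is PRED_BIT_SET/PRED_BIT_UNSET and Cond[1] the CR bit;
// for BDNZ/BDZ, Cond[0] is 1/0 and Cond[1] is CTR or CTR8.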
bool PPCInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 bool AllowModify) const {
  bool isPPC64 = Subtarget.isPPC64();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  if (AllowModify) {
    // If the BB ends with an unconditional branch to the fallthrough BB,
    // we eliminate the branch instruction.
    if (I->getOpcode() == PPC::B &&
        MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
      I->eraseFromParent();

      // We update the iterator after deleting the last branch.
      I = MBB.getLastNonDebugInstr();
      if (I == MBB.end() || !isUnpredicatedTerminator(*I))
        return false;
    }
  }

  // Get the last instruction in the block.
  MachineInstr &LastInst = *I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (LastInst.getOpcode() == PPC::B) {
      if (!LastInst.getOperand(0).isMBB())
        return true;
      TBB = LastInst.getOperand(0).getMBB();
      return false;
    } else if (LastInst.getOpcode() == PPC::BCC) {
      if (!LastInst.getOperand(2).isMBB())
        return true;
      // Block ends with fall-through condbranch.
      TBB = LastInst.getOperand(2).getMBB();
      Cond.push_back(LastInst.getOperand(0));
      Cond.push_back(LastInst.getOperand(1));
      return false;
    } else if (LastInst.getOpcode() == PPC::BC) {
      if (!LastInst.getOperand(1).isMBB())
        return true;
      // Block ends with fall-through condbranch.
      TBB = LastInst.getOperand(1).getMBB();
      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
      Cond.push_back(LastInst.getOperand(0));
      return false;
    } else if (LastInst.getOpcode() == PPC::BCn) {
      if (!LastInst.getOperand(1).isMBB())
        return true;
      // Block ends with fall-through condbranch.
      TBB = LastInst.getOperand(1).getMBB();
      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
      Cond.push_back(LastInst.getOperand(0));
      return false;
    } else if (LastInst.getOpcode() == PPC::BDNZ8 ||
               LastInst.getOpcode() == PPC::BDNZ) {
      if (!LastInst.getOperand(0).isMBB())
        return true;
      if (DisableCTRLoopAnal)
        return true;
      TBB = LastInst.getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(1));
      Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                               true));
      return false;
    } else if (LastInst.getOpcode() == PPC::BDZ8 ||
               LastInst.getOpcode() == PPC::BDZ) {
      if (!LastInst.getOperand(0).isMBB())
        return true;
      if (DisableCTRLoopAnal)
        return true;
      TBB = LastInst.getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(0));
      Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                               true));
      return false;
    }

    // Otherwise, we don't know what this is.
    return true;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr &SecondLastInst = *I;

  // If there are three terminators, we don't know what sort of block this is.
  if (I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with PPC::B and PPC::BCC, handle it.
  if (SecondLastInst.getOpcode() == PPC::BCC &&
      LastInst.getOpcode() == PPC::B) {
    if (!SecondLastInst.getOperand(2).isMBB() ||
        !LastInst.getOperand(0).isMBB())
      return true;
    TBB = SecondLastInst.getOperand(2).getMBB();
    Cond.push_back(SecondLastInst.getOperand(0));
    Cond.push_back(SecondLastInst.getOperand(1));
    FBB = LastInst.getOperand(0).getMBB();
    return false;
  } else if (SecondLastInst.getOpcode() == PPC::BC &&
             LastInst.getOpcode() == PPC::B) {
    if (!SecondLastInst.getOperand(1).isMBB() ||
        !LastInst.getOperand(0).isMBB())
      return true;
    TBB = SecondLastInst.getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
    Cond.push_back(SecondLastInst.getOperand(0));
    FBB = LastInst.getOperand(0).getMBB();
    return false;
  } else if (SecondLastInst.getOpcode() == PPC::BCn &&
             LastInst.getOpcode() == PPC::B) {
    if (!SecondLastInst.getOperand(1).isMBB() ||
        !LastInst.getOperand(0).isMBB())
      return true;
    TBB = SecondLastInst.getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
    Cond.push_back(SecondLastInst.getOperand(0));
    FBB = LastInst.getOperand(0).getMBB();
    return false;
  } else if ((SecondLastInst.getOpcode() == PPC::BDNZ8 ||
              SecondLastInst.getOpcode() == PPC::BDNZ) &&
             LastInst.getOpcode() == PPC::B) {
    if (!SecondLastInst.getOperand(0).isMBB() ||
        !LastInst.getOperand(0).isMBB())
      return true;
    if (DisableCTRLoopAnal)
      return true;
    TBB = SecondLastInst.getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(1));
    Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                             true));
    FBB = LastInst.getOperand(0).getMBB();
    return false;
  } else if ((SecondLastInst.getOpcode() == PPC::BDZ8 ||
              SecondLastInst.getOpcode() == PPC::BDZ) &&
             LastInst.getOpcode() == PPC::B) {
    if (!SecondLastInst.getOperand(0).isMBB() ||
        !LastInst.getOperand(0).isMBB())
      return true;
    if (DisableCTRLoopAnal)
      return true;
    TBB = SecondLastInst.getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(0));
    Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                             true));
    FBB = LastInst.getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two PPC::Bs, handle it. The second one is not
  // executed, so remove it.
  if (SecondLastInst.getOpcode() == PPC::B && LastInst.getOpcode() == PPC::B) {
    if (!SecondLastInst.getOperand(0).isMBB())
      return true;
    TBB = SecondLastInst.getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}
1433
1434unsigned PPCInstrInfo::removeBranch(MachineBasicBlock &MBB,
1435 int *BytesRemoved) const {
1436 assert(!BytesRemoved && "code size not handled");
1437
1438 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
1439 if (I == MBB.end())
1440 return 0;
1441
1442 if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
1443 I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
1444 I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
1445 I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
1446 return 0;
1447
1448 // Remove the branch.
1449 I->eraseFromParent();
1450
1451 I = MBB.end();
1452
1453 if (I == MBB.begin()) return 1;
1454 --I;
1455 if (I->getOpcode() != PPC::BCC &&
1456 I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
1457 I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
1458 I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
1459 return 1;
1460
1461 // Remove the branch.
1462 I->eraseFromParent();
1463 return 2;
1464}
1465
1466unsigned PPCInstrInfo::insertBranch(MachineBasicBlock &MBB,
1467 MachineBasicBlock *TBB,
1468 MachineBasicBlock *FBB,
1469 ArrayRef<MachineOperand> Cond,
1470 const DebugLoc &DL,
1471 int *BytesAdded) const {
1472 // Shouldn't be a fall through.
1473 assert(TBB && "insertBranch must not be told to insert a fallthrough");
1474 assert((Cond.size() == 2 || Cond.size() == 0) &&
1475 "PPC branch conditions have two components!");
1476 assert(!BytesAdded && "code size not handled");
1477
1478 bool isPPC64 = Subtarget.isPPC64();
1479
1480 // One-way branch.
1481 if (!FBB) {
1482 if (Cond.empty()) // Unconditional branch
1483 BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: PPC::B)).addMBB(MBB: TBB);
1484 else if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
1485 BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: Cond[0].getImm() ?
1486 (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
1487 (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(MBB: TBB);
1488 else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
1489 BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: PPC::BC)).add(MO: Cond[1]).addMBB(MBB: TBB);
1490 else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
1491 BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: PPC::BCn)).add(MO: Cond[1]).addMBB(MBB: TBB);
1492 else // Conditional branch
1493 BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: PPC::BCC))
1494 .addImm(Val: Cond[0].getImm())
1495 .add(MO: Cond[1])
1496 .addMBB(MBB: TBB);
1497 return 1;
1498 }
1499
  // Two-way conditional branch.
1501 if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
1502 BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: Cond[0].getImm() ?
1503 (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
1504 (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(MBB: TBB);
1505 else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
1506 BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: PPC::BC)).add(MO: Cond[1]).addMBB(MBB: TBB);
1507 else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
1508 BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: PPC::BCn)).add(MO: Cond[1]).addMBB(MBB: TBB);
1509 else
1510 BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: PPC::BCC))
1511 .addImm(Val: Cond[0].getImm())
1512 .add(MO: Cond[1])
1513 .addMBB(MBB: TBB);
1514 BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: PPC::B)).addMBB(MBB: FBB);
1515 return 2;
1516}
1517
1518// Select analysis.
1519bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
1520 ArrayRef<MachineOperand> Cond,
1521 Register DstReg, Register TrueReg,
1522 Register FalseReg, int &CondCycles,
1523 int &TrueCycles, int &FalseCycles) const {
1524 if (!Subtarget.hasISEL())
1525 return false;
1526
1527 if (Cond.size() != 2)
1528 return false;
1529
1530 // If this is really a bdnz-like condition, then it cannot be turned into a
1531 // select.
1532 if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
1533 return false;
1534
1535 // If the conditional branch uses a physical register, then it cannot be
1536 // turned into a select.
1537 if (Cond[1].getReg().isPhysical())
1538 return false;
1539
1540 // Check register classes.
1541 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1542 const TargetRegisterClass *RC =
1543 RI.getCommonSubClass(A: MRI.getRegClass(Reg: TrueReg), B: MRI.getRegClass(Reg: FalseReg));
1544 if (!RC)
1545 return false;
1546
1547 // isel is for regular integer GPRs only.
1548 if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
1549 !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
1550 !PPC::G8RCRegClass.hasSubClassEq(RC) &&
1551 !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))
1552 return false;
1553
1554 // FIXME: These numbers are for the A2, how well they work for other cores is
1555 // an open question. On the A2, the isel instruction has a 2-cycle latency
1556 // but single-cycle throughput. These numbers are used in combination with
1557 // the MispredictPenalty setting from the active SchedMachineModel.
1558 CondCycles = 1;
1559 TrueCycles = 1;
1560 FalseCycles = 1;
1561
1562 return true;
1563}
1564
1565void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
1566 MachineBasicBlock::iterator MI,
1567 const DebugLoc &dl, Register DestReg,
1568 ArrayRef<MachineOperand> Cond, Register TrueReg,
1569 Register FalseReg) const {
1570 assert(Cond.size() == 2 &&
1571 "PPC branch conditions have two components!");
1572
1573 // Get the register classes.
1574 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1575 const TargetRegisterClass *RC =
1576 RI.getCommonSubClass(A: MRI.getRegClass(Reg: TrueReg), B: MRI.getRegClass(Reg: FalseReg));
1577 assert(RC && "TrueReg and FalseReg must have overlapping register classes");
1578
1579 bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
1580 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
1581 assert((Is64Bit ||
1582 PPC::GPRCRegClass.hasSubClassEq(RC) ||
1583 PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
1584 "isel is for regular integer GPRs only");
1585
1586 unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
1587 auto SelectPred = static_cast<PPC::Predicate>(Cond[0].getImm());
1588
1589 unsigned SubIdx = 0;
1590 bool SwapOps = false;
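  // Map the branch predicate onto the CR subregister that isel will test.
  // isel picks the first operand when the bit is set, so for the negated
  // predicates (NE, GE, LE, NU) we test the complementary bit and swap the
  // true/false operands instead.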
1591 switch (SelectPred) {
1592 case PPC::PRED_EQ:
1593 case PPC::PRED_EQ_MINUS:
1594 case PPC::PRED_EQ_PLUS:
1595 SubIdx = PPC::sub_eq; SwapOps = false; break;
1596 case PPC::PRED_NE:
1597 case PPC::PRED_NE_MINUS:
1598 case PPC::PRED_NE_PLUS:
1599 SubIdx = PPC::sub_eq; SwapOps = true; break;
1600 case PPC::PRED_LT:
1601 case PPC::PRED_LT_MINUS:
1602 case PPC::PRED_LT_PLUS:
1603 SubIdx = PPC::sub_lt; SwapOps = false; break;
1604 case PPC::PRED_GE:
1605 case PPC::PRED_GE_MINUS:
1606 case PPC::PRED_GE_PLUS:
1607 SubIdx = PPC::sub_lt; SwapOps = true; break;
1608 case PPC::PRED_GT:
1609 case PPC::PRED_GT_MINUS:
1610 case PPC::PRED_GT_PLUS:
1611 SubIdx = PPC::sub_gt; SwapOps = false; break;
1612 case PPC::PRED_LE:
1613 case PPC::PRED_LE_MINUS:
1614 case PPC::PRED_LE_PLUS:
1615 SubIdx = PPC::sub_gt; SwapOps = true; break;
1616 case PPC::PRED_UN:
1617 case PPC::PRED_UN_MINUS:
1618 case PPC::PRED_UN_PLUS:
1619 SubIdx = PPC::sub_un; SwapOps = false; break;
1620 case PPC::PRED_NU:
1621 case PPC::PRED_NU_MINUS:
1622 case PPC::PRED_NU_PLUS:
1623 SubIdx = PPC::sub_un; SwapOps = true; break;
1624 case PPC::PRED_BIT_SET: SubIdx = 0; SwapOps = false; break;
1625 case PPC::PRED_BIT_UNSET: SubIdx = 0; SwapOps = true; break;
1626 }
1627
1628 Register FirstReg = SwapOps ? FalseReg : TrueReg,
1629 SecondReg = SwapOps ? TrueReg : FalseReg;
1630
1631 // The first input register of isel cannot be r0. If it is a member
1632 // of a register class that can be r0, then copy it first (the
1633 // register allocator should eliminate the copy).
1634 if (MRI.getRegClass(Reg: FirstReg)->contains(Reg: PPC::R0) ||
1635 MRI.getRegClass(Reg: FirstReg)->contains(Reg: PPC::X0)) {
1636 const TargetRegisterClass *FirstRC =
1637 MRI.getRegClass(Reg: FirstReg)->contains(Reg: PPC::X0) ?
1638 &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
1639 Register OldFirstReg = FirstReg;
1640 FirstReg = MRI.createVirtualRegister(RegClass: FirstRC);
1641 BuildMI(BB&: MBB, I: MI, MIMD: dl, MCID: get(Opcode: TargetOpcode::COPY), DestReg: FirstReg)
1642 .addReg(RegNo: OldFirstReg);
1643 }
1644
1645 BuildMI(BB&: MBB, I: MI, MIMD: dl, MCID: get(Opcode: OpCode), DestReg)
1646 .addReg(RegNo: FirstReg)
1647 .addReg(RegNo: SecondReg)
1648 .addReg(RegNo: Cond[1].getReg(), Flags: {}, SubReg: SubIdx);
1649}
1650
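// Return the position of the given CR bit within its 4-bit CR field, counted
// from the least-significant end: UN = 0, EQ = 1, GT = 2, LT = 3. This is
// used below to compute rotate amounts when extracting a CR bit into a GPR.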
1651static unsigned getCRBitValue(unsigned CRBit) {
1652 unsigned Ret = 4;
1653 if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT ||
1654 CRBit == PPC::CR2LT || CRBit == PPC::CR3LT ||
1655 CRBit == PPC::CR4LT || CRBit == PPC::CR5LT ||
1656 CRBit == PPC::CR6LT || CRBit == PPC::CR7LT)
1657 Ret = 3;
1658 if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT ||
1659 CRBit == PPC::CR2GT || CRBit == PPC::CR3GT ||
1660 CRBit == PPC::CR4GT || CRBit == PPC::CR5GT ||
1661 CRBit == PPC::CR6GT || CRBit == PPC::CR7GT)
1662 Ret = 2;
1663 if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ ||
1664 CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ ||
1665 CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ ||
1666 CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ)
1667 Ret = 1;
1668 if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN ||
1669 CRBit == PPC::CR2UN || CRBit == PPC::CR3UN ||
1670 CRBit == PPC::CR4UN || CRBit == PPC::CR5UN ||
1671 CRBit == PPC::CR6UN || CRBit == PPC::CR7UN)
1672 Ret = 0;
1673
1674 assert(Ret != 4 && "Invalid CR bit register");
1675 return Ret;
1676}
1677
1678void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
1679 MachineBasicBlock::iterator I,
1680 const DebugLoc &DL, Register DestReg,
1681 Register SrcReg, bool KillSrc,
1682 bool RenamableDest, bool RenamableSrc) const {
1683 // We can end up with self copies and similar things as a result of VSX copy
1684 // legalization. Promote them here.
1685 const TargetRegisterInfo *TRI = &getRegisterInfo();
1686 if (PPC::F8RCRegClass.contains(Reg: DestReg) &&
1687 PPC::VSRCRegClass.contains(Reg: SrcReg)) {
1688 MCRegister SuperReg =
1689 TRI->getMatchingSuperReg(Reg: DestReg, SubIdx: PPC::sub_64, RC: &PPC::VSRCRegClass);
1690
1691 if (VSXSelfCopyCrash && SrcReg == SuperReg)
1692 llvm_unreachable("nop VSX copy");
1693
1694 DestReg = SuperReg;
1695 } else if (PPC::F8RCRegClass.contains(Reg: SrcReg) &&
1696 PPC::VSRCRegClass.contains(Reg: DestReg)) {
1697 MCRegister SuperReg =
1698 TRI->getMatchingSuperReg(Reg: SrcReg, SubIdx: PPC::sub_64, RC: &PPC::VSRCRegClass);
1699
1700 if (VSXSelfCopyCrash && DestReg == SuperReg)
1701 llvm_unreachable("nop VSX copy");
1702
1703 SrcReg = SuperReg;
1704 }
1705
  // Handle copies between registers of different classes.
1707 if (PPC::CRBITRCRegClass.contains(Reg: SrcReg) &&
1708 PPC::GPRCRegClass.contains(Reg: DestReg)) {
1709 MCRegister CRReg = getCRFromCRBit(SrcReg);
1710 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::MFOCRF), DestReg).addReg(RegNo: CRReg);
1711 getKillRegState(B: KillSrc);
1712 // Rotate the CR bit in the CR fields to be the least significant bit and
1713 // then mask with 0x1 (MB = ME = 31).
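  // For example, copying CR5GT: mfocrf places CR field 5 at bits 20-23 of
  // the GPR, CR5GT is bit 21, and the rotate amount is 5 * 4 + (4 - 2) = 22,
  // which brings bit 21 down to bit 31 before the mask is applied.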
1714 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::RLWINM), DestReg)
1715 .addReg(RegNo: DestReg, Flags: RegState::Kill)
1716 .addImm(Val: TRI->getEncodingValue(Reg: CRReg) * 4 + (4 - getCRBitValue(CRBit: SrcReg)))
1717 .addImm(Val: 31)
1718 .addImm(Val: 31);
1719 return;
1720 } else if (PPC::CRRCRegClass.contains(Reg: SrcReg) &&
1721 (PPC::G8RCRegClass.contains(Reg: DestReg) ||
1722 PPC::GPRCRegClass.contains(Reg: DestReg))) {
1723 bool Is64Bit = PPC::G8RCRegClass.contains(Reg: DestReg);
1724 unsigned MvCode = Is64Bit ? PPC::MFOCRF8 : PPC::MFOCRF;
1725 unsigned ShCode = Is64Bit ? PPC::RLWINM8 : PPC::RLWINM;
1726 unsigned CRNum = TRI->getEncodingValue(Reg: SrcReg);
1727 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: MvCode), DestReg).addReg(RegNo: SrcReg);
1728 getKillRegState(B: KillSrc);
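    // After mfocrf, CR field N occupies bits 4*N..4*N+3 of the GPR, so CR7
    // already sits in the low 4 bits and needs no shift. For CR2, for
    // example, the rotate amount is 2 * 4 + 4 = 12, moving bits 8-11 down to
    // bits 28-31.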
1729 if (CRNum == 7)
1730 return;
    // Shift the CR bits so that the CR field ends up in the lowest 4 bits of
    // the GPR.
1732 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: ShCode), DestReg)
1733 .addReg(RegNo: DestReg, Flags: RegState::Kill)
1734 .addImm(Val: CRNum * 4 + 4)
1735 .addImm(Val: 28)
1736 .addImm(Val: 31);
1737 return;
1738 } else if (PPC::G8RCRegClass.contains(Reg: SrcReg) &&
1739 PPC::VSFRCRegClass.contains(Reg: DestReg)) {
1740 assert(Subtarget.hasDirectMove() &&
1741 "Subtarget doesn't support directmove, don't know how to copy.");
1742 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::MTVSRD), DestReg).addReg(RegNo: SrcReg);
1743 NumGPRtoVSRSpill++;
1744 getKillRegState(B: KillSrc);
1745 return;
1746 } else if (PPC::VSFRCRegClass.contains(Reg: SrcReg) &&
1747 PPC::G8RCRegClass.contains(Reg: DestReg)) {
1748 assert(Subtarget.hasDirectMove() &&
1749 "Subtarget doesn't support directmove, don't know how to copy.");
1750 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::MFVSRD), DestReg).addReg(RegNo: SrcReg);
1751 getKillRegState(B: KillSrc);
1752 return;
1753 } else if (PPC::SPERCRegClass.contains(Reg: SrcReg) &&
1754 PPC::GPRCRegClass.contains(Reg: DestReg)) {
1755 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::EFSCFD), DestReg).addReg(RegNo: SrcReg);
1756 getKillRegState(B: KillSrc);
1757 return;
1758 } else if (PPC::GPRCRegClass.contains(Reg: SrcReg) &&
1759 PPC::SPERCRegClass.contains(Reg: DestReg)) {
1760 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::EFDCFS), DestReg).addReg(RegNo: SrcReg);
1761 getKillRegState(B: KillSrc);
1762 return;
1763 } else if ((PPC::G8RCRegClass.contains(Reg: DestReg) ||
1764 PPC::GPRCRegClass.contains(Reg: DestReg)) &&
1765 SrcReg == PPC::CARRY) {
1766 bool Is64Bit = PPC::G8RCRegClass.contains(Reg: DestReg);
1767 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: Is64Bit ? PPC::MFSPR8 : PPC::MFSPR), DestReg)
1768 .addImm(Val: 1)
1769 .addReg(RegNo: PPC::CARRY, Flags: RegState::Implicit);
1770 return;
1771 } else if ((PPC::G8RCRegClass.contains(Reg: SrcReg) ||
1772 PPC::GPRCRegClass.contains(Reg: SrcReg)) &&
1773 DestReg == PPC::CARRY) {
1774 bool Is64Bit = PPC::G8RCRegClass.contains(Reg: SrcReg);
1775 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: Is64Bit ? PPC::MTSPR8 : PPC::MTSPR))
1776 .addImm(Val: 1)
1777 .addReg(RegNo: SrcReg)
1778 .addReg(RegNo: PPC::CARRY, Flags: RegState::ImplicitDefine);
1779 return;
1780 }
1781
1782 unsigned Opc;
1783 if (PPC::GPRCRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
1784 Opc = PPC::OR;
1785 else if (PPC::G8RCRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
1786 Opc = PPC::OR8;
1787 else if (PPC::F4RCRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
1788 Opc = PPC::FMR;
1789 else if (PPC::CRRCRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
1790 Opc = PPC::MCRF;
1791 else if (PPC::VRRCRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
1792 Opc = PPC::VOR;
1793 else if (PPC::VSRCRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
1794 // There are two different ways this can be done:
1795 // 1. xxlor : This has lower latency (on the P7), 2 cycles, but can only
1796 // issue in VSU pipeline 0.
1797 // 2. xmovdp/xmovsp: This has higher latency (on the P7), 6 cycles, but
1798 // can go to either pipeline.
1799 // We'll always use xxlor here, because in practically all cases where
1800 // copies are generated, they are close enough to some use that the
1801 // lower-latency form is preferable.
1802 Opc = PPC::XXLOR;
1803 else if (PPC::VSFRCRegClass.contains(Reg1: DestReg, Reg2: SrcReg) ||
1804 PPC::VSSRCRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
1805 Opc = (Subtarget.hasP9Vector()) ? PPC::XSCPSGNDP : PPC::XXLORf;
1806 else if (Subtarget.pairedVectorMemops() &&
1807 PPC::VSRpRCRegClass.contains(Reg1: DestReg, Reg2: SrcReg)) {
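    // A VSRp register is a pair of consecutive VSX registers: VSRp0-VSRp15
    // live in VSL0-VSL31 and VSRp16-VSRp31 in V0-V31. Lower the pair copy to
    // two XXLORs on the underlying registers.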
1808 if (SrcReg > PPC::VSRp15)
1809 SrcReg = PPC::V0 + (SrcReg - PPC::VSRp16) * 2;
1810 else
1811 SrcReg = PPC::VSL0 + (SrcReg - PPC::VSRp0) * 2;
1812 if (DestReg > PPC::VSRp15)
1813 DestReg = PPC::V0 + (DestReg - PPC::VSRp16) * 2;
1814 else
1815 DestReg = PPC::VSL0 + (DestReg - PPC::VSRp0) * 2;
1816 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::XXLOR), DestReg).
1817 addReg(RegNo: SrcReg).addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc));
1818 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::XXLOR), DestReg: DestReg + 1).
1819 addReg(RegNo: SrcReg + 1).addReg(RegNo: SrcReg + 1, Flags: getKillRegState(B: KillSrc));
1820 return;
1821 }
1822 else if (PPC::CRBITRCRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
1823 Opc = PPC::CROR;
1824 else if (PPC::SPERCRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
1825 Opc = PPC::EVOR;
1826 else if ((PPC::ACCRCRegClass.contains(Reg: DestReg) ||
1827 PPC::UACCRCRegClass.contains(Reg: DestReg)) &&
1828 (PPC::ACCRCRegClass.contains(Reg: SrcReg) ||
1829 PPC::UACCRCRegClass.contains(Reg: SrcReg))) {
1830 // If primed, de-prime the source register, copy the individual registers
1831 // and prime the destination if needed. The vector subregisters are
1832 // vs[(u)acc * 4] - vs[(u)acc * 4 + 3]. If the copy is not a kill and the
1833 // source is primed, we need to re-prime it after the copy as well.
1834 PPCRegisterInfo::emitAccCopyInfo(MBB, DestReg, SrcReg);
1835 bool DestPrimed = PPC::ACCRCRegClass.contains(Reg: DestReg);
1836 bool SrcPrimed = PPC::ACCRCRegClass.contains(Reg: SrcReg);
1837 MCRegister VSLSrcReg =
1838 PPC::VSL0 + (SrcReg - (SrcPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
1839 MCRegister VSLDestReg =
1840 PPC::VSL0 + (DestReg - (DestPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
1841 if (SrcPrimed)
1842 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::XXMFACC), DestReg: SrcReg).addReg(RegNo: SrcReg);
1843 for (unsigned Idx = 0; Idx < 4; Idx++)
1844 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::XXLOR), DestReg: VSLDestReg + Idx)
1845 .addReg(RegNo: VSLSrcReg + Idx)
1846 .addReg(RegNo: VSLSrcReg + Idx, Flags: getKillRegState(B: KillSrc));
1847 if (DestPrimed)
1848 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::XXMTACC), DestReg).addReg(RegNo: DestReg);
1849 if (SrcPrimed && !KillSrc)
1850 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::XXMTACC), DestReg: SrcReg).addReg(RegNo: SrcReg);
1851 return;
1852 } else if (PPC::G8pRCRegClass.contains(Reg: DestReg) &&
1853 PPC::G8pRCRegClass.contains(Reg: SrcReg)) {
1854 // TODO: Handle G8RC to G8pRC (and vice versa) copy.
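    // Each G8p register pairs two consecutive GPRs: G8pN covers X(2N) and
    // X(2N+1). For example, copying G8p2 into G8p1 emits
    //   or8 x2, x4, x4
    //   or8 x3, x5, x5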
1855 unsigned DestRegIdx = DestReg - PPC::G8p0;
1856 MCRegister DestRegSub0 = PPC::X0 + 2 * DestRegIdx;
1857 MCRegister DestRegSub1 = PPC::X0 + 2 * DestRegIdx + 1;
1858 unsigned SrcRegIdx = SrcReg - PPC::G8p0;
1859 MCRegister SrcRegSub0 = PPC::X0 + 2 * SrcRegIdx;
1860 MCRegister SrcRegSub1 = PPC::X0 + 2 * SrcRegIdx + 1;
1861 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::OR8), DestReg: DestRegSub0)
1862 .addReg(RegNo: SrcRegSub0)
1863 .addReg(RegNo: SrcRegSub0, Flags: getKillRegState(B: KillSrc));
1864 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::OR8), DestReg: DestRegSub1)
1865 .addReg(RegNo: SrcRegSub1)
1866 .addReg(RegNo: SrcRegSub1, Flags: getKillRegState(B: KillSrc));
1867 return;
1868 } else if ((PPC::WACCRCRegClass.contains(Reg: DestReg) ||
1869 PPC::WACC_HIRCRegClass.contains(Reg: DestReg)) &&
1870 (PPC::WACCRCRegClass.contains(Reg: SrcReg) ||
1871 PPC::WACC_HIRCRegClass.contains(Reg: SrcReg))) {
1872
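    // There is no direct WACC-to-WACC move, so extract the 512-bit contents
    // into two scavenged VSX register pairs (DMXXEXTFDMR512[_HI]) and then
    // insert them into the destination (DMXXINSTDMR512[_HI]).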
1873 Opc = PPC::WACCRCRegClass.contains(Reg: SrcReg) ? PPC::DMXXEXTFDMR512
1874 : PPC::DMXXEXTFDMR512_HI;
1875
1876 RegScavenger RS;
1877 RS.enterBasicBlockEnd(MBB);
1878 RS.backward(I: std::next(x: I));
1879
1880 Register TmpReg1 = RS.scavengeRegisterBackwards(RC: PPC::VSRpRCRegClass, To: I,
1881 /* RestoreAfter */ false, SPAdj: 0,
1882 /* AllowSpill */ false);
1883
1884 RS.setRegUsed(Reg: TmpReg1);
1885 Register TmpReg2 = RS.scavengeRegisterBackwards(RC: PPC::VSRpRCRegClass, To: I,
1886 /* RestoreAfter */ false, SPAdj: 0,
1887 /* AllowSpill */ false);
1888
1889 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: Opc))
1890 .addReg(RegNo: TmpReg1, Flags: RegState::Define)
1891 .addReg(RegNo: TmpReg2, Flags: RegState::Define)
1892 .addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc));
1893
1894 Opc = PPC::WACCRCRegClass.contains(Reg: DestReg) ? PPC::DMXXINSTDMR512
1895 : PPC::DMXXINSTDMR512_HI;
1896
1897 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: Opc), DestReg)
1898 .addReg(RegNo: TmpReg1, Flags: RegState::Kill)
1899 .addReg(RegNo: TmpReg2, Flags: RegState::Kill);
1900
1901 return;
1902 } else if (PPC::DMRRCRegClass.contains(Reg: DestReg) &&
1903 PPC::DMRRCRegClass.contains(Reg: SrcReg)) {
1904
1905 BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: PPC::DMMR), DestReg)
1906 .addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc));
1907
1908 return;
1909
1910 } else
1911 llvm_unreachable("Impossible reg-to-reg copy");
1912
1913 const MCInstrDesc &MCID = get(Opcode: Opc);
1914 if (MCID.getNumOperands() == 3)
1915 BuildMI(BB&: MBB, I, MIMD: DL, MCID, DestReg)
1916 .addReg(RegNo: SrcReg).addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc));
1917 else
1918 BuildMI(BB&: MBB, I, MIMD: DL, MCID, DestReg).addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc));
1919}
1920
1921unsigned PPCInstrInfo::getSpillIndex(const TargetRegisterClass *RC) const {
1922 int OpcodeIndex = 0;
1923
1924 if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
1925 PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
1926 OpcodeIndex = SOK_Int4Spill;
1927 } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
1928 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
1929 OpcodeIndex = SOK_Int8Spill;
1930 } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
1931 OpcodeIndex = SOK_Float8Spill;
1932 } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
1933 OpcodeIndex = SOK_Float4Spill;
1934 } else if (PPC::SPERCRegClass.hasSubClassEq(RC)) {
1935 OpcodeIndex = SOK_SPESpill;
1936 } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
1937 OpcodeIndex = SOK_CRSpill;
1938 } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
1939 OpcodeIndex = SOK_CRBitSpill;
1940 } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
1941 OpcodeIndex = SOK_VRVectorSpill;
1942 } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
1943 OpcodeIndex = SOK_VSXVectorSpill;
1944 } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
1945 OpcodeIndex = SOK_VectorFloat8Spill;
1946 } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
1947 OpcodeIndex = SOK_VectorFloat4Spill;
1948 } else if (PPC::SPILLTOVSRRCRegClass.hasSubClassEq(RC)) {
1949 OpcodeIndex = SOK_SpillToVSR;
1950 } else if (PPC::ACCRCRegClass.hasSubClassEq(RC)) {
1951 assert(Subtarget.pairedVectorMemops() &&
1952 "Register unexpected when paired memops are disabled.");
1953 OpcodeIndex = SOK_AccumulatorSpill;
1954 } else if (PPC::UACCRCRegClass.hasSubClassEq(RC)) {
1955 assert(Subtarget.pairedVectorMemops() &&
1956 "Register unexpected when paired memops are disabled.");
1957 OpcodeIndex = SOK_UAccumulatorSpill;
1958 } else if (PPC::WACCRCRegClass.hasSubClassEq(RC)) {
1959 assert(Subtarget.pairedVectorMemops() &&
1960 "Register unexpected when paired memops are disabled.");
1961 OpcodeIndex = SOK_WAccumulatorSpill;
1962 } else if (PPC::VSRpRCRegClass.hasSubClassEq(RC)) {
1963 assert(Subtarget.pairedVectorMemops() &&
1964 "Register unexpected when paired memops are disabled.");
1965 OpcodeIndex = SOK_PairedVecSpill;
1966 } else if (PPC::G8pRCRegClass.hasSubClassEq(RC)) {
1967 OpcodeIndex = SOK_PairedG8Spill;
1968 } else if (PPC::DMRROWRCRegClass.hasSubClassEq(RC)) {
1969 llvm_unreachable("TODO: Implement spill DMRROW regclass!");
1970 } else if (PPC::DMRROWpRCRegClass.hasSubClassEq(RC)) {
1971 llvm_unreachable("TODO: Implement spill DMRROWp regclass!");
1972 } else if (PPC::DMRpRCRegClass.hasSubClassEq(RC)) {
1973 OpcodeIndex = SOK_DMRpSpill;
1974 } else if (PPC::DMRRCRegClass.hasSubClassEq(RC)) {
1975 OpcodeIndex = SOK_DMRSpill;
1976 } else {
1977 llvm_unreachable("Unknown regclass!");
1978 }
1979 return OpcodeIndex;
1980}
1981
1982unsigned
1983PPCInstrInfo::getStoreOpcodeForSpill(const TargetRegisterClass *RC) const {
1984 ArrayRef<unsigned> OpcodesForSpill = getStoreOpcodesForSpillArray();
1985 return OpcodesForSpill[getSpillIndex(RC)];
1986}
1987
1988unsigned
1989PPCInstrInfo::getLoadOpcodeForSpill(const TargetRegisterClass *RC) const {
1990 ArrayRef<unsigned> OpcodesForSpill = getLoadOpcodesForSpillArray();
1991 return OpcodesForSpill[getSpillIndex(RC)];
1992}
1993
1994void PPCInstrInfo::StoreRegToStackSlot(
1995 MachineFunction &MF, unsigned SrcReg, bool isKill, int FrameIdx,
1996 const TargetRegisterClass *RC,
1997 SmallVectorImpl<MachineInstr *> &NewMIs) const {
1998 unsigned Opcode = getStoreOpcodeForSpill(RC);
1999 DebugLoc DL;
2000
2001 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2002 FuncInfo->setHasSpills();
2003
2004 NewMIs.push_back(Elt: addFrameReference(
2005 MIB: BuildMI(MF, MIMD: DL, MCID: get(Opcode)).addReg(RegNo: SrcReg, Flags: getKillRegState(B: isKill)),
2006 FI: FrameIdx));
2007
2008 if (PPC::CRRCRegClass.hasSubClassEq(RC) ||
2009 PPC::CRBITRCRegClass.hasSubClassEq(RC))
2010 FuncInfo->setSpillsCR();
2011
2012 if (isXFormMemOp(Opcode))
2013 FuncInfo->setHasNonRISpills();
2014}
2015
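// Unlike storeRegToStackSlot below, this variant does not first adjust the
// register class via updatedRC; callers must pass the exact class to spill.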
2016void PPCInstrInfo::storeRegToStackSlotNoUpd(
2017 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg,
2018 bool isKill, int FrameIdx, const TargetRegisterClass *RC) const {
2019 MachineFunction &MF = *MBB.getParent();
2020 SmallVector<MachineInstr *, 4> NewMIs;
2021
2022 StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs);
2023
2024 for (MachineInstr *NewMI : NewMIs)
2025 MBB.insert(I: MI, MI: NewMI);
2026
2027 const MachineFrameInfo &MFI = MF.getFrameInfo();
2028 MachineMemOperand *MMO = MF.getMachineMemOperand(
2029 PtrInfo: MachinePointerInfo::getFixedStack(MF, FI: FrameIdx),
2030 F: MachineMemOperand::MOStore, Size: MFI.getObjectSize(ObjectIdx: FrameIdx),
2031 BaseAlignment: MFI.getObjectAlign(ObjectIdx: FrameIdx));
2032 NewMIs.back()->addMemOperand(MF, MO: MMO);
2033}
2034
2035void PPCInstrInfo::storeRegToStackSlot(
2036 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
2037 bool isKill, int FrameIdx, const TargetRegisterClass *RC, Register VReg,
2038 MachineInstr::MIFlag Flags) const {
2039 // We need to avoid a situation in which the value from a VRRC register is
2040 // spilled using an Altivec instruction and reloaded into a VSRC register
2041 // using a VSX instruction. The issue with this is that the VSX
2042 // load/store instructions swap the doublewords in the vector and the Altivec
2043 // ones don't. The register classes on the spill/reload may be different if
2044 // the register is defined using an Altivec instruction and is then used by a
2045 // VSX instruction.
2046 RC = updatedRC(RC);
2047 storeRegToStackSlotNoUpd(MBB, MI, SrcReg, isKill, FrameIdx, RC);
2048}
2049
2050void PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
2051 unsigned DestReg, int FrameIdx,
2052 const TargetRegisterClass *RC,
2053 SmallVectorImpl<MachineInstr *> &NewMIs)
2054 const {
2055 unsigned Opcode = getLoadOpcodeForSpill(RC);
2056 NewMIs.push_back(Elt: addFrameReference(MIB: BuildMI(MF, MIMD: DL, MCID: get(Opcode), DestReg),
2057 FI: FrameIdx));
2058}
2059
2060void PPCInstrInfo::loadRegFromStackSlotNoUpd(
2061 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg,
2062 int FrameIdx, const TargetRegisterClass *RC) const {
2063 MachineFunction &MF = *MBB.getParent();
2064 SmallVector<MachineInstr*, 4> NewMIs;
2065 DebugLoc DL;
2066 if (MI != MBB.end()) DL = MI->getDebugLoc();
2067
2068 LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs);
2069
2070 for (MachineInstr *NewMI : NewMIs)
2071 MBB.insert(I: MI, MI: NewMI);
2072
2073 const MachineFrameInfo &MFI = MF.getFrameInfo();
2074 MachineMemOperand *MMO = MF.getMachineMemOperand(
2075 PtrInfo: MachinePointerInfo::getFixedStack(MF, FI: FrameIdx),
2076 F: MachineMemOperand::MOLoad, Size: MFI.getObjectSize(ObjectIdx: FrameIdx),
2077 BaseAlignment: MFI.getObjectAlign(ObjectIdx: FrameIdx));
2078 NewMIs.back()->addMemOperand(MF, MO: MMO);
2079}
2080
2081void PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
2082 MachineBasicBlock::iterator MI,
2083 Register DestReg, int FrameIdx,
2084 const TargetRegisterClass *RC,
2085 Register VReg, unsigned SubReg,
2086 MachineInstr::MIFlag Flags) const {
2087 // We need to avoid a situation in which the value from a VRRC register is
2088 // spilled using an Altivec instruction and reloaded into a VSRC register
2089 // using a VSX instruction. The issue with this is that the VSX
2090 // load/store instructions swap the doublewords in the vector and the Altivec
2091 // ones don't. The register classes on the spill/reload may be different if
2092 // the register is defined using an Altivec instruction and is then used by a
2093 // VSX instruction.
2094 RC = updatedRC(RC);
2095
2096 loadRegFromStackSlotNoUpd(MBB, MI, DestReg, FrameIdx, RC);
2097}
2098
2099bool PPCInstrInfo::
2100reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
2101 assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
2102 if (Cond[1].getReg() == PPC::CTR8 || Cond[1].getReg() == PPC::CTR)
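    // analyzeBranch records BDNZ as Cond[0] == 1 and BDZ as Cond[0] == 0, so
    // flipping the immediate swaps branch-if-nonzero with branch-if-zero.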
2103 Cond[0].setImm(Cond[0].getImm() == 0 ? 1 : 0);
2104 else
2105 // Leave the CR# the same, but invert the condition.
2106 Cond[0].setImm(PPC::InvertPredicate(Opcode: (PPC::Predicate)Cond[0].getImm()));
2107 return false;
2108}
2109
// For some instructions, it is legal to fold ZERO into the RA register field.
// This function performs that fold by replacing the operand with PPC::ZERO;
// it does not delete the defining load-immediate even if it becomes dead
// (foldImmediate below takes care of that).
2113bool PPCInstrInfo::onlyFoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
2114 Register Reg) const {
2115 // A zero immediate should always be loaded with a single li.
2116 unsigned DefOpc = DefMI.getOpcode();
2117 if (DefOpc != PPC::LI && DefOpc != PPC::LI8)
2118 return false;
2119 if (!DefMI.getOperand(i: 1).isImm())
2120 return false;
2121 if (DefMI.getOperand(i: 1).getImm() != 0)
2122 return false;
2123
2124 // Note that we cannot here invert the arguments of an isel in order to fold
2125 // a ZERO into what is presented as the second argument. All we have here
2126 // is the condition bit, and that might come from a CR-logical bit operation.
2127
2128 const MCInstrDesc &UseMCID = UseMI.getDesc();
2129
2130 // Only fold into real machine instructions.
2131 if (UseMCID.isPseudo())
2132 return false;
2133
  // We need to find which of the user's operands is to be folded; that will
  // be the operand that matches the given register ID.
2136 unsigned UseIdx;
2137 for (UseIdx = 0; UseIdx < UseMI.getNumOperands(); ++UseIdx)
2138 if (UseMI.getOperand(i: UseIdx).isReg() &&
2139 UseMI.getOperand(i: UseIdx).getReg() == Reg)
2140 break;
2141
2142 assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");
2143 assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg");
2144
2145 // We can fold the zero if this register requires a GPRC_NOR0/G8RC_NOX0
2146 // register (which might also be specified as a pointer class kind).
2147
2148 const MCOperandInfo &UseInfo = UseMCID.operands()[UseIdx];
2149 int16_t RegClass = getOpRegClassID(OpInfo: UseInfo);
  if (RegClass != PPC::GPRC_NOR0RegClassID &&
      RegClass != PPC::G8RC_NOX0RegClassID)
2152 return false;
2153
2154 // Make sure this is not tied to an output register (or otherwise
2155 // constrained). This is true for ST?UX registers, for example, which
2156 // are tied to their output registers.
2157 if (UseInfo.Constraints != 0)
2158 return false;
2159
2160 MCRegister ZeroReg =
2161 RegClass == PPC::G8RC_NOX0RegClassID ? PPC::ZERO8 : PPC::ZERO;
2162
2163 LLVM_DEBUG(dbgs() << "Folded immediate zero for: ");
2164 LLVM_DEBUG(UseMI.dump());
2165 UseMI.getOperand(i: UseIdx).setReg(ZeroReg);
2166 LLVM_DEBUG(dbgs() << "Into: ");
2167 LLVM_DEBUG(UseMI.dump());
2168 return true;
2169}
2170
// Fold a load-immediate of zero into an instruction whose RA field also
// accepts PPC::ZERO as an immediate zero. If the defining load-immediate has
// no remaining users afterwards, it is deleted.
2174bool PPCInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
2175 Register Reg, MachineRegisterInfo *MRI) const {
2176 bool Changed = onlyFoldImmediate(UseMI, DefMI, Reg);
2177 if (MRI->use_nodbg_empty(RegNo: Reg))
2178 DefMI.eraseFromParent();
2179 return Changed;
2180}
2181
2182static bool MBBDefinesCTR(MachineBasicBlock &MBB) {
2183 for (MachineInstr &MI : MBB)
2184 if (MI.definesRegister(Reg: PPC::CTR, /*TRI=*/nullptr) ||
2185 MI.definesRegister(Reg: PPC::CTR8, /*TRI=*/nullptr))
2186 return true;
2187 return false;
2188}
2189
// We should make sure that, if we're going to predicate both sides of a
// condition (a diamond), both sides don't define the counter register. We
// can predicate counter-decrement-based branches, but while that predicates
// the branching, it does not predicate the counter decrement. If we merged
// the diamond into one predicated block, we'd decrement the counter twice.
2196bool PPCInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
2197 unsigned NumT, unsigned ExtraT,
2198 MachineBasicBlock &FMBB,
2199 unsigned NumF, unsigned ExtraF,
2200 BranchProbability Probability) const {
2201 return !(MBBDefinesCTR(MBB&: TMBB) && MBBDefinesCTR(MBB&: FMBB));
2202}
2203
2205bool PPCInstrInfo::isPredicated(const MachineInstr &MI) const {
2206 // The predicated branches are identified by their type, not really by the
2207 // explicit presence of a predicate. Furthermore, some of them can be
  // predicated more than once. Because if-conversion won't try to predicate
  // any instruction which already claims to be predicated (by returning true
  // here), always return false. In doing so, we let isPredicable() be the
  // final word on whether or not the instruction can be (further) predicated.
2212
2213 return false;
2214}
2215
2216bool PPCInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
2217 const MachineBasicBlock *MBB,
2218 const MachineFunction &MF) const {
2219 switch (MI.getOpcode()) {
2220 default:
2221 break;
2222 // Set MFFS and MTFSF as scheduling boundary to avoid unexpected code motion
2223 // across them, since some FP operations may change content of FPSCR.
2224 // TODO: Model FPSCR in PPC instruction definitions and remove the workaround
2225 case PPC::MFFS:
2226 case PPC::MTFSF:
2227 case PPC::FENCE:
2228 return true;
2229 }
2230 return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF);
2231}
2232
2233bool PPCInstrInfo::PredicateInstruction(MachineInstr &MI,
2234 ArrayRef<MachineOperand> Pred) const {
2235 unsigned OpC = MI.getOpcode();
2236 if (OpC == PPC::BLR || OpC == PPC::BLR8) {
2237 if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
2238 bool isPPC64 = Subtarget.isPPC64();
2239 MI.setDesc(get(Opcode: Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR)
2240 : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
      // Need to add a Def and a Use for the CTR implicit operand.
2242 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
2243 .addReg(RegNo: Pred[1].getReg(), Flags: RegState::Implicit)
2244 .addReg(RegNo: Pred[1].getReg(), Flags: RegState::ImplicitDefine);
2245 } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
2246 MI.setDesc(get(Opcode: PPC::BCLR));
2247 MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(MO: Pred[1]);
2248 } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
2249 MI.setDesc(get(Opcode: PPC::BCLRn));
2250 MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(MO: Pred[1]);
2251 } else {
2252 MI.setDesc(get(Opcode: PPC::BCCLR));
2253 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
2254 .addImm(Val: Pred[0].getImm())
2255 .add(MO: Pred[1]);
2256 }
2257
2258 return true;
2259 } else if (OpC == PPC::B) {
2260 if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
2261 bool isPPC64 = Subtarget.isPPC64();
2262 MI.setDesc(get(Opcode: Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
2263 : (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
      // Need to add a Def and a Use for the CTR implicit operand.
2265 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
2266 .addReg(RegNo: Pred[1].getReg(), Flags: RegState::Implicit)
2267 .addReg(RegNo: Pred[1].getReg(), Flags: RegState::ImplicitDefine);
2268 } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
2269 MachineBasicBlock *MBB = MI.getOperand(i: 0).getMBB();
2270 MI.removeOperand(OpNo: 0);
2271
2272 MI.setDesc(get(Opcode: PPC::BC));
2273 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
2274 .add(MO: Pred[1])
2275 .addMBB(MBB);
2276 } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
2277 MachineBasicBlock *MBB = MI.getOperand(i: 0).getMBB();
2278 MI.removeOperand(OpNo: 0);
2279
2280 MI.setDesc(get(Opcode: PPC::BCn));
2281 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
2282 .add(MO: Pred[1])
2283 .addMBB(MBB);
2284 } else {
2285 MachineBasicBlock *MBB = MI.getOperand(i: 0).getMBB();
2286 MI.removeOperand(OpNo: 0);
2287
2288 MI.setDesc(get(Opcode: PPC::BCC));
2289 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
2290 .addImm(Val: Pred[0].getImm())
2291 .add(MO: Pred[1])
2292 .addMBB(MBB);
2293 }
2294
2295 return true;
2296 } else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 || OpC == PPC::BCTRL ||
2297 OpC == PPC::BCTRL8 || OpC == PPC::BCTRL_RM ||
2298 OpC == PPC::BCTRL8_RM) {
2299 if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR)
2300 llvm_unreachable("Cannot predicate bctr[l] on the ctr register");
2301
2302 bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8 ||
2303 OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM;
2304 bool isPPC64 = Subtarget.isPPC64();
2305
2306 if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
2307 MI.setDesc(get(Opcode: isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8)
2308 : (setLR ? PPC::BCCTRL : PPC::BCCTR)));
2309 MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(MO: Pred[1]);
2310 } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
2311 MI.setDesc(get(Opcode: isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n)
2312 : (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
2313 MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(MO: Pred[1]);
2314 } else {
2315 MI.setDesc(get(Opcode: isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8)
2316 : (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
2317 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
2318 .addImm(Val: Pred[0].getImm())
2319 .add(MO: Pred[1]);
2320 }
2321
    // Need to add a Def and a Use for the LR implicit operand.
2323 if (setLR)
2324 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
2325 .addReg(RegNo: isPPC64 ? PPC::LR8 : PPC::LR, Flags: RegState::Implicit)
2326 .addReg(RegNo: isPPC64 ? PPC::LR8 : PPC::LR, Flags: RegState::ImplicitDefine);
2327 if (OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM)
2328 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
2329 .addReg(RegNo: PPC::RM, Flags: RegState::ImplicitDefine);
2330
2331 return true;
2332 }
2333
2334 return false;
2335}
2336
2337bool PPCInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
2338 ArrayRef<MachineOperand> Pred2) const {
2339 assert(Pred1.size() == 2 && "Invalid PPC first predicate");
2340 assert(Pred2.size() == 2 && "Invalid PPC second predicate");
2341
2342 if (Pred1[1].getReg() == PPC::CTR8 || Pred1[1].getReg() == PPC::CTR)
2343 return false;
2344 if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR)
2345 return false;
2346
2347 // P1 can only subsume P2 if they test the same condition register.
2348 if (Pred1[1].getReg() != Pred2[1].getReg())
2349 return false;
2350
2351 PPC::Predicate P1 = (PPC::Predicate) Pred1[0].getImm();
2352 PPC::Predicate P2 = (PPC::Predicate) Pred2[0].getImm();
2353
2354 if (P1 == P2)
2355 return true;
2356
2357 // Does P1 subsume P2, e.g. GE subsumes GT.
2358 if (P1 == PPC::PRED_LE &&
2359 (P2 == PPC::PRED_LT || P2 == PPC::PRED_EQ))
2360 return true;
2361 if (P1 == PPC::PRED_GE &&
2362 (P2 == PPC::PRED_GT || P2 == PPC::PRED_EQ))
2363 return true;
2364
2365 return false;
2366}
2367
2368bool PPCInstrInfo::ClobbersPredicate(MachineInstr &MI,
2369 std::vector<MachineOperand> &Pred,
2370 bool SkipDead) const {
  // Note: At the present time, the contents of Pred from this function are
  // unused by IfConversion. This implementation follows ARM by pushing the
  // CR-defining operand. Because the 'DZ' and 'DNZ' counter forms count as
  // types of predicate, instructions defining CTR or CTR8 are also included
  // as predicate-defining instructions.
2376
2377 const TargetRegisterClass *RCs[] =
2378 { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
2379 &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };
2380
2381 bool Found = false;
2382 for (const MachineOperand &MO : MI.operands()) {
2383 for (unsigned c = 0; c < std::size(RCs) && !Found; ++c) {
2384 const TargetRegisterClass *RC = RCs[c];
2385 if (MO.isReg()) {
2386 if (MO.isDef() && RC->contains(Reg: MO.getReg())) {
2387 Pred.push_back(x: MO);
2388 Found = true;
2389 }
2390 } else if (MO.isRegMask()) {
2391 for (MCPhysReg R : *RC)
2392 if (MO.clobbersPhysReg(PhysReg: R)) {
2393 Pred.push_back(x: MO);
2394 Found = true;
2395 }
2396 }
2397 }
2398 }
2399
2400 return Found;
2401}
2402
2403bool PPCInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
2404 Register &SrcReg2, int64_t &Mask,
2405 int64_t &Value) const {
2406 unsigned Opc = MI.getOpcode();
2407
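  // Immediate compares report their 16-bit immediate in Value with
  // Mask = 0xFFFF; register-register and FP compares report Mask = Value = 0
  // and put the second source register in SrcReg2.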
2408 switch (Opc) {
2409 default: return false;
2410 case PPC::CMPWI:
2411 case PPC::CMPLWI:
2412 case PPC::CMPDI:
2413 case PPC::CMPLDI:
2414 SrcReg = MI.getOperand(i: 1).getReg();
2415 SrcReg2 = 0;
2416 Value = MI.getOperand(i: 2).getImm();
2417 Mask = 0xFFFF;
2418 return true;
2419 case PPC::CMPW:
2420 case PPC::CMPLW:
2421 case PPC::CMPD:
2422 case PPC::CMPLD:
2423 case PPC::FCMPUS:
2424 case PPC::FCMPUD:
2425 SrcReg = MI.getOperand(i: 1).getReg();
2426 SrcReg2 = MI.getOperand(i: 2).getReg();
2427 Value = 0;
2428 Mask = 0;
2429 return true;
2430 }
2431}
2432
2433bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
2434 Register SrcReg2, int64_t Mask,
2435 int64_t Value,
2436 const MachineRegisterInfo *MRI) const {
2437 if (DisableCmpOpt)
2438 return false;
2439
2440 int OpC = CmpInstr.getOpcode();
2441 Register CRReg = CmpInstr.getOperand(i: 0).getReg();
2442
2443 // FP record forms set CR1 based on the exception status bits, not a
2444 // comparison with zero.
2445 if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)
2446 return false;
2447
2448 const TargetRegisterInfo *TRI = &getRegisterInfo();
2449 // The record forms set the condition register based on a signed comparison
2450 // with zero (so says the ISA manual). This is not as straightforward as it
2451 // seems, however, because this is always a 64-bit comparison on PPC64, even
2452 // for instructions that are 32-bit in nature (like slw for example).
2453 // So, on PPC32, for unsigned comparisons, we can use the record forms only
2454 // for equality checks (as those don't depend on the sign). On PPC64,
2455 // we are restricted to equality for unsigned 64-bit comparisons and for
2456 // signed 32-bit comparisons the applicability is more restricted.
2457 bool isPPC64 = Subtarget.isPPC64();
2458 bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW;
2459 bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
2460 bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
2461
2462 // Look through copies unless that gets us to a physical register.
2463 Register ActualSrc = TRI->lookThruCopyLike(SrcReg, MRI);
2464 if (ActualSrc.isVirtual())
2465 SrcReg = ActualSrc;
2466
2467 // Get the unique definition of SrcReg.
2468 MachineInstr *MI = MRI->getUniqueVRegDef(Reg: SrcReg);
2469 if (!MI) return false;
2470
2471 bool equalityOnly = false;
2472 bool noSub = false;
2473 if (isPPC64) {
2474 if (is32BitSignedCompare) {
2475 // We can perform this optimization only if SrcReg is sign-extending.
2476 if (isSignExtended(Reg: SrcReg, MRI))
2477 noSub = true;
2478 else
2479 return false;
2480 } else if (is32BitUnsignedCompare) {
2481 // We can perform this optimization, equality only, if SrcReg is
2482 // zero-extending.
2483 if (isZeroExtended(Reg: SrcReg, MRI)) {
2484 noSub = true;
2485 equalityOnly = true;
2486 } else
2487 return false;
2488 } else
2489 equalityOnly = is64BitUnsignedCompare;
2490 } else
2491 equalityOnly = is32BitUnsignedCompare;
2492
2493 if (equalityOnly) {
2494 // We need to check the uses of the condition register in order to reject
2495 // non-equality comparisons.
2496 for (MachineRegisterInfo::use_instr_iterator
2497 I = MRI->use_instr_begin(RegNo: CRReg), IE = MRI->use_instr_end();
2498 I != IE; ++I) {
2499 MachineInstr *UseMI = &*I;
2500 if (UseMI->getOpcode() == PPC::BCC) {
2501 PPC::Predicate Pred = (PPC::Predicate)UseMI->getOperand(i: 0).getImm();
2502 unsigned PredCond = PPC::getPredicateCondition(Opcode: Pred);
2503 // We ignore hint bits when checking for non-equality comparisons.
2504 if (PredCond != PPC::PRED_EQ && PredCond != PPC::PRED_NE)
2505 return false;
2506 } else if (UseMI->getOpcode() == PPC::ISEL ||
2507 UseMI->getOpcode() == PPC::ISEL8) {
2508 unsigned SubIdx = UseMI->getOperand(i: 3).getSubReg();
2509 if (SubIdx != PPC::sub_eq)
2510 return false;
2511 } else
2512 return false;
2513 }
2514 }
2515
2516 MachineBasicBlock::iterator I = CmpInstr;
2517
2518 // Scan forward to find the first use of the compare.
2519 for (MachineBasicBlock::iterator EL = CmpInstr.getParent()->end(); I != EL;
2520 ++I) {
2521 bool FoundUse = false;
2522 for (MachineRegisterInfo::use_instr_iterator
2523 J = MRI->use_instr_begin(RegNo: CRReg), JE = MRI->use_instr_end();
2524 J != JE; ++J)
2525 if (&*J == &*I) {
2526 FoundUse = true;
2527 break;
2528 }
2529
2530 if (FoundUse)
2531 break;
2532 }
2533
2534 SmallVector<std::pair<MachineOperand*, PPC::Predicate>, 4> PredsToUpdate;
2535 SmallVector<std::pair<MachineOperand*, unsigned>, 4> SubRegsToUpdate;
2536
2537 // There are two possible candidates which can be changed to set CR[01].
2538 // One is MI, the other is a SUB instruction.
2539 // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
2540 MachineInstr *Sub = nullptr;
2541 if (SrcReg2 != 0)
2542 // MI is not a candidate for CMPrr.
2543 MI = nullptr;
2544 // FIXME: Conservatively refuse to convert an instruction which isn't in the
2545 // same BB as the comparison. This is to allow the check below to avoid calls
2546 // (and other explicit clobbers); instead we should really check for these
2547 // more explicitly (in at least a few predecessors).
2548 else if (MI->getParent() != CmpInstr.getParent())
2549 return false;
2550 else if (Value != 0) {
2551 // The record-form instructions set CR bit based on signed comparison
2552 // against 0. We try to convert a compare against 1 or -1 into a compare
2553 // against 0 to exploit record-form instructions. For example, we change
2554 // the condition "greater than -1" into "greater than or equal to 0"
2555 // and "less than 1" into "less than or equal to 0".
2556
    // Since we optimize the comparison based on a specific branch condition,
    // we don't optimize if the condition code is used more than once.
2559 if (equalityOnly || !MRI->hasOneUse(RegNo: CRReg))
2560 return false;
2561
2562 MachineInstr *UseMI = &*MRI->use_instr_begin(RegNo: CRReg);
2563 if (UseMI->getOpcode() != PPC::BCC)
2564 return false;
2565
2566 PPC::Predicate Pred = (PPC::Predicate)UseMI->getOperand(i: 0).getImm();
2567 unsigned PredCond = PPC::getPredicateCondition(Opcode: Pred);
2568 unsigned PredHint = PPC::getPredicateHint(Opcode: Pred);
2569 int16_t Immed = (int16_t)Value;
2570
2571 // When modifying the condition in the predicate, we propagate hint bits
2572 // from the original predicate to the new one.
2573 if (Immed == -1 && PredCond == PPC::PRED_GT)
2574 // We convert "greater than -1" into "greater than or equal to 0",
2575 // since we are assuming signed comparison by !equalityOnly
2576 Pred = PPC::getPredicate(Condition: PPC::PRED_GE, Hint: PredHint);
2577 else if (Immed == -1 && PredCond == PPC::PRED_LE)
2578 // We convert "less than or equal to -1" into "less than 0".
2579 Pred = PPC::getPredicate(Condition: PPC::PRED_LT, Hint: PredHint);
2580 else if (Immed == 1 && PredCond == PPC::PRED_LT)
2581 // We convert "less than 1" into "less than or equal to 0".
2582 Pred = PPC::getPredicate(Condition: PPC::PRED_LE, Hint: PredHint);
2583 else if (Immed == 1 && PredCond == PPC::PRED_GE)
2584 // We convert "greater than or equal to 1" into "greater than 0".
2585 Pred = PPC::getPredicate(Condition: PPC::PRED_GT, Hint: PredHint);
2586 else
2587 return false;
2588
2589 // Convert the comparison and its user to a compare against zero with the
2590 // appropriate predicate on the branch. Zero comparison might provide
2591 // optimization opportunities post-RA (see optimization in
2592 // PPCPreEmitPeephole.cpp).
2593 UseMI->getOperand(i: 0).setImm(Pred);
2594 CmpInstr.getOperand(i: 2).setImm(0);
2595 }
2596
  // Get ready to iterate backward from CmpInstr, searching for a SUB that
  // can take over the comparison.
  --I;

  MachineBasicBlock::iterator E = MI, B = CmpInstr.getParent()->begin();
2602
2603 for (; I != E && !noSub; --I) {
2604 const MachineInstr &Instr = *I;
2605 unsigned IOpC = Instr.getOpcode();
2606
2607 if (&*I != &CmpInstr && (Instr.modifiesRegister(Reg: PPC::CR0, TRI) ||
2608 Instr.readsRegister(Reg: PPC::CR0, TRI)))
2609 // This instruction modifies or uses the record condition register after
2610 // the one we want to change. While we could do this transformation, it
2611 // would likely not be profitable. This transformation removes one
2612 // instruction, and so even forcing RA to generate one move probably
2613 // makes it unprofitable.
2614 return false;
2615
2616 // Check whether CmpInstr can be made redundant by the current instruction.
2617 if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
2618 OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
2619 (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
2620 ((Instr.getOperand(i: 1).getReg() == SrcReg &&
2621 Instr.getOperand(i: 2).getReg() == SrcReg2) ||
2622 (Instr.getOperand(i: 1).getReg() == SrcReg2 &&
2623 Instr.getOperand(i: 2).getReg() == SrcReg))) {
2624 Sub = &*I;
2625 break;
2626 }
2627
    if (I == B)
      // We reached the beginning of the block without finding a convertible
      // definition.
      return false;
2631 }
2632
2633 // Return false if no candidates exist.
2634 if (!MI && !Sub)
2635 return false;
2636
2637 // The single candidate is called MI.
2638 if (!MI) MI = Sub;
2639
2640 int NewOpC = -1;
2641 int MIOpC = MI->getOpcode();
2642 if (MIOpC == PPC::ANDI_rec || MIOpC == PPC::ANDI8_rec ||
2643 MIOpC == PPC::ANDIS_rec || MIOpC == PPC::ANDIS8_rec)
2644 NewOpC = MIOpC;
2645 else {
2646 NewOpC = PPC::getRecordFormOpcode(Opcode: MIOpC);
2647 if (NewOpC == -1 && PPC::getNonRecordFormOpcode(Opcode: MIOpC) != -1)
2648 NewOpC = MIOpC;
2649 }
2650
2651 // FIXME: On the non-embedded POWER architectures, only some of the record
2652 // forms are fast, and we should use only the fast ones.
2653
2654 // The defining instruction has a record form (or is already a record
2655 // form). It is possible, however, that we'll need to reverse the condition
2656 // code of the users.
2657 if (NewOpC == -1)
2658 return false;
2659
  // This transformation should not be performed if `nsw` is missing and the
  // comparison is not `equalityOnly`: on overflow, sub_lt and sub_gt in CRReg
  // do not reflect the correct ordering. If `equalityOnly` is true, sub_eq
  // still correctly indicates whether the compared values are equal, so the
  // optimization remains valid.
2664 if (!equalityOnly && (NewOpC == PPC::SUBF_rec || NewOpC == PPC::SUBF8_rec) &&
2665 Sub && !Sub->getFlag(Flag: MachineInstr::NoSWrap))
2666 return false;
2667
  // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based on CMP
  // needs to be updated to be based on SUB. Push the condition code operands
  // to PredsToUpdate/SubRegsToUpdate; if it is safe to remove CmpInstr, the
  // condition codes of these operands will be modified.
  // Here, Value == 0 means we have not converted a comparison against 1 or -1
  // into a comparison against 0 (a conversion that also modifies the
  // predicate, see above).
2674 bool ShouldSwap = false;
2675 if (Sub && Value == 0) {
2676 ShouldSwap = SrcReg2 != 0 && Sub->getOperand(i: 1).getReg() == SrcReg2 &&
2677 Sub->getOperand(i: 2).getReg() == SrcReg;
2678
    // The operands of subf are reversed relative to sub, so the sense of the
    // swap must be inverted in the fixed-point case.
    ShouldSwap = !ShouldSwap;
2682 }
2683
2684 if (ShouldSwap)
2685 for (MachineRegisterInfo::use_instr_iterator
2686 I = MRI->use_instr_begin(RegNo: CRReg), IE = MRI->use_instr_end();
2687 I != IE; ++I) {
2688 MachineInstr *UseMI = &*I;
2689 if (UseMI->getOpcode() == PPC::BCC) {
2690 PPC::Predicate Pred = (PPC::Predicate) UseMI->getOperand(i: 0).getImm();
2691 unsigned PredCond = PPC::getPredicateCondition(Opcode: Pred);
2692 assert((!equalityOnly ||
2693 PredCond == PPC::PRED_EQ || PredCond == PPC::PRED_NE) &&
2694 "Invalid predicate for equality-only optimization");
2695 (void)PredCond; // To suppress warning in release build.
2696 PredsToUpdate.push_back(Elt: std::make_pair(x: &(UseMI->getOperand(i: 0)),
2697 y: PPC::getSwappedPredicate(Opcode: Pred)));
2698 } else if (UseMI->getOpcode() == PPC::ISEL ||
2699 UseMI->getOpcode() == PPC::ISEL8) {
2700 unsigned NewSubReg = UseMI->getOperand(i: 3).getSubReg();
2701 assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
2702 "Invalid CR bit for equality-only optimization");
2703
2704 if (NewSubReg == PPC::sub_lt)
2705 NewSubReg = PPC::sub_gt;
2706 else if (NewSubReg == PPC::sub_gt)
2707 NewSubReg = PPC::sub_lt;
2708
2709 SubRegsToUpdate.push_back(Elt: std::make_pair(x: &(UseMI->getOperand(i: 3)),
2710 y&: NewSubReg));
2711 } else // We need to abort on a user we don't understand.
2712 return false;
2713 }
  assert(!(Value != 0 && ShouldSwap) &&
         "Non-zero immediate support and ShouldSwap "
         "may conflict in updating predicate");
2717
  // Replace the compare with a copy of CR0 (now set by the record-form
  // instruction) into the compare's old result register. If the instruction
  // was not previously in record form, set the kill flag on CR0.
2721 CmpInstr.eraseFromParent();
2722
2723 MachineBasicBlock::iterator MII = MI;
2724 BuildMI(BB&: *MI->getParent(), I: std::next(x: MII), MIMD: MI->getDebugLoc(),
2725 MCID: get(Opcode: TargetOpcode::COPY), DestReg: CRReg)
2726 .addReg(RegNo: PPC::CR0, Flags: getKillRegState(B: MIOpC != NewOpC));
2727
  // Even if the CR0 register was dead before, it is live now, since the
  // instruction we just built uses it.
2730 MI->clearRegisterDeads(Reg: PPC::CR0);
2731
2732 if (MIOpC != NewOpC) {
2733 // We need to be careful here: we're replacing one instruction with
2734 // another, and we need to make sure that we get all of the right
2735 // implicit uses and defs. On the other hand, the caller may be holding
2736 // an iterator to this instruction, and so we can't delete it (this is
2737 // specifically the case if this is the instruction directly after the
2738 // compare).
2739
2740 // Rotates are expensive instructions. If we're emitting a record-form
2741 // rotate that can just be an andi/andis, we should just emit that.
2742 if (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINM8) {
2743 Register GPRRes = MI->getOperand(i: 0).getReg();
2744 int64_t SH = MI->getOperand(i: 2).getImm();
2745 int64_t MB = MI->getOperand(i: 3).getImm();
2746 int64_t ME = MI->getOperand(i: 4).getImm();
2747 // We can only do this if both the start and end of the mask are in the
2748 // same halfword.
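      // For example, rlwinm rD, rS, 0, 24, 31 (mask 0x000000FF) becomes
      // andi. rD, rS, 0xFF, while a mask entirely in the high halfword, such
      // as MB = 8, ME = 15 (mask 0x00FF0000), becomes andis. with the mask
      // value shifted right by 16.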
2749 bool MBInLoHWord = MB >= 16;
2750 bool MEInLoHWord = ME >= 16;
2751 uint64_t Mask = ~0LLU;
2752
2753 if (MB <= ME && MBInLoHWord == MEInLoHWord && SH == 0) {
2754 Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1);
2755 // The mask value needs to shift right 16 if we're emitting andis.
2756 Mask >>= MBInLoHWord ? 0 : 16;
2757 NewOpC = MIOpC == PPC::RLWINM
2758 ? (MBInLoHWord ? PPC::ANDI_rec : PPC::ANDIS_rec)
2759 : (MBInLoHWord ? PPC::ANDI8_rec : PPC::ANDIS8_rec);
2760 } else if (MRI->use_empty(RegNo: GPRRes) && (ME == 31) &&
2761 (ME - MB + 1 == SH) && (MB >= 16)) {
        // If we are rotating by exactly the number of bits in the mask and
        // the mask sits in the least significant bits of the register, that's
        // just an andis (as long as the GPR result has no uses).
2765 Mask = ((1LLU << 32) - 1) & ~((1LLU << (32 - SH)) - 1);
2766 Mask >>= 16;
2767 NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDIS_rec : PPC::ANDIS8_rec;
2768 }
2769 // If we've set the mask, we can transform.
2770 if (Mask != ~0LLU) {
2771 MI->removeOperand(OpNo: 4);
2772 MI->removeOperand(OpNo: 3);
2773 MI->getOperand(i: 2).setImm(Mask);
2774 NumRcRotatesConvertedToRcAnd++;
2775 }
2776 } else if (MIOpC == PPC::RLDICL && MI->getOperand(i: 2).getImm() == 0) {
2777 int64_t MB = MI->getOperand(i: 3).getImm();
2778 if (MB >= 48) {
2779 uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
2780 NewOpC = PPC::ANDI8_rec;
2781 MI->removeOperand(OpNo: 3);
2782 MI->getOperand(i: 2).setImm(Mask);
2783 NumRcRotatesConvertedToRcAnd++;
2784 }
2785 }
2786
2787 const MCInstrDesc &NewDesc = get(Opcode: NewOpC);
2788 MI->setDesc(NewDesc);
2789
2790 for (MCPhysReg ImpDef : NewDesc.implicit_defs()) {
2791 if (!MI->definesRegister(Reg: ImpDef, /*TRI=*/nullptr)) {
2792 MI->addOperand(MF&: *MI->getParent()->getParent(),
2793 Op: MachineOperand::CreateReg(Reg: ImpDef, isDef: true, isImp: true));
2794 }
2795 }
2796 for (MCPhysReg ImpUse : NewDesc.implicit_uses()) {
2797 if (!MI->readsRegister(Reg: ImpUse, /*TRI=*/nullptr)) {
2798 MI->addOperand(MF&: *MI->getParent()->getParent(),
2799 Op: MachineOperand::CreateReg(Reg: ImpUse, isDef: false, isImp: true));
2800 }
2801 }
2802 }
2803 assert(MI->definesRegister(PPC::CR0, /*TRI=*/nullptr) &&
2804 "Record-form instruction does not define cr0?");
2805
  // Modify the condition codes of the operands collected in PredsToUpdate and
  // SubRegsToUpdate. Since we have SUB(r1, r2) and CMP(r2, r1), the condition
  // codes need to be flipped: r2 > r1 becomes r1 < r2, r2 < r1 becomes
  // r1 > r2, etc.
2809 for (unsigned i = 0, e = PredsToUpdate.size(); i < e; i++)
2810 PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);
2811
2812 for (unsigned i = 0, e = SubRegsToUpdate.size(); i < e; i++)
2813 SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);
2814
2815 return true;
2816}
2817
2818bool PPCInstrInfo::optimizeCmpPostRA(MachineInstr &CmpMI) const {
2819 MachineRegisterInfo *MRI = &CmpMI.getParent()->getParent()->getRegInfo();
2820 if (MRI->isSSA())
2821 return false;
2822
2823 Register SrcReg, SrcReg2;
2824 int64_t CmpMask, CmpValue;
2825 if (!analyzeCompare(MI: CmpMI, SrcReg, SrcReg2, Mask&: CmpMask, Value&: CmpValue))
2826 return false;
2827
2828 // Try to optimize the comparison against 0.
2829 if (CmpValue || !CmpMask || SrcReg2)
2830 return false;
2831
  // The record forms set the condition register based on a signed comparison
  // with zero (see comments in optimizeCompareInstr). Since we can't do the
  // equality checks post-RA, we are more restricted with an unsigned
  // comparison.
2836 unsigned Opc = CmpMI.getOpcode();
2837 if (Opc == PPC::CMPLWI || Opc == PPC::CMPLDI)
2838 return false;
2839
  // The record forms are always based on a 64-bit comparison on PPC64
  // (similarly, a 32-bit comparison on PPC32), while CMPWI is a 32-bit
  // comparison. Since we can't do the equality checks post-RA, we bail out
  // in this case.
2844 if (Subtarget.isPPC64() && Opc == PPC::CMPWI)
2845 return false;
2846
  // CmpMI can't be deleted if it has an implicit def.
2848 if (CmpMI.hasImplicitDef())
2849 return false;
2850
2851 bool SrcRegHasOtherUse = false;
2852 MachineInstr *SrcMI = getDefMIPostRA(Reg: SrcReg, MI&: CmpMI, SeenIntermediateUse&: SrcRegHasOtherUse);
2853 if (!SrcMI || !SrcMI->definesRegister(Reg: SrcReg, /*TRI=*/nullptr))
2854 return false;
2855
2856 MachineOperand RegMO = CmpMI.getOperand(i: 0);
2857 Register CRReg = RegMO.getReg();
2858 if (CRReg != PPC::CR0)
2859 return false;
2860
2861 // Make sure there is no def/use of CRReg between SrcMI and CmpMI.
2862 bool SeenUseOfCRReg = false;
2863 bool IsCRRegKilled = false;
2864 if (!isRegElgibleForForwarding(RegMO, DefMI: *SrcMI, MI: CmpMI, KillDefMI: false, IsFwdFeederRegKilled&: IsCRRegKilled,
2865 SeenIntermediateUse&: SeenUseOfCRReg) ||
2866 SrcMI->definesRegister(Reg: CRReg, /*TRI=*/nullptr) || SeenUseOfCRReg)
2867 return false;
2868
2869 int SrcMIOpc = SrcMI->getOpcode();
2870 int NewOpC = PPC::getRecordFormOpcode(Opcode: SrcMIOpc);
2871 if (NewOpC == -1)
2872 return false;
2873
2874 LLVM_DEBUG(dbgs() << "Replace Instr: ");
2875 LLVM_DEBUG(SrcMI->dump());
2876
2877 const MCInstrDesc &NewDesc = get(Opcode: NewOpC);
2878 SrcMI->setDesc(NewDesc);
2879 MachineInstrBuilder(*SrcMI->getParent()->getParent(), SrcMI)
2880 .addReg(RegNo: CRReg, Flags: RegState::ImplicitDefine);
2881 SrcMI->clearRegisterDeads(Reg: CRReg);
2882
2883 assert(SrcMI->definesRegister(PPC::CR0, /*TRI=*/nullptr) &&
2884 "Record-form instruction does not define cr0?");
2885
2886 LLVM_DEBUG(dbgs() << "with: ");
2887 LLVM_DEBUG(SrcMI->dump());
2888 LLVM_DEBUG(dbgs() << "Delete dead instruction: ");
2889 LLVM_DEBUG(CmpMI.dump());
2890 return true;
2891}
2892
2893bool PPCInstrInfo::getMemOperandsWithOffsetWidth(
2894 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
2895 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
2896 const TargetRegisterInfo *TRI) const {
2897 const MachineOperand *BaseOp;
2898 OffsetIsScalable = false;
2899 if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
2900 return false;
2901 BaseOps.push_back(Elt: BaseOp);
2902 return true;
2903}
2904
2905static bool isLdStSafeToCluster(const MachineInstr &LdSt,
2906 const TargetRegisterInfo *TRI) {
  // If this is a volatile or ordered load/store, don't mess with it.
2908 if (LdSt.hasOrderedMemoryRef() || LdSt.getNumExplicitOperands() != 3)
2909 return false;
2910
2911 if (LdSt.getOperand(i: 2).isFI())
2912 return true;
2913
2914 assert(LdSt.getOperand(2).isReg() && "Expected a reg operand.");
  // Can't cluster if the instruction modifies the base register
  // or is an update form, e.g. ld r2, 3(r2).
2917 if (LdSt.modifiesRegister(Reg: LdSt.getOperand(i: 2).getReg(), TRI))
2918 return false;
2919
2920 return true;
2921}
2922
// Only cluster instruction pairs that have the same opcode and are
// clusterable according to the PowerPC specification.
2925static bool isClusterableLdStOpcPair(unsigned FirstOpc, unsigned SecondOpc,
2926 const PPCSubtarget &Subtarget) {
2927 switch (FirstOpc) {
2928 default:
2929 return false;
2930 case PPC::STD:
2931 case PPC::STFD:
2932 case PPC::STXSD:
2933 case PPC::DFSTOREf64:
2934 return FirstOpc == SecondOpc;
  // The PowerPC backend has opcodes STW/STW8 for the "stw" instruction to
  // handle 32-bit and 64-bit instruction selection. They form a clusterable
  // pair even though they are different opcodes.
2938 case PPC::STW:
2939 case PPC::STW8:
2940 return SecondOpc == PPC::STW || SecondOpc == PPC::STW8;
2941 }
2942}
2943
2944bool PPCInstrInfo::shouldClusterMemOps(
2945 ArrayRef<const MachineOperand *> BaseOps1, int64_t OpOffset1,
2946 bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2,
2947 int64_t OpOffset2, bool OffsetIsScalable2, unsigned ClusterSize,
2948 unsigned NumBytes) const {
2949
2950 assert(BaseOps1.size() == 1 && BaseOps2.size() == 1);
2951 const MachineOperand &BaseOp1 = *BaseOps1.front();
2952 const MachineOperand &BaseOp2 = *BaseOps2.front();
2953 assert((BaseOp1.isReg() || BaseOp1.isFI()) &&
2954 "Only base registers and frame indices are supported.");
2955
  // ClusterSize is the number of memory operations that will have been
  // clustered if this hook returns true.
  // Don't grow a cluster beyond two memory ops.
2959 if (ClusterSize > 2)
2960 return false;
2961
2962 // Cluster the load/store only when they have the same base
2963 // register or FI.
2964 if ((BaseOp1.isReg() != BaseOp2.isReg()) ||
2965 (BaseOp1.isReg() && BaseOp1.getReg() != BaseOp2.getReg()) ||
2966 (BaseOp1.isFI() && BaseOp1.getIndex() != BaseOp2.getIndex()))
2967 return false;
2968
2969 // Check if the load/store are clusterable according to the PowerPC
2970 // specification.
2971 const MachineInstr &FirstLdSt = *BaseOp1.getParent();
2972 const MachineInstr &SecondLdSt = *BaseOp2.getParent();
2973 unsigned FirstOpc = FirstLdSt.getOpcode();
2974 unsigned SecondOpc = SecondLdSt.getOpcode();
2975 const TargetRegisterInfo *TRI = &getRegisterInfo();
2976 // Cluster the load/store only when they have the same opcode, and they are
2977 // clusterable opcode according to PowerPC specification.
2978 if (!isClusterableLdStOpcPair(FirstOpc, SecondOpc, Subtarget))
2979 return false;
2980
2981 // Can't cluster load/store that have ordered or volatile memory reference.
2982 if (!isLdStSafeToCluster(LdSt: FirstLdSt, TRI) ||
2983 !isLdStSafeToCluster(LdSt: SecondLdSt, TRI))
2984 return false;
2985
2986 int64_t Offset1 = 0, Offset2 = 0;
2987 LocationSize Width1 = LocationSize::precise(Value: 0),
2988 Width2 = LocationSize::precise(Value: 0);
2989 const MachineOperand *Base1 = nullptr, *Base2 = nullptr;
2990 if (!getMemOperandWithOffsetWidth(LdSt: FirstLdSt, BaseOp&: Base1, Offset&: Offset1, Width&: Width1, TRI) ||
2991 !getMemOperandWithOffsetWidth(LdSt: SecondLdSt, BaseOp&: Base2, Offset&: Offset2, Width&: Width2, TRI) ||
2992 Width1 != Width2)
2993 return false;
2994
  assert(Base1 == &BaseOp1 && Base2 == &BaseOp2 &&
         "getMemOperandWithOffsetWidth returned an incorrect base op");
2997 // The caller should already have ordered FirstMemOp/SecondMemOp by offset.
2998 assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
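  // The two ops cluster only if they are adjacent in memory; e.g. an std at
  // offset 0 (width 8) pairs with an std at offset 8.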
2999 return Offset1 + (int64_t)Width1.getValue() == Offset2;
3000}
3001
/// getInstSizeInBytes - Return the number of bytes of code the specified
/// instruction occupies. This returns the maximum number of bytes.
3004///
3005unsigned PPCInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
3006 unsigned Opcode = MI.getOpcode();
3007
3008 switch (Opcode) {
3009 case PPC::INLINEASM:
3010 case PPC::INLINEASM_BR: {
3011 const MachineFunction *MF = MI.getParent()->getParent();
3012 const char *AsmStr = MI.getOperand(i: 0).getSymbolName();
3013 return getInlineAsmLength(Str: AsmStr, MAI: *MF->getTarget().getMCAsmInfo());
3014 }
3015 case TargetOpcode::STACKMAP: {
3016 StackMapOpers Opers(&MI);
3017 return Opers.getNumPatchBytes();
3018 }
3019 case TargetOpcode::PATCHPOINT: {
3020 PatchPointOpers Opers(&MI);
3021 return Opers.getNumPatchBytes();
3022 }
3023 case TargetOpcode::PATCHABLE_FUNCTION_ENTER: {
3024 const MachineFunction *MF = MI.getParent()->getParent();
3025 const Function &F = MF->getFunction();
3026 unsigned Num = 0;
3027 (void)F.getFnAttribute(Kind: "patchable-function-entry")
3028 .getValueAsString()
3029 .getAsInteger(Radix: 10, Result&: Num);
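    // Each PPC instruction is 4 bytes, so e.g.
    // "patchable-function-entry"="2" reserves 8 bytes of patch space.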
3030 return Num * 4;
3031 }
3032 default:
3033 return get(Opcode).getSize();
3034 }
3035}
3036
3037std::pair<unsigned, unsigned>
3038PPCInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
3039 // PPC always uses a direct mask.
3040 return std::make_pair(x&: TF, y: 0u);
3041}
3042
3043ArrayRef<std::pair<unsigned, const char *>>
3044PPCInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
3045 using namespace PPCII;
3046 static const std::pair<unsigned, const char *> TargetFlags[] = {
3047 {MO_PLT, "ppc-plt"},
3048 {MO_PIC_FLAG, "ppc-pic"},
3049 {MO_PCREL_FLAG, "ppc-pcrel"},
3050 {MO_GOT_FLAG, "ppc-got"},
3051 {MO_PCREL_OPT_FLAG, "ppc-opt-pcrel"},
3052 {MO_TLSGD_FLAG, "ppc-tlsgd"},
3053 {MO_TPREL_FLAG, "ppc-tprel"},
3054 {MO_TLSLDM_FLAG, "ppc-tlsldm"},
3055 {MO_TLSLD_FLAG, "ppc-tlsld"},
3056 {MO_TLSGDM_FLAG, "ppc-tlsgdm"},
3057 {MO_GOT_TLSGD_PCREL_FLAG, "ppc-got-tlsgd-pcrel"},
3058 {MO_GOT_TLSLD_PCREL_FLAG, "ppc-got-tlsld-pcrel"},
3059 {MO_GOT_TPREL_PCREL_FLAG, "ppc-got-tprel-pcrel"},
3060 {MO_LO, "ppc-lo"},
3061 {MO_HA, "ppc-ha"},
3062 {MO_TPREL_LO, "ppc-tprel-lo"},
3063 {MO_TPREL_HA, "ppc-tprel-ha"},
3064 {MO_DTPREL_LO, "ppc-dtprel-lo"},
3065 {MO_TLSLD_LO, "ppc-tlsld-lo"},
3066 {MO_TOC_LO, "ppc-toc-lo"},
3067 {MO_TLS, "ppc-tls"},
3068 {MO_PIC_HA_FLAG, "ppc-ha-pic"},
3069 {MO_PIC_LO_FLAG, "ppc-lo-pic"},
3070 {MO_TPREL_PCREL_FLAG, "ppc-tprel-pcrel"},
3071 {MO_TLS_PCREL_FLAG, "ppc-tls-pcrel"},
3072 {MO_GOT_PCREL_FLAG, "ppc-got-pcrel"},
3073 };
3074 return ArrayRef(TargetFlags);
3075}
3076
3077// Expand VSX Memory Pseudo instruction to either a VSX or a FP instruction.
3078// The VSX versions have the advantage of a full 64-register target whereas
3079// the FP ones have the advantage of lower latency and higher throughput. So
3080// what we are after is using the faster instructions in low register pressure
3081// situations and using the larger register file in high register pressure
3082// situations.
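// For example (illustrative operands), DFLOADf64 %f1, 16(%x3) lowers to
// LFD %f1, 16(%x3) because %f1 is among the first 32 VSRs, while the same
// pseudo targeting an upper VSR such as %v5 lowers to LXSD %v5, 16(%x3).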
3083bool PPCInstrInfo::expandVSXMemPseudo(MachineInstr &MI) const {
3084 unsigned UpperOpcode, LowerOpcode;
3085 switch (MI.getOpcode()) {
3086 case PPC::DFLOADf32:
3087 UpperOpcode = PPC::LXSSP;
3088 LowerOpcode = PPC::LFS;
3089 break;
3090 case PPC::DFLOADf64:
3091 UpperOpcode = PPC::LXSD;
3092 LowerOpcode = PPC::LFD;
3093 break;
3094 case PPC::DFSTOREf32:
3095 UpperOpcode = PPC::STXSSP;
3096 LowerOpcode = PPC::STFS;
3097 break;
3098 case PPC::DFSTOREf64:
3099 UpperOpcode = PPC::STXSD;
3100 LowerOpcode = PPC::STFD;
3101 break;
3102 case PPC::XFLOADf32:
3103 UpperOpcode = PPC::LXSSPX;
3104 LowerOpcode = PPC::LFSX;
3105 break;
3106 case PPC::XFLOADf64:
3107 UpperOpcode = PPC::LXSDX;
3108 LowerOpcode = PPC::LFDX;
3109 break;
3110 case PPC::XFSTOREf32:
3111 UpperOpcode = PPC::STXSSPX;
3112 LowerOpcode = PPC::STFSX;
3113 break;
3114 case PPC::XFSTOREf64:
3115 UpperOpcode = PPC::STXSDX;
3116 LowerOpcode = PPC::STFDX;
3117 break;
3118 case PPC::LIWAX:
3119 UpperOpcode = PPC::LXSIWAX;
3120 LowerOpcode = PPC::LFIWAX;
3121 break;
3122 case PPC::LIWZX:
3123 UpperOpcode = PPC::LXSIWZX;
3124 LowerOpcode = PPC::LFIWZX;
3125 break;
3126 case PPC::STIWX:
3127 UpperOpcode = PPC::STXSIWX;
3128 LowerOpcode = PPC::STFIWX;
3129 break;
3130 default:
3131 llvm_unreachable("Unknown Operation!");
3132 }
3133
3134 Register TargetReg = MI.getOperand(i: 0).getReg();
3135 unsigned Opcode;
3136 if ((TargetReg >= PPC::F0 && TargetReg <= PPC::F31) ||
3137 (TargetReg >= PPC::VSL0 && TargetReg <= PPC::VSL31))
3138 Opcode = LowerOpcode;
3139 else
3140 Opcode = UpperOpcode;
3141 MI.setDesc(get(Opcode));
3142 return true;
3143}
3144
3145static bool isAnImmediateOperand(const MachineOperand &MO) {
3146 return MO.isCPI() || MO.isGlobal() || MO.isImm();
3147}
3148
3149bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
3150 auto &MBB = *MI.getParent();
3151 auto DL = MI.getDebugLoc();
3152
3153 switch (MI.getOpcode()) {
3154 case PPC::BUILD_UACC: {
3155 MCRegister ACC = MI.getOperand(i: 0).getReg();
3156 MCRegister UACC = MI.getOperand(i: 1).getReg();
3157 if (ACC - PPC::ACC0 != UACC - PPC::UACC0) {
3158 MCRegister SrcVSR = PPC::VSL0 + (UACC - PPC::UACC0) * 4;
3159 MCRegister DstVSR = PPC::VSL0 + (ACC - PPC::ACC0) * 4;
3160 // FIXME: This can easily be improved to look up to the top of the MBB
3161 // to see if the inputs are XXLOR's. If they are and SrcReg is killed,
3162 // we can just re-target any such XXLOR's to DstVSR + offset.
3163 for (int VecNo = 0; VecNo < 4; VecNo++)
3164 BuildMI(BB&: MBB, I&: MI, MIMD: DL, MCID: get(Opcode: PPC::XXLOR), DestReg: DstVSR + VecNo)
3165 .addReg(RegNo: SrcVSR + VecNo)
3166 .addReg(RegNo: SrcVSR + VecNo);
3167 }
3168 // BUILD_UACC is expanded to 4 copies of the underlying vsx registers.
3169 // So after building the 4 copies, we can replace the BUILD_UACC instruction
3170 // with a NOP.
3171 [[fallthrough]];
3172 }
3173 case PPC::KILL_PAIR: {
3174 MI.setDesc(get(Opcode: PPC::UNENCODED_NOP));
3175 MI.removeOperand(OpNo: 1);
3176 MI.removeOperand(OpNo: 0);
3177 return true;
3178 }
3179 case TargetOpcode::LOAD_STACK_GUARD: {
3180 auto M = MBB.getParent()->getFunction().getParent();
    assert(
        (Subtarget.isTargetLinux() || M->getStackProtectorGuard() == "tls") &&
        "Only the Linux target or tls mode is expected to contain "
        "LOAD_STACK_GUARD");
3185 int64_t Offset;
3186 if (M->getStackProtectorGuard() == "tls")
3187 Offset = M->getStackProtectorGuardOffset();
3188 else
3189 Offset = Subtarget.isPPC64() ? -0x7010 : -0x7008;
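    // E.g. on Linux PPC64 the default expands to "LD <reg>, -0x7010(X13)",
    // the canary slot relative to the thread pointer.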
3190 const unsigned Reg = Subtarget.isPPC64() ? PPC::X13 : PPC::R2;
3191 MI.setDesc(get(Opcode: Subtarget.isPPC64() ? PPC::LD : PPC::LWZ));
3192 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
3193 .addImm(Val: Offset)
3194 .addReg(RegNo: Reg);
3195 return true;
3196 }
3197 case PPC::PPCLdFixedAddr: {
    assert((Subtarget.getTargetTriple().isOSGlibc() ||
            Subtarget.getTargetTriple().isMusl()) &&
           "Only targets with Glibc or musl are expected to contain "
           "PPCLdFixedAddr");
3201 int64_t Offset = 0;
3202 const unsigned Reg = Subtarget.isPPC64() ? PPC::X13 : PPC::R2;
3203 MI.setDesc(get(Opcode: PPC::LWZ));
3204 uint64_t FAType = MI.getOperand(i: 1).getImm();
3205#undef PPC_LNX_FEATURE
3206#undef PPC_CPU
3207#define PPC_LNX_DEFINE_OFFSETS
3208#include "llvm/TargetParser/PPCTargetParser.def"
3209 bool IsLE = Subtarget.isLittleEndian();
3210 bool Is64 = Subtarget.isPPC64();
3211 if (FAType == PPC_FAWORD_HWCAP) {
3212 if (IsLE)
3213 Offset = Is64 ? PPC_HWCAP_OFFSET_LE64 : PPC_HWCAP_OFFSET_LE32;
3214 else
3215 Offset = Is64 ? PPC_HWCAP_OFFSET_BE64 : PPC_HWCAP_OFFSET_BE32;
3216 } else if (FAType == PPC_FAWORD_HWCAP2) {
3217 if (IsLE)
3218 Offset = Is64 ? PPC_HWCAP2_OFFSET_LE64 : PPC_HWCAP2_OFFSET_LE32;
3219 else
3220 Offset = Is64 ? PPC_HWCAP2_OFFSET_BE64 : PPC_HWCAP2_OFFSET_BE32;
3221 } else if (FAType == PPC_FAWORD_CPUID) {
3222 if (IsLE)
3223 Offset = Is64 ? PPC_CPUID_OFFSET_LE64 : PPC_CPUID_OFFSET_LE32;
3224 else
3225 Offset = Is64 ? PPC_CPUID_OFFSET_BE64 : PPC_CPUID_OFFSET_BE32;
3226 }
3227 assert(Offset && "Do not know the offset for this fixed addr load");
3228 MI.removeOperand(OpNo: 1);
3229 Subtarget.getTargetMachine().setGlibcHWCAPAccess();
3230 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
3231 .addImm(Val: Offset)
3232 .addReg(RegNo: Reg);
3233 return true;
3234#define PPC_TGT_PARSER_UNDEF_MACROS
3235#include "llvm/TargetParser/PPCTargetParser.def"
3236#undef PPC_TGT_PARSER_UNDEF_MACROS
3237 }
3238 case PPC::DFLOADf32:
3239 case PPC::DFLOADf64:
3240 case PPC::DFSTOREf32:
3241 case PPC::DFSTOREf64: {
3242 assert(Subtarget.hasP9Vector() &&
3243 "Invalid D-Form Pseudo-ops on Pre-P9 target.");
3244 assert(MI.getOperand(2).isReg() &&
3245 isAnImmediateOperand(MI.getOperand(1)) &&
3246 "D-form op must have register and immediate operands");
3247 return expandVSXMemPseudo(MI);
3248 }
3249 case PPC::XFLOADf32:
3250 case PPC::XFSTOREf32:
3251 case PPC::LIWAX:
3252 case PPC::LIWZX:
3253 case PPC::STIWX: {
3254 assert(Subtarget.hasP8Vector() &&
3255 "Invalid X-Form Pseudo-ops on Pre-P8 target.");
3256 assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
3257 "X-form op must have register and register operands");
3258 return expandVSXMemPseudo(MI);
3259 }
3260 case PPC::XFLOADf64:
3261 case PPC::XFSTOREf64: {
3262 assert(Subtarget.hasVSX() &&
3263 "Invalid X-Form Pseudo-ops on target that has no VSX.");
3264 assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
3265 "X-form op must have register and register operands");
3266 return expandVSXMemPseudo(MI);
3267 }
3268 case PPC::SPILLTOVSR_LD: {
3269 Register TargetReg = MI.getOperand(i: 0).getReg();
3270 if (PPC::VSFRCRegClass.contains(Reg: TargetReg)) {
3271 MI.setDesc(get(Opcode: PPC::DFLOADf64));
3272 return expandPostRAPseudo(MI);
3273 }
    MI.setDesc(get(Opcode: PPC::LD));
3276 return true;
3277 }
3278 case PPC::SPILLTOVSR_ST: {
3279 Register SrcReg = MI.getOperand(i: 0).getReg();
3280 if (PPC::VSFRCRegClass.contains(Reg: SrcReg)) {
3281 NumStoreSPILLVSRRCAsVec++;
3282 MI.setDesc(get(Opcode: PPC::DFSTOREf64));
3283 return expandPostRAPseudo(MI);
3284 } else {
3285 NumStoreSPILLVSRRCAsGpr++;
3286 MI.setDesc(get(Opcode: PPC::STD));
3287 }
3288 return true;
3289 }
3290 case PPC::SPILLTOVSR_LDX: {
3291 Register TargetReg = MI.getOperand(i: 0).getReg();
3292 if (PPC::VSFRCRegClass.contains(Reg: TargetReg))
3293 MI.setDesc(get(Opcode: PPC::LXSDX));
3294 else
3295 MI.setDesc(get(Opcode: PPC::LDX));
3296 return true;
3297 }
3298 case PPC::SPILLTOVSR_STX: {
3299 Register SrcReg = MI.getOperand(i: 0).getReg();
3300 if (PPC::VSFRCRegClass.contains(Reg: SrcReg)) {
3301 NumStoreSPILLVSRRCAsVec++;
3302 MI.setDesc(get(Opcode: PPC::STXSDX));
3303 } else {
3304 NumStoreSPILLVSRRCAsGpr++;
3305 MI.setDesc(get(Opcode: PPC::STDX));
3306 }
3307 return true;
3308 }
3309
3310 // FIXME: Maybe we can expand it in 'PowerPC Expand Atomic' pass.
3311 case PPC::CFENCE:
3312 case PPC::CFENCE8: {
3313 auto Val = MI.getOperand(i: 0).getReg();
3314 unsigned CmpOp = Subtarget.isPPC64() ? PPC::CMPD : PPC::CMPW;
3315 BuildMI(BB&: MBB, I&: MI, MIMD: DL, MCID: get(Opcode: CmpOp), DestReg: PPC::CR7).addReg(RegNo: Val).addReg(RegNo: Val);
3316 BuildMI(BB&: MBB, I&: MI, MIMD: DL, MCID: get(Opcode: PPC::CTRL_DEP))
3317 .addImm(Val: PPC::PRED_NE_MINUS)
3318 .addReg(RegNo: PPC::CR7)
3319 .addImm(Val: 1);
3320 MI.setDesc(get(Opcode: PPC::ISYNC));
3321 MI.removeOperand(OpNo: 0);
3322 return true;
3323 }
3324 }
3325 return false;
3326}
3327
3328// Essentially a compile-time implementation of a compare->isel sequence.
3329// It takes two constants to compare, along with the true/false registers
3330// and the comparison type (as a subreg to a CR field) and returns one
3331// of the true/false registers, depending on the comparison results.
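// For example, selectReg(-2, 3, PPC::CMPWI, T, F, PPC::sub_lt) returns T
// (signed -2 < 3), while the same query with PPC::CMPLWI returns F, since
// -2 reinterpreted as unsigned is larger than 3.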
3332static unsigned selectReg(int64_t Imm1, int64_t Imm2, unsigned CompareOpc,
3333 unsigned TrueReg, unsigned FalseReg,
3334 unsigned CRSubReg) {
3335 // Signed comparisons. The immediates are assumed to be sign-extended.
3336 if (CompareOpc == PPC::CMPWI || CompareOpc == PPC::CMPDI) {
3337 switch (CRSubReg) {
3338 default: llvm_unreachable("Unknown integer comparison type.");
3339 case PPC::sub_lt:
3340 return Imm1 < Imm2 ? TrueReg : FalseReg;
3341 case PPC::sub_gt:
3342 return Imm1 > Imm2 ? TrueReg : FalseReg;
3343 case PPC::sub_eq:
3344 return Imm1 == Imm2 ? TrueReg : FalseReg;
3345 }
3346 }
3347 // Unsigned comparisons.
3348 else if (CompareOpc == PPC::CMPLWI || CompareOpc == PPC::CMPLDI) {
3349 switch (CRSubReg) {
3350 default: llvm_unreachable("Unknown integer comparison type.");
3351 case PPC::sub_lt:
3352 return (uint64_t)Imm1 < (uint64_t)Imm2 ? TrueReg : FalseReg;
3353 case PPC::sub_gt:
3354 return (uint64_t)Imm1 > (uint64_t)Imm2 ? TrueReg : FalseReg;
3355 case PPC::sub_eq:
3356 return Imm1 == Imm2 ? TrueReg : FalseReg;
3357 }
3358 }
3359 return PPC::NoRegister;
3360}
3361
3362void PPCInstrInfo::replaceInstrOperandWithImm(MachineInstr &MI,
3363 unsigned OpNo,
3364 int64_t Imm) const {
3365 assert(MI.getOperand(OpNo).isReg() && "Operand must be a REG");
3366 // Replace the REG with the Immediate.
3367 Register InUseReg = MI.getOperand(i: OpNo).getReg();
3368 MI.getOperand(i: OpNo).ChangeToImmediate(ImmVal: Imm);
3369
  // We need to make sure that the MI no longer has any implicit use of this
  // REG. We don't call MI.implicit_operands().empty() to return early, since
  // MI's MCID might have been changed by the calling context; as a result,
  // its number of explicit operands may have changed, and thus the start of
  // the implicit operands has shifted.
3375 const TargetRegisterInfo *TRI = &getRegisterInfo();
3376 int UseOpIdx = MI.findRegisterUseOperandIdx(Reg: InUseReg, TRI, isKill: false);
3377 if (UseOpIdx >= 0) {
3378 MachineOperand &MO = MI.getOperand(i: UseOpIdx);
3379 if (MO.isImplicit())
3380 // The operands must always be in the following order:
3381 // - explicit reg defs,
3382 // - other explicit operands (reg uses, immediates, etc.),
3383 // - implicit reg defs
3384 // - implicit reg uses
3385 // Therefore, removing the implicit operand won't change the explicit
3386 // operands layout.
3387 MI.removeOperand(OpNo: UseOpIdx);
3388 }
3389}
3390
3391// Replace an instruction with one that materializes a constant (and sets
3392// CR0 if the original instruction was a record-form instruction).
3393void PPCInstrInfo::replaceInstrWithLI(MachineInstr &MI,
3394 const LoadImmediateInfo &LII) const {
3395 // Remove existing operands.
3396 int OperandToKeep = LII.SetCR ? 1 : 0;
3397 for (int i = MI.getNumOperands() - 1; i > OperandToKeep; i--)
3398 MI.removeOperand(OpNo: i);
3399
3400 // Replace the instruction.
3401 if (LII.SetCR) {
3402 MI.setDesc(get(Opcode: LII.Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
3403 // Set the immediate.
3404 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
3405 .addImm(Val: LII.Imm).addReg(RegNo: PPC::CR0, Flags: RegState::ImplicitDefine);
3406 return;
3407 }
  MI.setDesc(get(Opcode: LII.Is64Bit ? PPC::LI8 : PPC::LI));
3410
3411 // Set the immediate.
3412 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
3413 .addImm(Val: LII.Imm);
3414}
3415
3416MachineInstr *PPCInstrInfo::getDefMIPostRA(unsigned Reg, MachineInstr &MI,
3417 bool &SeenIntermediateUse) const {
3418 assert(!MI.getParent()->getParent()->getRegInfo().isSSA() &&
3419 "Should be called after register allocation.");
3420 const TargetRegisterInfo *TRI = &getRegisterInfo();
3421 MachineBasicBlock::reverse_iterator E = MI.getParent()->rend(), It = MI;
3422 It++;
3423 SeenIntermediateUse = false;
3424 for (; It != E; ++It) {
3425 if (It->modifiesRegister(Reg, TRI))
3426 return &*It;
3427 if (It->readsRegister(Reg, TRI))
3428 SeenIntermediateUse = true;
3429 }
3430 return nullptr;
3431}
3432
3433void PPCInstrInfo::materializeImmPostRA(MachineBasicBlock &MBB,
3434 MachineBasicBlock::iterator MBBI,
3435 const DebugLoc &DL, Register Reg,
3436 int64_t Imm) const {
3437 assert(!MBB.getParent()->getRegInfo().isSSA() &&
3438 "Register should be in non-SSA form after RA");
3439 bool isPPC64 = Subtarget.isPPC64();
3440 // FIXME: Materialization here is not optimal.
3441 // For some special bit patterns we can use less instructions.
3442 // See `selectI64ImmDirect` in PPCISelDAGToDAG.cpp.
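  // For example, Imm = 0x123456789ABCDEF0 is materialized as:
  //   LIS8   Reg, 0x1234
  //   ORI8   Reg, Reg, 0x5678
  //   RLDICR Reg, Reg, 32, 31
  //   ORIS8  Reg, Reg, 0x9ABC
  //   ORI8   Reg, Reg, 0xDEF0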
3443 if (isInt<16>(x: Imm)) {
3444 BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: isPPC64 ? PPC::LI8 : PPC::LI), DestReg: Reg).addImm(Val: Imm);
3445 } else if (isInt<32>(x: Imm)) {
3446 BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: isPPC64 ? PPC::LIS8 : PPC::LIS), DestReg: Reg)
3447 .addImm(Val: Imm >> 16);
3448 if (Imm & 0xFFFF)
3449 BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: isPPC64 ? PPC::ORI8 : PPC::ORI), DestReg: Reg)
3450 .addReg(RegNo: Reg, Flags: RegState::Kill)
3451 .addImm(Val: Imm & 0xFFFF);
3452 } else {
3453 assert(isPPC64 && "Materializing 64-bit immediate to single register is "
3454 "only supported in PPC64");
3455 BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: PPC::LIS8), DestReg: Reg).addImm(Val: Imm >> 48);
3456 if ((Imm >> 32) & 0xFFFF)
3457 BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: PPC::ORI8), DestReg: Reg)
3458 .addReg(RegNo: Reg, Flags: RegState::Kill)
3459 .addImm(Val: (Imm >> 32) & 0xFFFF);
3460 BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: PPC::RLDICR), DestReg: Reg)
3461 .addReg(RegNo: Reg, Flags: RegState::Kill)
3462 .addImm(Val: 32)
3463 .addImm(Val: 31);
3464 BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: PPC::ORIS8), DestReg: Reg)
3465 .addReg(RegNo: Reg, Flags: RegState::Kill)
3466 .addImm(Val: (Imm >> 16) & 0xFFFF);
3467 if (Imm & 0xFFFF)
3468 BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: PPC::ORI8), DestReg: Reg)
3469 .addReg(RegNo: Reg, Flags: RegState::Kill)
3470 .addImm(Val: Imm & 0xFFFF);
3471 }
3472}
3473
3474MachineInstr *PPCInstrInfo::getForwardingDefMI(
3475 MachineInstr &MI,
3476 unsigned &OpNoForForwarding,
3477 bool &SeenIntermediateUse) const {
3478 OpNoForForwarding = ~0U;
3479 MachineInstr *DefMI = nullptr;
3480 MachineRegisterInfo *MRI = &MI.getParent()->getParent()->getRegInfo();
3481 const TargetRegisterInfo *TRI = &getRegisterInfo();
3482 // If we're in SSA, get the defs through the MRI. Otherwise, only look
3483 // within the basic block to see if the register is defined using an
3484 // LI/LI8/ADDI/ADDI8.
3485 if (MRI->isSSA()) {
3486 for (int i = 1, e = MI.getNumOperands(); i < e; i++) {
3487 if (!MI.getOperand(i).isReg())
3488 continue;
3489 Register Reg = MI.getOperand(i).getReg();
3490 if (!Reg.isVirtual())
3491 continue;
3492 Register TrueReg = TRI->lookThruCopyLike(SrcReg: Reg, MRI);
3493 if (TrueReg.isVirtual()) {
3494 MachineInstr *DefMIForTrueReg = MRI->getVRegDef(Reg: TrueReg);
3495 if (DefMIForTrueReg->getOpcode() == PPC::LI ||
3496 DefMIForTrueReg->getOpcode() == PPC::LI8 ||
3497 DefMIForTrueReg->getOpcode() == PPC::ADDI ||
3498 DefMIForTrueReg->getOpcode() == PPC::ADDI8) {
3499 OpNoForForwarding = i;
3500 DefMI = DefMIForTrueReg;
          // Both ADDI- and LI-defined operands may exist in the same
          // instruction. We prefer to fold the LI operand, since LI has only
          // one Imm operand and is more likely to be convertible. So if the
          // current DefMI is ADDI/ADDI8, we keep looking for a possible
          // LI/LI8.
3505 if (DefMI->getOpcode() == PPC::LI || DefMI->getOpcode() == PPC::LI8)
3506 break;
3507 }
3508 }
3509 }
3510 } else {
3511 // Looking back through the definition for each operand could be expensive,
3512 // so exit early if this isn't an instruction that either has an immediate
3513 // form or is already an immediate form that we can handle.
3514 ImmInstrInfo III;
3515 unsigned Opc = MI.getOpcode();
3516 bool ConvertibleImmForm =
3517 Opc == PPC::CMPWI || Opc == PPC::CMPLWI || Opc == PPC::CMPDI ||
3518 Opc == PPC::CMPLDI || Opc == PPC::ADDI || Opc == PPC::ADDI8 ||
3519 Opc == PPC::ORI || Opc == PPC::ORI8 || Opc == PPC::XORI ||
3520 Opc == PPC::XORI8 || Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec ||
3521 Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 ||
3522 Opc == PPC::RLWINM || Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8 ||
3523 Opc == PPC::RLWINM8_rec;
3524 bool IsVFReg = (MI.getNumOperands() && MI.getOperand(i: 0).isReg())
3525 ? PPC::isVFRegister(Reg: MI.getOperand(i: 0).getReg())
3526 : false;
3527 if (!ConvertibleImmForm && !instrHasImmForm(Opc, IsVFReg, III, PostRA: true))
3528 return nullptr;
3529
3530 // Don't convert or %X, %Y, %Y since that's just a register move.
3531 if ((Opc == PPC::OR || Opc == PPC::OR8) &&
3532 MI.getOperand(i: 1).getReg() == MI.getOperand(i: 2).getReg())
3533 return nullptr;
3534 for (int i = 1, e = MI.getNumOperands(); i < e; i++) {
3535 MachineOperand &MO = MI.getOperand(i);
3536 SeenIntermediateUse = false;
3537 if (MO.isReg() && MO.isUse() && !MO.isImplicit()) {
3538 Register Reg = MI.getOperand(i).getReg();
3539 // If we see another use of this reg between the def and the MI,
3540 // we want to flag it so the def isn't deleted.
3541 MachineInstr *DefMI = getDefMIPostRA(Reg, MI, SeenIntermediateUse);
3542 if (DefMI) {
3543 // Is this register defined by some form of add-immediate (including
3544 // load-immediate) within this basic block?
3545 switch (DefMI->getOpcode()) {
3546 default:
3547 break;
3548 case PPC::LI:
3549 case PPC::LI8:
3550 case PPC::ADDItocL8:
3551 case PPC::ADDI:
3552 case PPC::ADDI8:
3553 OpNoForForwarding = i;
3554 return DefMI;
3555 }
3556 }
3557 }
3558 }
3559 }
3560 return OpNoForForwarding == ~0U ? nullptr : DefMI;
3561}
3562
3563unsigned PPCInstrInfo::getSpillTarget() const {
3564 // With P10, we may need to spill paired vector registers or accumulator
3565 // registers. MMA implies paired vectors, so we can just check that.
3566 bool IsP10Variant = Subtarget.isISA3_1() || Subtarget.pairedVectorMemops();
3567 // P11 uses the P10 target.
  return Subtarget.isISAFuture()   ? 3
         : IsP10Variant            ? 2
         : Subtarget.hasP9Vector() ? 1
                                   : 0;
3571}
3572
3573ArrayRef<unsigned> PPCInstrInfo::getStoreOpcodesForSpillArray() const {
3574 return {StoreSpillOpcodesArray[getSpillTarget()], SOK_LastOpcodeSpill};
3575}
3576
3577ArrayRef<unsigned> PPCInstrInfo::getLoadOpcodesForSpillArray() const {
3578 return {LoadSpillOpcodesArray[getSpillTarget()], SOK_LastOpcodeSpill};
3579}
3580
3581// This opt tries to convert the following imm form to an index form to save an
3582// add for stack variables.
// Return false if no such pattern is found.
3584//
3585// ADDI instr: ToBeChangedReg = ADDI FrameBaseReg, OffsetAddi
3586// ADD instr: ToBeDeletedReg = ADD ToBeChangedReg(killed), ScaleReg
3587// Imm instr: Reg = op OffsetImm, ToBeDeletedReg(killed)
3588//
3589// can be converted to:
3590//
3591// new ADDI instr: ToBeChangedReg = ADDI FrameBaseReg, (OffsetAddi + OffsetImm)
3592// Index instr: Reg = opx ScaleReg, ToBeChangedReg(killed)
3593//
// In order to eliminate the ADD instr, make sure that:
// 1: (OffsetAddi + OffsetImm) must be int16, since this offset will be used
//    in the new ADDI instr and ADDI can only take an int16 Imm.
// 2: ToBeChangedReg must be killed in the ADD instr, with no other use
//    between the ADDI and ADD instrs, since its original def in the ADDI
//    will be changed in the new ADDI instr. There must also be no new def
//    of it between the ADD and Imm instrs, as ToBeChangedReg will be used
//    in the Index instr.
// 3: ToBeDeletedReg must be killed in the Imm instr, with no other use
//    between the ADD and Imm instrs, since the ADD instr will be eliminated.
// 4: ScaleReg must not be redefined between the ADD and Imm instrs, since it
//    will be moved to the Index instr.
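// A concrete (illustrative) PPC64 instance of the pattern above:
//
// x3 = ADDI x1, 16 ; x4 = ADD x3(killed), x5 ; x6 = LD 8, x4(killed)
//
// becomes:
//
// x3 = ADDI x1, 24 ; x6 = LDX x5, x3(killed)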
3605bool PPCInstrInfo::foldFrameOffset(MachineInstr &MI) const {
3606 MachineFunction *MF = MI.getParent()->getParent();
3607 MachineRegisterInfo *MRI = &MF->getRegInfo();
3608 bool PostRA = !MRI->isSSA();
  // Do this opt after PEI, which runs after RA. The reason is that stack slot
  // expansion in PEI may expose such opportunities, since it is in PEI that
  // stack slot offsets to the frame base (OffsetAddi) are determined.
3612 if (!PostRA)
3613 return false;
3614 unsigned ToBeDeletedReg = 0;
3615 int64_t OffsetImm = 0;
3616 unsigned XFormOpcode = 0;
3617 ImmInstrInfo III;
3618
3619 // Check if Imm instr meets requirement.
3620 if (!isImmInstrEligibleForFolding(MI, BaseReg&: ToBeDeletedReg, XFormOpcode, OffsetOfImmInstr&: OffsetImm,
3621 III))
3622 return false;
3623
3624 bool OtherIntermediateUse = false;
3625 MachineInstr *ADDMI = getDefMIPostRA(Reg: ToBeDeletedReg, MI, SeenIntermediateUse&: OtherIntermediateUse);
3626
3627 // Exit if there is other use between ADD and Imm instr or no def found.
3628 if (OtherIntermediateUse || !ADDMI)
3629 return false;
3630
3631 // Check if ADD instr meets requirement.
3632 if (!isADDInstrEligibleForFolding(ADDMI&: *ADDMI))
3633 return false;
3634
3635 unsigned ScaleRegIdx = 0;
3636 int64_t OffsetAddi = 0;
3637 MachineInstr *ADDIMI = nullptr;
3638
3639 // Check if there is a valid ToBeChangedReg in ADDMI.
3640 // 1: It must be killed.
3641 // 2: Its definition must be a valid ADDIMI.
  // 3: It must satisfy the int16 offset requirement.
3643 if (isValidToBeChangedReg(ADDMI, Index: 1, ADDIMI, OffsetAddi, OffsetImm))
3644 ScaleRegIdx = 2;
3645 else if (isValidToBeChangedReg(ADDMI, Index: 2, ADDIMI, OffsetAddi, OffsetImm))
3646 ScaleRegIdx = 1;
3647 else
3648 return false;
3649
3650 assert(ADDIMI && "There should be ADDIMI for valid ToBeChangedReg.");
3651 Register ToBeChangedReg = ADDIMI->getOperand(i: 0).getReg();
3652 Register ScaleReg = ADDMI->getOperand(i: ScaleRegIdx).getReg();
3653 auto NewDefFor = [&](unsigned Reg, MachineBasicBlock::iterator Start,
3654 MachineBasicBlock::iterator End) {
3655 for (auto It = ++Start; It != End; It++)
3656 if (It->modifiesRegister(Reg, TRI: &getRegisterInfo()))
3657 return true;
3658 return false;
3659 };
3660
  // We are trying to replace the ImmOpNo with ScaleReg. Give up if ScaleReg
  // is R0/X0, where it would be treated as the special zero.
3663 if (III.ZeroIsSpecialOrig == III.ImmOpNo &&
3664 (ScaleReg == PPC::R0 || ScaleReg == PPC::X0))
3665 return false;
3666
3667 // Make sure no other def for ToBeChangedReg and ScaleReg between ADD Instr
3668 // and Imm Instr.
3669 if (NewDefFor(ToBeChangedReg, *ADDMI, MI) || NewDefFor(ScaleReg, *ADDMI, MI))
3670 return false;
3671
3672 // Now start to do the transformation.
3673 LLVM_DEBUG(dbgs() << "Replace instruction: "
3674 << "\n");
3675 LLVM_DEBUG(ADDIMI->dump());
3676 LLVM_DEBUG(ADDMI->dump());
3677 LLVM_DEBUG(MI.dump());
3678 LLVM_DEBUG(dbgs() << "with: "
3679 << "\n");
3680
3681 // Update ADDI instr.
3682 ADDIMI->getOperand(i: 2).setImm(OffsetAddi + OffsetImm);
3683
3684 // Update Imm instr.
3685 MI.setDesc(get(Opcode: XFormOpcode));
3686 MI.getOperand(i: III.ImmOpNo)
3687 .ChangeToRegister(Reg: ScaleReg, isDef: false, isImp: false,
3688 isKill: ADDMI->getOperand(i: ScaleRegIdx).isKill());
3689
3690 MI.getOperand(i: III.OpNoForForwarding)
3691 .ChangeToRegister(Reg: ToBeChangedReg, isDef: false, isImp: false, isKill: true);
3692
3693 // Eliminate ADD instr.
3694 ADDMI->eraseFromParent();
3695
3696 LLVM_DEBUG(ADDIMI->dump());
3697 LLVM_DEBUG(MI.dump());
3698
3699 return true;
3700}
3701
3702bool PPCInstrInfo::isADDIInstrEligibleForFolding(MachineInstr &ADDIMI,
3703 int64_t &Imm) const {
3704 unsigned Opc = ADDIMI.getOpcode();
3705
3706 // Exit if the instruction is not ADDI.
3707 if (Opc != PPC::ADDI && Opc != PPC::ADDI8)
3708 return false;
3709
3710 // The operand may not necessarily be an immediate - it could be a relocation.
3711 if (!ADDIMI.getOperand(i: 2).isImm())
3712 return false;
3713
3714 Imm = ADDIMI.getOperand(i: 2).getImm();
3715
3716 return true;
3717}
3718
3719bool PPCInstrInfo::isADDInstrEligibleForFolding(MachineInstr &ADDMI) const {
3720 unsigned Opc = ADDMI.getOpcode();
3721
3722 // Exit if the instruction is not ADD.
3723 return Opc == PPC::ADD4 || Opc == PPC::ADD8;
3724}
3725
3726bool PPCInstrInfo::isImmInstrEligibleForFolding(MachineInstr &MI,
3727 unsigned &ToBeDeletedReg,
3728 unsigned &XFormOpcode,
3729 int64_t &OffsetImm,
3730 ImmInstrInfo &III) const {
3731 // Only handle load/store.
3732 if (!MI.mayLoadOrStore())
3733 return false;
3734
3735 unsigned Opc = MI.getOpcode();
3736
3737 XFormOpcode = RI.getMappedIdxOpcForImmOpc(ImmOpcode: Opc);
3738
3739 // Exit if instruction has no index form.
3740 if (XFormOpcode == PPC::INSTRUCTION_LIST_END)
3741 return false;
3742
3743 // TODO: sync the logic between instrHasImmForm() and ImmToIdxMap.
3744 if (!instrHasImmForm(Opc: XFormOpcode,
3745 IsVFReg: PPC::isVFRegister(Reg: MI.getOperand(i: 0).getReg()), III, PostRA: true))
3746 return false;
3747
3748 if (!III.IsSummingOperands)
3749 return false;
3750
3751 MachineOperand ImmOperand = MI.getOperand(i: III.ImmOpNo);
3752 MachineOperand RegOperand = MI.getOperand(i: III.OpNoForForwarding);
3753 // Only support imm operands, not relocation slots or others.
3754 if (!ImmOperand.isImm())
3755 return false;
3756
3757 assert(RegOperand.isReg() && "Instruction format is not right");
3758
  // If there are other uses of ToBeDeletedReg after the Imm instr, we cannot
  // delete it.
3760 if (!RegOperand.isKill())
3761 return false;
3762
3763 ToBeDeletedReg = RegOperand.getReg();
3764 OffsetImm = ImmOperand.getImm();
3765
3766 return true;
3767}
3768
3769bool PPCInstrInfo::isValidToBeChangedReg(MachineInstr *ADDMI, unsigned Index,
3770 MachineInstr *&ADDIMI,
3771 int64_t &OffsetAddi,
3772 int64_t OffsetImm) const {
3773 assert((Index == 1 || Index == 2) && "Invalid operand index for add.");
3774 MachineOperand &MO = ADDMI->getOperand(i: Index);
3775
3776 if (!MO.isKill())
3777 return false;
3778
3779 bool OtherIntermediateUse = false;
3780
3781 ADDIMI = getDefMIPostRA(Reg: MO.getReg(), MI&: *ADDMI, SeenIntermediateUse&: OtherIntermediateUse);
  // We currently handle only the single "add + Imm instr" pair case; exit if
  // another intermediate use of ToBeChangedReg is found.
  // TODO: handle the cases where there are other "add + Imm instr" pairs
  // with the same offset in the Imm instr, which look like:
3786 //
3787 // ADDI instr: ToBeChangedReg = ADDI FrameBaseReg, OffsetAddi
3788 // ADD instr1: ToBeDeletedReg1 = ADD ToBeChangedReg, ScaleReg1
3789 // Imm instr1: Reg1 = op1 OffsetImm, ToBeDeletedReg1(killed)
3790 // ADD instr2: ToBeDeletedReg2 = ADD ToBeChangedReg(killed), ScaleReg2
3791 // Imm instr2: Reg2 = op2 OffsetImm, ToBeDeletedReg2(killed)
3792 //
3793 // can be converted to:
3794 //
3795 // new ADDI instr: ToBeChangedReg = ADDI FrameBaseReg,
3796 // (OffsetAddi + OffsetImm)
3797 // Index instr1: Reg1 = opx1 ScaleReg1, ToBeChangedReg
3798 // Index instr2: Reg2 = opx2 ScaleReg2, ToBeChangedReg(killed)
3799
3800 if (OtherIntermediateUse || !ADDIMI)
3801 return false;
3802 // Check if ADDI instr meets requirement.
3803 if (!isADDIInstrEligibleForFolding(ADDIMI&: *ADDIMI, Imm&: OffsetAddi))
3804 return false;
3805
  return isInt<16>(x: OffsetAddi + OffsetImm);
3809}
3810
3811// If this instruction has an immediate form and one of its operands is a
3812// result of a load-immediate or an add-immediate, convert it to
3813// the immediate form if the constant is in range.
3814bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI,
3815 SmallSet<Register, 4> &RegsToUpdate,
3816 MachineInstr **KilledDef) const {
3817 MachineFunction *MF = MI.getParent()->getParent();
3818 MachineRegisterInfo *MRI = &MF->getRegInfo();
3819 bool PostRA = !MRI->isSSA();
3820 bool SeenIntermediateUse = true;
3821 unsigned ForwardingOperand = ~0U;
3822 MachineInstr *DefMI = getForwardingDefMI(MI, OpNoForForwarding&: ForwardingOperand,
3823 SeenIntermediateUse);
3824 if (!DefMI)
3825 return false;
3826 assert(ForwardingOperand < MI.getNumOperands() &&
3827 "The forwarding operand needs to be valid at this point");
3828 bool IsForwardingOperandKilled = MI.getOperand(i: ForwardingOperand).isKill();
3829 bool KillFwdDefMI = !SeenIntermediateUse && IsForwardingOperandKilled;
3830 if (KilledDef && KillFwdDefMI)
3831 *KilledDef = DefMI;
3832
3833 // Conservatively add defs from DefMI and defs/uses from MI to the set of
3834 // registers that need their kill flags updated.
3835 for (const MachineOperand &MO : DefMI->operands())
3836 if (MO.isReg() && MO.isDef())
3837 RegsToUpdate.insert(V: MO.getReg());
3838 for (const MachineOperand &MO : MI.operands())
3839 if (MO.isReg())
3840 RegsToUpdate.insert(V: MO.getReg());
3841
  // If this is an imm-form instruction and one of its register operands is
  // produced by an ADDI, put the imm into the imm instruction directly.
3844 if (RI.getMappedIdxOpcForImmOpc(ImmOpcode: MI.getOpcode()) !=
3845 PPC::INSTRUCTION_LIST_END &&
3846 transformToNewImmFormFedByAdd(MI, DefMI&: *DefMI, OpNoForForwarding: ForwardingOperand))
3847 return true;
3848
3849 ImmInstrInfo III;
3850 bool IsVFReg = MI.getOperand(i: 0).isReg() &&
3851 MI.getOperand(i: 0).getReg().isPhysical() &&
3852 PPC::isVFRegister(Reg: MI.getOperand(i: 0).getReg());
3853 bool HasImmForm = instrHasImmForm(Opc: MI.getOpcode(), IsVFReg, III, PostRA);
3854 // If this is a reg+reg instruction that has a reg+imm form,
3855 // and one of the operands is produced by an add-immediate,
3856 // try to convert it.
3857 if (HasImmForm &&
3858 transformToImmFormFedByAdd(MI, III, ConstantOpNo: ForwardingOperand, DefMI&: *DefMI,
3859 KillDefMI: KillFwdDefMI))
3860 return true;
3861
3862 // If this is a reg+reg instruction that has a reg+imm form,
3863 // and one of the operands is produced by LI, convert it now.
3864 if (HasImmForm &&
3865 transformToImmFormFedByLI(MI, III, ConstantOpNo: ForwardingOperand, DefMI&: *DefMI))
3866 return true;
3867
  // If this is not a reg+reg, but the DefMI is LI/LI8, check if its user MI
  // can be simplified to an LI.
3870 if (!HasImmForm &&
3871 simplifyToLI(MI, DefMI&: *DefMI, OpNoForForwarding: ForwardingOperand, KilledDef, RegsToUpdate: &RegsToUpdate))
3872 return true;
3873
3874 return false;
3875}
3876
3877bool PPCInstrInfo::combineRLWINM(MachineInstr &MI,
3878 MachineInstr **ToErase) const {
3879 MachineRegisterInfo *MRI = &MI.getParent()->getParent()->getRegInfo();
3880 Register FoldingReg = MI.getOperand(i: 1).getReg();
3881 if (!FoldingReg.isVirtual())
3882 return false;
3883 MachineInstr *SrcMI = MRI->getVRegDef(Reg: FoldingReg);
3884 if (SrcMI->getOpcode() != PPC::RLWINM &&
3885 SrcMI->getOpcode() != PPC::RLWINM_rec &&
3886 SrcMI->getOpcode() != PPC::RLWINM8 &&
3887 SrcMI->getOpcode() != PPC::RLWINM8_rec)
3888 return false;
3889 assert((MI.getOperand(2).isImm() && MI.getOperand(3).isImm() &&
3890 MI.getOperand(4).isImm() && SrcMI->getOperand(2).isImm() &&
3891 SrcMI->getOperand(3).isImm() && SrcMI->getOperand(4).isImm()) &&
3892 "Invalid PPC::RLWINM Instruction!");
3893 uint64_t SHSrc = SrcMI->getOperand(i: 2).getImm();
3894 uint64_t SHMI = MI.getOperand(i: 2).getImm();
3895 uint64_t MBSrc = SrcMI->getOperand(i: 3).getImm();
3896 uint64_t MBMI = MI.getOperand(i: 3).getImm();
3897 uint64_t MESrc = SrcMI->getOperand(i: 4).getImm();
3898 uint64_t MEMI = MI.getOperand(i: 4).getImm();
3899
3900 assert((MEMI < 32 && MESrc < 32 && MBMI < 32 && MBSrc < 32) &&
3901 "Invalid PPC::RLWINM Instruction!");
  // If MBMI is bigger than MEMI, we can never get a run of ones.
3903 // RotatedSrcMask non-wrap:
3904 // 0........31|32........63
3905 // RotatedSrcMask: B---E B---E
3906 // MaskMI: -----------|--E B------
3907 // Result: ----- --- (Bad candidate)
3908 //
3909 // RotatedSrcMask wrap:
3910 // 0........31|32........63
3911 // RotatedSrcMask: --E B----|--E B----
3912 // MaskMI: -----------|--E B------
3913 // Result: --- -----|--- ----- (Bad candidate)
3914 //
3915 // One special case is RotatedSrcMask is a full set mask.
3916 // RotatedSrcMask full:
3917 // 0........31|32........63
3918 // RotatedSrcMask: ------EB---|-------EB---
3919 // MaskMI: -----------|--E B------
3920 // Result: -----------|--- ------- (Good candidate)
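  // A worked fold (illustrative vregs): with SrcMI %1 = RLWINM %0, 4, 0, 27
  // and MI %2 = RLWINM %1, 4, 0, 23, the rotated source mask is
  // rotl32(0xFFFFFFF0, 4) = 0xFFFFFF0F; ANDed with MI's mask 0xFFFFFF00 this
  // gives 0xFFFFFF00, a run of ones with MB = 0 and ME = 23, so MI can be
  // rewritten as %2 = RLWINM %0, 8, 0, 23 (NewSH = (4 + 4) % 32).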
3921
3922 // Mark special case.
3923 bool SrcMaskFull = (MBSrc - MESrc == 1) || (MBSrc == 0 && MESrc == 31);
3924
3925 // For other MBMI > MEMI cases, just return.
3926 if ((MBMI > MEMI) && !SrcMaskFull)
3927 return false;
3928
3929 // Handle MBMI <= MEMI cases.
3930 APInt MaskMI = APInt::getBitsSetWithWrap(numBits: 32, loBit: 32 - MEMI - 1, hiBit: 32 - MBMI);
  // In MI, we only need the low 32 bits of SrcMI, so just consider the low
  // 32 bits of the SrcMI mask. Note that in APInt the lowest bit is at
  // index 0, while in the PowerPC ISA the lowest bit is at index 63.
3934 APInt MaskSrc = APInt::getBitsSetWithWrap(numBits: 32, loBit: 32 - MESrc - 1, hiBit: 32 - MBSrc);
3935
3936 APInt RotatedSrcMask = MaskSrc.rotl(rotateAmt: SHMI);
3937 APInt FinalMask = RotatedSrcMask & MaskMI;
3938 uint32_t NewMB, NewME;
3939 bool Simplified = false;
3940
3941 // If final mask is 0, MI result should be 0 too.
3942 if (FinalMask.isZero()) {
3943 bool Is64Bit =
3944 (MI.getOpcode() == PPC::RLWINM8 || MI.getOpcode() == PPC::RLWINM8_rec);
3945 Simplified = true;
3946 LLVM_DEBUG(dbgs() << "Replace Instr: ");
3947 LLVM_DEBUG(MI.dump());
3948
3949 if (MI.getOpcode() == PPC::RLWINM || MI.getOpcode() == PPC::RLWINM8) {
3950 // Replace MI with "LI 0"
3951 MI.removeOperand(OpNo: 4);
3952 MI.removeOperand(OpNo: 3);
3953 MI.removeOperand(OpNo: 2);
3954 MI.getOperand(i: 1).ChangeToImmediate(ImmVal: 0);
3955 MI.setDesc(get(Opcode: Is64Bit ? PPC::LI8 : PPC::LI));
3956 } else {
3957 // Replace MI with "ANDI_rec reg, 0"
3958 MI.removeOperand(OpNo: 4);
3959 MI.removeOperand(OpNo: 3);
3960 MI.getOperand(i: 2).setImm(0);
3961 MI.setDesc(get(Opcode: Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
3962 MI.getOperand(i: 1).setReg(SrcMI->getOperand(i: 1).getReg());
3963 if (SrcMI->getOperand(i: 1).isKill()) {
3964 MI.getOperand(i: 1).setIsKill(true);
3965 SrcMI->getOperand(i: 1).setIsKill(false);
3966 } else
3967 // About to replace MI.getOperand(1), clear its kill flag.
3968 MI.getOperand(i: 1).setIsKill(false);
3969 }
3970
3971 LLVM_DEBUG(dbgs() << "With: ");
3972 LLVM_DEBUG(MI.dump());
3973
3974 } else if ((isRunOfOnes(Val: (unsigned)(FinalMask.getZExtValue()), MB&: NewMB, ME&: NewME) &&
3975 NewMB <= NewME) ||
3976 SrcMaskFull) {
    // Here we only handle the MBMI <= MEMI case, so NewMB must be no bigger
    // than NewME. Otherwise we would get a 64-bit value after folding, while
    // MI returns a 32-bit value.
3980 Simplified = true;
3981 LLVM_DEBUG(dbgs() << "Converting Instr: ");
3982 LLVM_DEBUG(MI.dump());
3983
3984 uint16_t NewSH = (SHSrc + SHMI) % 32;
3985 MI.getOperand(i: 2).setImm(NewSH);
3986 // If SrcMI mask is full, no need to update MBMI and MEMI.
3987 if (!SrcMaskFull) {
3988 MI.getOperand(i: 3).setImm(NewMB);
3989 MI.getOperand(i: 4).setImm(NewME);
3990 }
3991 MI.getOperand(i: 1).setReg(SrcMI->getOperand(i: 1).getReg());
3992 if (SrcMI->getOperand(i: 1).isKill()) {
3993 MI.getOperand(i: 1).setIsKill(true);
3994 SrcMI->getOperand(i: 1).setIsKill(false);
3995 } else
3996 // About to replace MI.getOperand(1), clear its kill flag.
3997 MI.getOperand(i: 1).setIsKill(false);
3998
3999 LLVM_DEBUG(dbgs() << "To: ");
4000 LLVM_DEBUG(MI.dump());
4001 }
  if (Simplified && MRI->use_nodbg_empty(RegNo: FoldingReg) &&
      !SrcMI->hasImplicitDef()) {
    // If FoldingReg has no non-debug use and it has no implicit def (i.e. it
    // is not RLWINM_rec or RLWINM8_rec), it's safe to delete its def SrcMI.
    // Otherwise keep it.
4007 *ToErase = SrcMI;
4008 LLVM_DEBUG(dbgs() << "Delete dead instruction: ");
4009 LLVM_DEBUG(SrcMI->dump());
4010 }
4011 return Simplified;
4012}
4013
4014bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
4015 ImmInstrInfo &III, bool PostRA) const {
  // The vast majority of the instructions would need their operand 2 replaced
  // with an immediate when switching to the reg+imm form. A notable exception
  // is the update-form loads/stores, for which a constant operand 2 would
  // need to turn into a displacement and move operand 1 to the operand 2
  // position.
4020 III.ImmOpNo = 2;
4021 III.OpNoForForwarding = 2;
4022 III.ImmWidth = 16;
4023 III.ImmMustBeMultipleOf = 1;
4024 III.TruncateImmTo = 0;
4025 III.IsSummingOperands = false;
4026 switch (Opc) {
4027 default: return false;
4028 case PPC::ADD4:
4029 case PPC::ADD8:
4030 III.SignedImm = true;
4031 III.ZeroIsSpecialOrig = 0;
4032 III.ZeroIsSpecialNew = 1;
4033 III.IsCommutative = true;
4034 III.IsSummingOperands = true;
4035 III.ImmOpcode = Opc == PPC::ADD4 ? PPC::ADDI : PPC::ADDI8;
4036 break;
4037 case PPC::ADDC:
4038 case PPC::ADDC8:
4039 III.SignedImm = true;
4040 III.ZeroIsSpecialOrig = 0;
4041 III.ZeroIsSpecialNew = 0;
4042 III.IsCommutative = true;
4043 III.IsSummingOperands = true;
4044 III.ImmOpcode = Opc == PPC::ADDC ? PPC::ADDIC : PPC::ADDIC8;
4045 break;
4046 case PPC::ADDC_rec:
4047 III.SignedImm = true;
4048 III.ZeroIsSpecialOrig = 0;
4049 III.ZeroIsSpecialNew = 0;
4050 III.IsCommutative = true;
4051 III.IsSummingOperands = true;
4052 III.ImmOpcode = PPC::ADDIC_rec;
4053 break;
4054 case PPC::SUBFC:
4055 case PPC::SUBFC8:
4056 III.SignedImm = true;
4057 III.ZeroIsSpecialOrig = 0;
4058 III.ZeroIsSpecialNew = 0;
4059 III.IsCommutative = false;
4060 III.ImmOpcode = Opc == PPC::SUBFC ? PPC::SUBFIC : PPC::SUBFIC8;
4061 break;
4062 case PPC::CMPW:
4063 case PPC::CMPD:
4064 III.SignedImm = true;
4065 III.ZeroIsSpecialOrig = 0;
4066 III.ZeroIsSpecialNew = 0;
4067 III.IsCommutative = false;
4068 III.ImmOpcode = Opc == PPC::CMPW ? PPC::CMPWI : PPC::CMPDI;
4069 break;
4070 case PPC::CMPLW:
4071 case PPC::CMPLD:
4072 III.SignedImm = false;
4073 III.ZeroIsSpecialOrig = 0;
4074 III.ZeroIsSpecialNew = 0;
4075 III.IsCommutative = false;
4076 III.ImmOpcode = Opc == PPC::CMPLW ? PPC::CMPLWI : PPC::CMPLDI;
4077 break;
4078 case PPC::AND_rec:
4079 case PPC::AND8_rec:
4080 case PPC::OR:
4081 case PPC::OR8:
4082 case PPC::XOR:
4083 case PPC::XOR8:
4084 III.SignedImm = false;
4085 III.ZeroIsSpecialOrig = 0;
4086 III.ZeroIsSpecialNew = 0;
4087 III.IsCommutative = true;
4088 switch(Opc) {
4089 default: llvm_unreachable("Unknown opcode");
4090 case PPC::AND_rec:
4091 III.ImmOpcode = PPC::ANDI_rec;
4092 break;
4093 case PPC::AND8_rec:
4094 III.ImmOpcode = PPC::ANDI8_rec;
4095 break;
4096 case PPC::OR: III.ImmOpcode = PPC::ORI; break;
4097 case PPC::OR8: III.ImmOpcode = PPC::ORI8; break;
4098 case PPC::XOR: III.ImmOpcode = PPC::XORI; break;
4099 case PPC::XOR8: III.ImmOpcode = PPC::XORI8; break;
4100 }
4101 break;
4102 case PPC::RLWNM:
4103 case PPC::RLWNM8:
4104 case PPC::RLWNM_rec:
4105 case PPC::RLWNM8_rec:
4106 case PPC::SLW:
4107 case PPC::SLW8:
4108 case PPC::SLW_rec:
4109 case PPC::SLW8_rec:
4110 case PPC::SRW:
4111 case PPC::SRW8:
4112 case PPC::SRW_rec:
4113 case PPC::SRW8_rec:
4114 case PPC::SRAW:
4115 case PPC::SRAW_rec:
4116 III.SignedImm = false;
4117 III.ZeroIsSpecialOrig = 0;
4118 III.ZeroIsSpecialNew = 0;
4119 III.IsCommutative = false;
4120 // This isn't actually true, but the instructions ignore any of the
4121 // upper bits, so any immediate loaded with an LI is acceptable.
4122 // This does not apply to shift right algebraic because a value
4123 // out of range will produce a -1/0.
4124 III.ImmWidth = 16;
4125 if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 || Opc == PPC::RLWNM_rec ||
4126 Opc == PPC::RLWNM8_rec)
4127 III.TruncateImmTo = 5;
4128 else
4129 III.TruncateImmTo = 6;
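    // E.g. a shift left word by a constant is just a rotate: the extended
    // mnemonic slwi rA, rS, n is rlwinm rA, rS, n, 0, 31 - n, which is why
    // the immediate opcodes chosen below are RLWINM variants.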
4130 switch(Opc) {
4131 default: llvm_unreachable("Unknown opcode");
4132 case PPC::RLWNM: III.ImmOpcode = PPC::RLWINM; break;
4133 case PPC::RLWNM8: III.ImmOpcode = PPC::RLWINM8; break;
4134 case PPC::RLWNM_rec:
4135 III.ImmOpcode = PPC::RLWINM_rec;
4136 break;
4137 case PPC::RLWNM8_rec:
4138 III.ImmOpcode = PPC::RLWINM8_rec;
4139 break;
4140 case PPC::SLW: III.ImmOpcode = PPC::RLWINM; break;
4141 case PPC::SLW8: III.ImmOpcode = PPC::RLWINM8; break;
4142 case PPC::SLW_rec:
4143 III.ImmOpcode = PPC::RLWINM_rec;
4144 break;
4145 case PPC::SLW8_rec:
4146 III.ImmOpcode = PPC::RLWINM8_rec;
4147 break;
4148 case PPC::SRW: III.ImmOpcode = PPC::RLWINM; break;
4149 case PPC::SRW8: III.ImmOpcode = PPC::RLWINM8; break;
4150 case PPC::SRW_rec:
4151 III.ImmOpcode = PPC::RLWINM_rec;
4152 break;
4153 case PPC::SRW8_rec:
4154 III.ImmOpcode = PPC::RLWINM8_rec;
4155 break;
4156 case PPC::SRAW:
4157 III.ImmWidth = 5;
4158 III.TruncateImmTo = 0;
4159 III.ImmOpcode = PPC::SRAWI;
4160 break;
4161 case PPC::SRAW_rec:
4162 III.ImmWidth = 5;
4163 III.TruncateImmTo = 0;
4164 III.ImmOpcode = PPC::SRAWI_rec;
4165 break;
4166 }
4167 break;
4168 case PPC::RLDCL:
4169 case PPC::RLDCL_rec:
4170 case PPC::RLDCR:
4171 case PPC::RLDCR_rec:
4172 case PPC::SLD:
4173 case PPC::SLD_rec:
4174 case PPC::SRD:
4175 case PPC::SRD_rec:
4176 case PPC::SRAD:
4177 case PPC::SRAD_rec:
4178 III.SignedImm = false;
4179 III.ZeroIsSpecialOrig = 0;
4180 III.ZeroIsSpecialNew = 0;
4181 III.IsCommutative = false;
4182 // This isn't actually true, but the instructions ignore any of the
4183 // upper bits, so any immediate loaded with an LI is acceptable.
4184 // This does not apply to shift right algebraic because a value
4185 // out of range will produce a -1/0.
4186 III.ImmWidth = 16;
4187 if (Opc == PPC::RLDCL || Opc == PPC::RLDCL_rec || Opc == PPC::RLDCR ||
4188 Opc == PPC::RLDCR_rec)
4189 III.TruncateImmTo = 6;
4190 else
4191 III.TruncateImmTo = 7;
4192 switch(Opc) {
4193 default: llvm_unreachable("Unknown opcode");
4194 case PPC::RLDCL: III.ImmOpcode = PPC::RLDICL; break;
4195 case PPC::RLDCL_rec:
4196 III.ImmOpcode = PPC::RLDICL_rec;
4197 break;
4198 case PPC::RLDCR: III.ImmOpcode = PPC::RLDICR; break;
4199 case PPC::RLDCR_rec:
4200 III.ImmOpcode = PPC::RLDICR_rec;
4201 break;
4202 case PPC::SLD: III.ImmOpcode = PPC::RLDICR; break;
4203 case PPC::SLD_rec:
4204 III.ImmOpcode = PPC::RLDICR_rec;
4205 break;
4206 case PPC::SRD: III.ImmOpcode = PPC::RLDICL; break;
4207 case PPC::SRD_rec:
4208 III.ImmOpcode = PPC::RLDICL_rec;
4209 break;
4210 case PPC::SRAD:
4211 III.ImmWidth = 6;
4212 III.TruncateImmTo = 0;
4213 III.ImmOpcode = PPC::SRADI;
4214 break;
4215 case PPC::SRAD_rec:
4216 III.ImmWidth = 6;
4217 III.TruncateImmTo = 0;
4218 III.ImmOpcode = PPC::SRADI_rec;
4219 break;
4220 }
4221 break;
4222 // Loads and stores:
4223 case PPC::LBZX:
4224 case PPC::LBZX8:
4225 case PPC::LHZX:
4226 case PPC::LHZX8:
4227 case PPC::LHAX:
4228 case PPC::LHAX8:
4229 case PPC::LWZX:
4230 case PPC::LWZX8:
4231 case PPC::LWAX:
4232 case PPC::LDX:
4233 case PPC::LFSX:
4234 case PPC::LFDX:
4235 case PPC::STBX:
4236 case PPC::STBX8:
4237 case PPC::STHX:
4238 case PPC::STHX8:
4239 case PPC::STWX:
4240 case PPC::STWX8:
4241 case PPC::STDX:
4242 case PPC::STFSX:
4243 case PPC::STFDX:
4244 III.SignedImm = true;
4245 III.ZeroIsSpecialOrig = 1;
4246 III.ZeroIsSpecialNew = 2;
4247 III.IsCommutative = true;
4248 III.IsSummingOperands = true;
4249 III.ImmOpNo = 1;
4250 III.OpNoForForwarding = 2;
4251 switch(Opc) {
4252 default: llvm_unreachable("Unknown opcode");
4253 case PPC::LBZX: III.ImmOpcode = PPC::LBZ; break;
4254 case PPC::LBZX8: III.ImmOpcode = PPC::LBZ8; break;
4255 case PPC::LHZX: III.ImmOpcode = PPC::LHZ; break;
4256 case PPC::LHZX8: III.ImmOpcode = PPC::LHZ8; break;
4257 case PPC::LHAX: III.ImmOpcode = PPC::LHA; break;
4258 case PPC::LHAX8: III.ImmOpcode = PPC::LHA8; break;
4259 case PPC::LWZX: III.ImmOpcode = PPC::LWZ; break;
4260 case PPC::LWZX8: III.ImmOpcode = PPC::LWZ8; break;
4261 case PPC::LWAX:
4262 III.ImmOpcode = PPC::LWA;
4263 III.ImmMustBeMultipleOf = 4;
4264 break;
4265 case PPC::LDX: III.ImmOpcode = PPC::LD; III.ImmMustBeMultipleOf = 4; break;
4266 case PPC::LFSX: III.ImmOpcode = PPC::LFS; break;
4267 case PPC::LFDX: III.ImmOpcode = PPC::LFD; break;
4268 case PPC::STBX: III.ImmOpcode = PPC::STB; break;
4269 case PPC::STBX8: III.ImmOpcode = PPC::STB8; break;
4270 case PPC::STHX: III.ImmOpcode = PPC::STH; break;
4271 case PPC::STHX8: III.ImmOpcode = PPC::STH8; break;
4272 case PPC::STWX: III.ImmOpcode = PPC::STW; break;
4273 case PPC::STWX8: III.ImmOpcode = PPC::STW8; break;
4274 case PPC::STDX:
4275 III.ImmOpcode = PPC::STD;
4276 III.ImmMustBeMultipleOf = 4;
4277 break;
4278 case PPC::STFSX: III.ImmOpcode = PPC::STFS; break;
4279 case PPC::STFDX: III.ImmOpcode = PPC::STFD; break;
4280 }
4281 break;
4282 case PPC::LBZUX:
4283 case PPC::LBZUX8:
4284 case PPC::LHZUX:
4285 case PPC::LHZUX8:
4286 case PPC::LHAUX:
4287 case PPC::LHAUX8:
4288 case PPC::LWZUX:
4289 case PPC::LWZUX8:
4290 case PPC::LDUX:
4291 case PPC::LFSUX:
4292 case PPC::LFDUX:
4293 case PPC::STBUX:
4294 case PPC::STBUX8:
4295 case PPC::STHUX:
4296 case PPC::STHUX8:
4297 case PPC::STWUX:
4298 case PPC::STWUX8:
4299 case PPC::STDUX:
4300 case PPC::STFSUX:
4301 case PPC::STFDUX:
4302 III.SignedImm = true;
4303 III.ZeroIsSpecialOrig = 2;
4304 III.ZeroIsSpecialNew = 3;
4305 III.IsCommutative = false;
4306 III.IsSummingOperands = true;
4307 III.ImmOpNo = 2;
4308 III.OpNoForForwarding = 3;
4309 switch(Opc) {
4310 default: llvm_unreachable("Unknown opcode");
4311 case PPC::LBZUX: III.ImmOpcode = PPC::LBZU; break;
4312 case PPC::LBZUX8: III.ImmOpcode = PPC::LBZU8; break;
4313 case PPC::LHZUX: III.ImmOpcode = PPC::LHZU; break;
4314 case PPC::LHZUX8: III.ImmOpcode = PPC::LHZU8; break;
4315 case PPC::LHAUX: III.ImmOpcode = PPC::LHAU; break;
4316 case PPC::LHAUX8: III.ImmOpcode = PPC::LHAU8; break;
4317 case PPC::LWZUX: III.ImmOpcode = PPC::LWZU; break;
4318 case PPC::LWZUX8: III.ImmOpcode = PPC::LWZU8; break;
4319 case PPC::LDUX:
4320 III.ImmOpcode = PPC::LDU;
4321 III.ImmMustBeMultipleOf = 4;
4322 break;
4323 case PPC::LFSUX: III.ImmOpcode = PPC::LFSU; break;
4324 case PPC::LFDUX: III.ImmOpcode = PPC::LFDU; break;
4325 case PPC::STBUX: III.ImmOpcode = PPC::STBU; break;
4326 case PPC::STBUX8: III.ImmOpcode = PPC::STBU8; break;
4327 case PPC::STHUX: III.ImmOpcode = PPC::STHU; break;
4328 case PPC::STHUX8: III.ImmOpcode = PPC::STHU8; break;
4329 case PPC::STWUX: III.ImmOpcode = PPC::STWU; break;
4330 case PPC::STWUX8: III.ImmOpcode = PPC::STWU8; break;
4331 case PPC::STDUX:
4332 III.ImmOpcode = PPC::STDU;
4333 III.ImmMustBeMultipleOf = 4;
4334 break;
4335 case PPC::STFSUX: III.ImmOpcode = PPC::STFSU; break;
4336 case PPC::STFDUX: III.ImmOpcode = PPC::STFDU; break;
4337 }
4338 break;
  // Power9 and up only. For some of these, the X-Form version has access to
  // all 64 VSRs whereas the D-Form only has access to the VRs. We replace
  // those with pseudo-ops pre-RA and for post-RA, we check that the register
  // loaded into or stored from is one of the VRs.
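  // For example (illustrative):
  //   x = addi8 y, 32
  //   v = lxvx 0, x    ->    v = lxv 32(y)
  // The displacement 32 is a multiple of 16, as the DQ-form lxv requires.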
4343 case PPC::LXVX:
4344 case PPC::LXSSPX:
4345 case PPC::LXSDX:
4346 case PPC::STXVX:
4347 case PPC::STXSSPX:
4348 case PPC::STXSDX:
4349 case PPC::XFLOADf32:
4350 case PPC::XFLOADf64:
4351 case PPC::XFSTOREf32:
4352 case PPC::XFSTOREf64:
4353 if (!Subtarget.hasP9Vector())
4354 return false;
4355 III.SignedImm = true;
4356 III.ZeroIsSpecialOrig = 1;
4357 III.ZeroIsSpecialNew = 2;
4358 III.IsCommutative = true;
4359 III.IsSummingOperands = true;
4360 III.ImmOpNo = 1;
4361 III.OpNoForForwarding = 2;
4362 III.ImmMustBeMultipleOf = 4;
4363 switch(Opc) {
4364 default: llvm_unreachable("Unknown opcode");
4365 case PPC::LXVX:
4366 III.ImmOpcode = PPC::LXV;
4367 III.ImmMustBeMultipleOf = 16;
4368 break;
4369 case PPC::LXSSPX:
4370 if (PostRA) {
4371 if (IsVFReg)
4372 III.ImmOpcode = PPC::LXSSP;
4373 else {
4374 III.ImmOpcode = PPC::LFS;
4375 III.ImmMustBeMultipleOf = 1;
4376 }
4377 break;
4378 }
4379 [[fallthrough]];
4380 case PPC::XFLOADf32:
4381 III.ImmOpcode = PPC::DFLOADf32;
4382 break;
4383 case PPC::LXSDX:
4384 if (PostRA) {
4385 if (IsVFReg)
4386 III.ImmOpcode = PPC::LXSD;
4387 else {
4388 III.ImmOpcode = PPC::LFD;
4389 III.ImmMustBeMultipleOf = 1;
4390 }
4391 break;
4392 }
4393 [[fallthrough]];
4394 case PPC::XFLOADf64:
4395 III.ImmOpcode = PPC::DFLOADf64;
4396 break;
4397 case PPC::STXVX:
4398 III.ImmOpcode = PPC::STXV;
4399 III.ImmMustBeMultipleOf = 16;
4400 break;
4401 case PPC::STXSSPX:
4402 if (PostRA) {
4403 if (IsVFReg)
4404 III.ImmOpcode = PPC::STXSSP;
4405 else {
4406 III.ImmOpcode = PPC::STFS;
4407 III.ImmMustBeMultipleOf = 1;
4408 }
4409 break;
4410 }
4411 [[fallthrough]];
4412 case PPC::XFSTOREf32:
4413 III.ImmOpcode = PPC::DFSTOREf32;
4414 break;
4415 case PPC::STXSDX:
4416 if (PostRA) {
4417 if (IsVFReg)
4418 III.ImmOpcode = PPC::STXSD;
4419 else {
4420 III.ImmOpcode = PPC::STFD;
4421 III.ImmMustBeMultipleOf = 1;
4422 }
4423 break;
4424 }
4425 [[fallthrough]];
4426 case PPC::XFSTOREf64:
4427 III.ImmOpcode = PPC::DFSTOREf64;
4428 break;
4429 }
4430 break;
4431 }
4432 return true;
4433}
4434
// Utility function for swapping two arbitrary operands of an instruction.
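// For example (illustrative), swapping operands 1 and 2 of
//   x = op a, b   ->   x = op b, a
// regardless of where the two operands sit in the operand list.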
4436static void swapMIOperands(MachineInstr &MI, unsigned Op1, unsigned Op2) {
4437 assert(Op1 != Op2 && "Cannot swap operand with itself.");
4438
4439 unsigned MaxOp = std::max(a: Op1, b: Op2);
4440 unsigned MinOp = std::min(a: Op1, b: Op2);
4441 MachineOperand MOp1 = MI.getOperand(i: MinOp);
4442 MachineOperand MOp2 = MI.getOperand(i: MaxOp);
4443 MI.removeOperand(OpNo: std::max(a: Op1, b: Op2));
4444 MI.removeOperand(OpNo: std::min(a: Op1, b: Op2));
4445
4446 // If the operands we are swapping are the two at the end (the common case)
4447 // we can just remove both and add them in the opposite order.
4448 if (MaxOp - MinOp == 1 && MI.getNumOperands() == MinOp) {
4449 MI.addOperand(Op: MOp2);
4450 MI.addOperand(Op: MOp1);
4451 } else {
4452 // Store all operands in a temporary vector, remove them and re-add in the
4453 // right order.
4454 SmallVector<MachineOperand, 2> MOps;
4455 unsigned TotalOps = MI.getNumOperands() + 2; // We've already removed 2 ops.
4456 for (unsigned i = MI.getNumOperands() - 1; i >= MinOp; i--) {
4457 MOps.push_back(Elt: MI.getOperand(i));
4458 MI.removeOperand(OpNo: i);
4459 }
4460 // MOp2 needs to be added next.
4461 MI.addOperand(Op: MOp2);
4462 // Now add the rest.
4463 for (unsigned i = MI.getNumOperands(); i < TotalOps; i++) {
4464 if (i == MaxOp)
4465 MI.addOperand(Op: MOp1);
4466 else {
4467 MI.addOperand(Op: MOps.back());
4468 MOps.pop_back();
4469 }
4470 }
4471 }
4472}
4473
// Check if the use MI, whose operand at index OpNoForForwarding is being
// forwarded into, meets the requirement described in the ImmInstrInfo.
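// For example (illustrative), with an III describing LWZX
// (ZeroIsSpecialOrig = 1, OpNoForForwarding = 2), an MI such as
//   z = lwzx 0, x
// is eligible when we are forwarding into operand 2 (x).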
bool PPCInstrInfo::isUseMIElgibleForForwarding(
    MachineInstr &MI, const ImmInstrInfo &III,
    unsigned OpNoForForwarding) const {
  // As the algorithm of checking for PPC::ZERO/PPC::ZERO8
  // would not work pre-RA, we can only do the check post-RA.
4482 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4483 if (MRI.isSSA())
4484 return false;
4485
4486 // Cannot do the transform if MI isn't summing the operands.
4487 if (!III.IsSummingOperands)
4488 return false;
4489
4490 // The instruction we are trying to replace must have the ZeroIsSpecialOrig set.
4491 if (!III.ZeroIsSpecialOrig)
4492 return false;
4493
4494 // We cannot do the transform if the operand we are trying to replace
4495 // isn't the same as the operand the instruction allows.
4496 if (OpNoForForwarding != III.OpNoForForwarding)
4497 return false;
4498
4499 // Check if the instruction we are trying to transform really has
4500 // the special zero register as its operand.
4501 if (MI.getOperand(i: III.ZeroIsSpecialOrig).getReg() != PPC::ZERO &&
4502 MI.getOperand(i: III.ZeroIsSpecialOrig).getReg() != PPC::ZERO8)
4503 return false;
4504
  // This machine instruction is convertible if:
  // 1. it is summing the operands,
  // 2. one of the operands is the special zero register, and
  // 3. the operand we are trying to replace is the one the MI allows.
4509 return true;
4510}
4511
4512// Check if the DefMI is the add inst and set the ImmMO and RegMO
4513// accordingly.
4514bool PPCInstrInfo::isDefMIElgibleForForwarding(MachineInstr &DefMI,
4515 const ImmInstrInfo &III,
4516 MachineOperand *&ImmMO,
4517 MachineOperand *&RegMO) const {
4518 unsigned Opc = DefMI.getOpcode();
4519 if (Opc != PPC::ADDItocL8 && Opc != PPC::ADDI && Opc != PPC::ADDI8)
4520 return false;
4521
4522 // Skip the optimization of transformTo[NewImm|Imm]FormFedByAdd for ADDItocL8
4523 // on AIX which is used for toc-data access. TODO: Follow up to see if it can
4524 // apply for AIX toc-data as well.
4525 if (Opc == PPC::ADDItocL8 && Subtarget.isAIX())
4526 return false;
4527
4528 assert(DefMI.getNumOperands() >= 3 &&
4529 "Add inst must have at least three operands");
4530 RegMO = &DefMI.getOperand(i: 1);
4531 ImmMO = &DefMI.getOperand(i: 2);
4532
  // Before RA, the first operand of ADDI could be a frame index.
4534 if (!RegMO->isReg())
4535 return false;
4536
  // This DefMI is eligible for forwarding if it is:
  // 1. an add instruction, and
  // 2. one of its operands is an Imm/CPI/Global.
4540 return isAnImmediateOperand(MO: *ImmMO);
4541}
4542
4543bool PPCInstrInfo::isRegElgibleForForwarding(
4544 const MachineOperand &RegMO, const MachineInstr &DefMI,
4545 const MachineInstr &MI, bool KillDefMI,
4546 bool &IsFwdFeederRegKilled, bool &SeenIntermediateUse) const {
4547 // x = addi y, imm
4548 // ...
4549 // z = lfdx 0, x -> z = lfd imm(y)
4550 // The Reg "y" can be forwarded to the MI(z) only when there is no DEF
4551 // of "y" between the DEF of "x" and "z".
4552 // The query is only valid post RA.
4553 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4554 if (MRI.isSSA())
4555 return false;
4556
4557 Register Reg = RegMO.getReg();
4558
  // Walk the instructions in reverse (MI --> DefMI) to find the last DEF of
  // the Reg.
4560 MachineBasicBlock::const_reverse_iterator It = MI;
4561 MachineBasicBlock::const_reverse_iterator E = MI.getParent()->rend();
4562 It++;
4563 for (; It != E; ++It) {
4564 if (It->modifiesRegister(Reg, TRI: &getRegisterInfo()) && (&*It) != &DefMI)
4565 return false;
4566 else if (It->killsRegister(Reg, TRI: &getRegisterInfo()) && (&*It) != &DefMI)
4567 IsFwdFeederRegKilled = true;
4568 if (It->readsRegister(Reg, TRI: &getRegisterInfo()) && (&*It) != &DefMI)
4569 SeenIntermediateUse = true;
4570 // Made it to DefMI without encountering a clobber.
4571 if ((&*It) == &DefMI)
4572 break;
4573 }
4574 assert((&*It) == &DefMI && "DefMI is missing");
4575
4576 // If DefMI also defines the register to be forwarded, we can only forward it
4577 // if DefMI is being erased.
4578 if (DefMI.modifiesRegister(Reg, TRI: &getRegisterInfo()))
4579 return KillDefMI;
4580
4581 return true;
4582}
4583
4584bool PPCInstrInfo::isImmElgibleForForwarding(const MachineOperand &ImmMO,
4585 const MachineInstr &DefMI,
4586 const ImmInstrInfo &III,
4587 int64_t &Imm,
4588 int64_t BaseImm) const {
4589 assert(isAnImmediateOperand(ImmMO) && "ImmMO is NOT an immediate");
4590 if (DefMI.getOpcode() == PPC::ADDItocL8) {
    // The operand of ADDItocL8 is a CPI, which isn't an immediate at compile
    // time. However, we know that it is 16 bits wide and has an alignment of
    // 4. Check if the instruction meets the requirement.
4594 if (III.ImmMustBeMultipleOf > 4 ||
4595 III.TruncateImmTo || III.ImmWidth != 16)
4596 return false;
4597
    // Going from X-Form to D-Form loads means that the displacement needs to
    // be not just an immediate but also a multiple of 4 or 16, depending on
    // the load. A D-Form load cannot represent a displacement that is only a
    // multiple of, say, 2. X-Form loads do not have this restriction.
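    // For example (illustrative): ldx can become ld only when the
    // displacement is a multiple of 4 (ld is DS-form), so a global with only
    // 2-byte alignment cannot be forwarded into ld.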
4602 if (ImmMO.isGlobal()) {
4603 const DataLayout &DL = ImmMO.getGlobal()->getDataLayout();
4604 if (ImmMO.getGlobal()->getPointerAlignment(DL) < III.ImmMustBeMultipleOf)
4605 return false;
4606 }
4607
4608 return true;
4609 }
4610
4611 if (ImmMO.isImm()) {
    // It is an Imm; we need to check if the Imm fits in the range.
    // Sign-extend to 64 bits.
    // DefMI may be folded with another imm-form instruction; the resulting
    // Imm is the sum of the Imm of DefMI and BaseImm, which comes from the
    // imm-form instruction.
4616 APInt ActualValue(64, ImmMO.getImm() + BaseImm, true);
4617 if (III.SignedImm && !ActualValue.isSignedIntN(N: III.ImmWidth))
4618 return false;
4619 if (!III.SignedImm && !ActualValue.isIntN(N: III.ImmWidth))
4620 return false;
4621 Imm = SignExtend64<16>(x: ImmMO.getImm() + BaseImm);
4622
4623 if (Imm % III.ImmMustBeMultipleOf)
4624 return false;
4625 if (III.TruncateImmTo)
4626 Imm &= ((1 << III.TruncateImmTo) - 1);
  } else
    return false;
4630
  // This ImmMO can be forwarded since it meets the requirement described
  // in ImmInstrInfo.
4633 return true;
4634}
4635
4636bool PPCInstrInfo::simplifyToLI(MachineInstr &MI, MachineInstr &DefMI,
4637 unsigned OpNoForForwarding,
4638 MachineInstr **KilledDef,
4639 SmallSet<Register, 4> *RegsToUpdate) const {
4640 if ((DefMI.getOpcode() != PPC::LI && DefMI.getOpcode() != PPC::LI8) ||
4641 !DefMI.getOperand(i: 1).isImm())
4642 return false;
4643
4644 MachineFunction *MF = MI.getParent()->getParent();
4645 MachineRegisterInfo *MRI = &MF->getRegInfo();
4646 bool PostRA = !MRI->isSSA();
4647
4648 int64_t Immediate = DefMI.getOperand(i: 1).getImm();
4649 // Sign-extend to 64-bits.
4650 int64_t SExtImm = SignExtend64<16>(x: Immediate);
4651
4652 bool ReplaceWithLI = false;
4653 bool Is64BitLI = false;
4654 int64_t NewImm = 0;
4655 bool SetCR = false;
4656 unsigned Opc = MI.getOpcode();
4657 switch (Opc) {
4658 default:
4659 return false;
4660
4661 // FIXME: Any branches conditional on such a comparison can be made
4662 // unconditional. At this time, this happens too infrequently to be worth
4663 // the implementation effort, but if that ever changes, we could convert
4664 // such a pattern here.
4665 case PPC::CMPWI:
4666 case PPC::CMPLWI:
4667 case PPC::CMPDI:
4668 case PPC::CMPLDI: {
4669 // Doing this post-RA would require dataflow analysis to reliably find uses
4670 // of the CR register set by the compare.
4671 // No need to fixup killed/dead flag since this transformation is only valid
4672 // before RA.
4673 if (PostRA)
4674 return false;
    // If a compare-immediate is fed by an immediate and is itself an input of
    // an ISEL (the most common case), convert the ISEL into a COPY of the
    // correct register.
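    // For example (illustrative):
    //   x = li 4
    //   c = cmpwi x, 4
    //   z = isel t, f, c.eq    ->    z = copy t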
4677 bool Changed = false;
4678 Register DefReg = MI.getOperand(i: 0).getReg();
4679 int64_t Comparand = MI.getOperand(i: 2).getImm();
4680 int64_t SExtComparand = ((uint64_t)Comparand & ~0x7FFFuLL) != 0
4681 ? (Comparand | 0xFFFFFFFFFFFF0000)
4682 : Comparand;
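    // For example (illustrative): a comparand of 0xFFFF has bits above the
    // low 15 set, so it sign-extends to -1, whereas 0x7FFF is unchanged.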
4683
4684 for (auto &CompareUseMI : MRI->use_instructions(Reg: DefReg)) {
4685 unsigned UseOpc = CompareUseMI.getOpcode();
4686 if (UseOpc != PPC::ISEL && UseOpc != PPC::ISEL8)
4687 continue;
4688 unsigned CRSubReg = CompareUseMI.getOperand(i: 3).getSubReg();
4689 Register TrueReg = CompareUseMI.getOperand(i: 1).getReg();
4690 Register FalseReg = CompareUseMI.getOperand(i: 2).getReg();
4691 unsigned RegToCopy =
4692 selectReg(Imm1: SExtImm, Imm2: SExtComparand, CompareOpc: Opc, TrueReg, FalseReg, CRSubReg);
4693 if (RegToCopy == PPC::NoRegister)
4694 continue;
4695 // Can't use PPC::COPY to copy PPC::ZERO[8]. Convert it to LI[8] 0.
4696 if (RegToCopy == PPC::ZERO || RegToCopy == PPC::ZERO8) {
4697 CompareUseMI.setDesc(get(Opcode: UseOpc == PPC::ISEL8 ? PPC::LI8 : PPC::LI));
4698 replaceInstrOperandWithImm(MI&: CompareUseMI, OpNo: 1, Imm: 0);
4699 CompareUseMI.removeOperand(OpNo: 3);
4700 CompareUseMI.removeOperand(OpNo: 2);
4701 continue;
4702 }
4703 LLVM_DEBUG(
4704 dbgs() << "Found LI -> CMPI -> ISEL, replacing with a copy.\n");
4705 LLVM_DEBUG(DefMI.dump(); MI.dump(); CompareUseMI.dump());
4706 LLVM_DEBUG(dbgs() << "Is converted to:\n");
4707 if (RegsToUpdate) {
4708 for (const MachineOperand &MO : CompareUseMI.operands())
4709 if (MO.isReg())
4710 RegsToUpdate->insert(V: MO.getReg());
4711 }
4712 // Convert to copy and remove unneeded operands.
4713 CompareUseMI.setDesc(get(Opcode: PPC::COPY));
4714 CompareUseMI.removeOperand(OpNo: 3);
4715 CompareUseMI.removeOperand(OpNo: RegToCopy == TrueReg ? 2 : 1);
4716 CmpIselsConverted++;
4717 Changed = true;
4718 LLVM_DEBUG(CompareUseMI.dump());
4719 }
4720 if (Changed)
4721 return true;
4722 // This may end up incremented multiple times since this function is called
4723 // during a fixed-point transformation, but it is only meant to indicate the
4724 // presence of this opportunity.
4725 MissedConvertibleImmediateInstrs++;
4726 return false;
4727 }
4728
  // Immediate forms - may simply be convertible to an LI.
4730 case PPC::ADDI:
4731 case PPC::ADDI8: {
4732 // Does the sum fit in a 16-bit signed field?
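    // For example (illustrative): x = li 100 feeding z = addi x, 5 becomes
    // z = li 105, since 105 fits in a signed 16-bit field.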
4733 int64_t Addend = MI.getOperand(i: 2).getImm();
4734 if (isInt<16>(x: Addend + SExtImm)) {
4735 ReplaceWithLI = true;
4736 Is64BitLI = Opc == PPC::ADDI8;
4737 NewImm = Addend + SExtImm;
4738 break;
4739 }
4740 return false;
4741 }
4742 case PPC::SUBFIC:
4743 case PPC::SUBFIC8: {
4744 // Only transform this if the CARRY implicit operand is dead.
4745 if (MI.getNumOperands() > 3 && !MI.getOperand(i: 3).isDead())
4746 return false;
4747 int64_t Minuend = MI.getOperand(i: 2).getImm();
4748 if (isInt<16>(x: Minuend - SExtImm)) {
4749 ReplaceWithLI = true;
4750 Is64BitLI = Opc == PPC::SUBFIC8;
4751 NewImm = Minuend - SExtImm;
4752 break;
4753 }
4754 return false;
4755 }
4756 case PPC::RLDICL:
4757 case PPC::RLDICL_rec:
4758 case PPC::RLDICL_32:
4759 case PPC::RLDICL_32_64: {
4760 // Use APInt's rotate function.
4761 int64_t SH = MI.getOperand(i: 2).getImm();
4762 int64_t MB = MI.getOperand(i: 3).getImm();
4763 APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec) ? 64 : 32,
4764 SExtImm, true);
4765 InVal = InVal.rotl(rotateAmt: SH);
4766 uint64_t Mask = MB == 0 ? -1LLU : (1LLU << (63 - MB + 1)) - 1;
4767 InVal &= Mask;
4768 // Can't replace negative values with an LI as that will sign-extend
4769 // and not clear the left bits. If we're setting the CR bit, we will use
4770 // ANDI_rec which won't sign extend, so that's safe.
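    // For example (illustrative): x = li 5 feeding y = rldicl x, 1, 0
    // rotates the value to 10 with no masking, giving y = li 10.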
4771 if (isUInt<15>(x: InVal.getSExtValue()) ||
4772 (Opc == PPC::RLDICL_rec && isUInt<16>(x: InVal.getSExtValue()))) {
4773 ReplaceWithLI = true;
4774 Is64BitLI = Opc != PPC::RLDICL_32;
4775 NewImm = InVal.getSExtValue();
4776 SetCR = Opc == PPC::RLDICL_rec;
4777 break;
4778 }
4779 return false;
4780 }
4781 case PPC::RLWINM:
4782 case PPC::RLWINM8:
4783 case PPC::RLWINM_rec:
4784 case PPC::RLWINM8_rec: {
4785 int64_t SH = MI.getOperand(i: 2).getImm();
4786 int64_t MB = MI.getOperand(i: 3).getImm();
4787 int64_t ME = MI.getOperand(i: 4).getImm();
4788 APInt InVal(32, SExtImm, true);
4789 InVal = InVal.rotl(rotateAmt: SH);
4790 APInt Mask = APInt::getBitsSetWithWrap(numBits: 32, loBit: 32 - ME - 1, hiBit: 32 - MB);
4791 InVal &= Mask;
4792 // Can't replace negative values with an LI as that will sign-extend
4793 // and not clear the left bits. If we're setting the CR bit, we will use
4794 // ANDI_rec which won't sign extend, so that's safe.
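    // For example (illustrative): x = li 1 feeding y = rlwinm x, 4, 24, 31
    // rotates the value to 16, which survives the mask selecting bits 24-31,
    // giving y = li 16.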
4795 bool ValueFits = isUInt<15>(x: InVal.getSExtValue());
4796 ValueFits |= ((Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec) &&
4797 isUInt<16>(x: InVal.getSExtValue()));
4798 if (ValueFits) {
4799 ReplaceWithLI = true;
4800 Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8_rec;
4801 NewImm = InVal.getSExtValue();
4802 SetCR = Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec;
4803 break;
4804 }
4805 return false;
4806 }
4807 case PPC::ORI:
4808 case PPC::ORI8:
4809 case PPC::XORI:
4810 case PPC::XORI8: {
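    // For example (illustrative): x = li 3 feeding y = ori x, 12 becomes
    // y = li 15.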
4811 int64_t LogicalImm = MI.getOperand(i: 2).getImm();
4812 int64_t Result = 0;
4813 if (Opc == PPC::ORI || Opc == PPC::ORI8)
4814 Result = LogicalImm | SExtImm;
4815 else
4816 Result = LogicalImm ^ SExtImm;
4817 if (isInt<16>(x: Result)) {
4818 ReplaceWithLI = true;
4819 Is64BitLI = Opc == PPC::ORI8 || Opc == PPC::XORI8;
4820 NewImm = Result;
4821 break;
4822 }
4823 return false;
4824 }
4825 }
4826
4827 if (ReplaceWithLI) {
4828 // We need to be careful with CR-setting instructions we're replacing.
4829 if (SetCR) {
4830 // We don't know anything about uses when we're out of SSA, so only
4831 // replace if the new immediate will be reproduced.
4832 bool ImmChanged = (SExtImm & NewImm) != NewImm;
4833 if (PostRA && ImmChanged)
4834 return false;
4835
4836 if (!PostRA) {
4837 // If the defining load-immediate has no other uses, we can just replace
4838 // the immediate with the new immediate.
4839 if (MRI->hasOneUse(RegNo: DefMI.getOperand(i: 0).getReg()))
4840 DefMI.getOperand(i: 1).setImm(NewImm);
4841
      // If we're not using the GPR result of the CR-setting instruction, we
      // just need to AND with zero/non-zero depending on the new immediate.
4844 else if (MRI->use_empty(RegNo: MI.getOperand(i: 0).getReg())) {
4845 if (NewImm) {
4846 assert(Immediate && "Transformation converted zero to non-zero?");
4847 NewImm = Immediate;
4848 }
4849 } else if (ImmChanged)
4850 return false;
4851 }
4852 }
4853
4854 LLVM_DEBUG(dbgs() << "Replacing constant instruction:\n");
4855 LLVM_DEBUG(MI.dump());
4856 LLVM_DEBUG(dbgs() << "Fed by:\n");
4857 LLVM_DEBUG(DefMI.dump());
4858 LoadImmediateInfo LII;
4859 LII.Imm = NewImm;
4860 LII.Is64Bit = Is64BitLI;
4861 LII.SetCR = SetCR;
4862 // If we're setting the CR, the original load-immediate must be kept (as an
4863 // operand to ANDI_rec/ANDI8_rec).
4864 if (KilledDef && SetCR)
4865 *KilledDef = nullptr;
4866 replaceInstrWithLI(MI, LII);
4867
4868 if (PostRA)
4869 recomputeLivenessFlags(MBB&: *MI.getParent());
4870
4871 LLVM_DEBUG(dbgs() << "With:\n");
4872 LLVM_DEBUG(MI.dump());
4873 return true;
4874 }
4875 return false;
4876}
4877
4878bool PPCInstrInfo::transformToNewImmFormFedByAdd(
4879 MachineInstr &MI, MachineInstr &DefMI, unsigned OpNoForForwarding) const {
4880 MachineRegisterInfo *MRI = &MI.getParent()->getParent()->getRegInfo();
4881 bool PostRA = !MRI->isSSA();
  // FIXME: Extend this to post-RA. Some changes are needed in
  // getForwardingDefMI for post-RA.
4884 if (PostRA)
4885 return false;
4886
4887 // Only handle load/store.
4888 if (!MI.mayLoadOrStore())
4889 return false;
4890
4891 unsigned XFormOpcode = RI.getMappedIdxOpcForImmOpc(ImmOpcode: MI.getOpcode());
4892
4893 assert((XFormOpcode != PPC::INSTRUCTION_LIST_END) &&
4894 "MI must have x-form opcode");
4895
  // Get the imm-form info.
4897 ImmInstrInfo III;
4898 bool IsVFReg = MI.getOperand(i: 0).isReg() &&
4899 MI.getOperand(i: 0).getReg().isPhysical() &&
4900 PPC::isVFRegister(Reg: MI.getOperand(i: 0).getReg());
4901
4902 if (!instrHasImmForm(Opc: XFormOpcode, IsVFReg, III, PostRA))
4903 return false;
4904
4905 if (!III.IsSummingOperands)
4906 return false;
4907
4908 if (OpNoForForwarding != III.OpNoForForwarding)
4909 return false;
4910
4911 MachineOperand ImmOperandMI = MI.getOperand(i: III.ImmOpNo);
4912 if (!ImmOperandMI.isImm())
4913 return false;
4914
4915 // Check DefMI.
4916 MachineOperand *ImmMO = nullptr;
4917 MachineOperand *RegMO = nullptr;
4918 if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))
4919 return false;
4920 assert(ImmMO && RegMO && "Imm and Reg operand must have been set");
4921
  // Check the Imm.
  // Use the Imm of the imm-form instruction (ImmBase) as the base and compute
  // the new Imm inside isImmElgibleForForwarding.
4925 int64_t ImmBase = ImmOperandMI.getImm();
4926 int64_t Imm = 0;
4927 if (!isImmElgibleForForwarding(ImmMO: *ImmMO, DefMI, III, Imm, BaseImm: ImmBase))
4928 return false;
4929
4930 // Do the transform
4931 LLVM_DEBUG(dbgs() << "Replacing existing reg+imm instruction:\n");
4932 LLVM_DEBUG(MI.dump());
4933 LLVM_DEBUG(dbgs() << "Fed by:\n");
4934 LLVM_DEBUG(DefMI.dump());
4935
4936 MI.getOperand(i: III.OpNoForForwarding).setReg(RegMO->getReg());
4937 MI.getOperand(i: III.ImmOpNo).setImm(Imm);
4938
4939 LLVM_DEBUG(dbgs() << "With:\n");
4940 LLVM_DEBUG(MI.dump());
4941 return true;
4942}
4943
4944// If an X-Form instruction is fed by an add-immediate and one of its operands
4945// is the literal zero, attempt to forward the source of the add-immediate to
4946// the corresponding D-Form instruction with the displacement coming from
4947// the immediate being added.
4948bool PPCInstrInfo::transformToImmFormFedByAdd(
4949 MachineInstr &MI, const ImmInstrInfo &III, unsigned OpNoForForwarding,
4950 MachineInstr &DefMI, bool KillDefMI) const {
4951 // RegMO ImmMO
4952 // | |
4953 // x = addi reg, imm <----- DefMI
4954 // y = op 0 , x <----- MI
4955 // |
4956 // OpNoForForwarding
  // Check if the MI meets the requirement described in the III.
4958 if (!isUseMIElgibleForForwarding(MI, III, OpNoForForwarding))
4959 return false;
4960
  // Check if the DefMI meets the requirement
4962 // described in the III. If yes, set the ImmMO and RegMO accordingly.
4963 MachineOperand *ImmMO = nullptr;
4964 MachineOperand *RegMO = nullptr;
4965 if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))
4966 return false;
4967 assert(ImmMO && RegMO && "Imm and Reg operand must have been set");
4968
  // Now that we have the Imm operand, we need to check if the ImmMO meets
  // the requirement described in the III. If so, set the Imm.
4971 int64_t Imm = 0;
4972 if (!isImmElgibleForForwarding(ImmMO: *ImmMO, DefMI, III, Imm))
4973 return false;
4974
4975 bool IsFwdFeederRegKilled = false;
4976 bool SeenIntermediateUse = false;
4977 // Check if the RegMO can be forwarded to MI.
4978 if (!isRegElgibleForForwarding(RegMO: *RegMO, DefMI, MI, KillDefMI,
4979 IsFwdFeederRegKilled, SeenIntermediateUse))
4980 return false;
4981
4982 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4983 bool PostRA = !MRI.isSSA();
4984
  // We know that both the MI and the DefMI match the pattern, and that the
  // Imm meets the requirement of the new imm-form.
  // It is safe to do the transformation now.
4988 LLVM_DEBUG(dbgs() << "Replacing indexed instruction:\n");
4989 LLVM_DEBUG(MI.dump());
4990 LLVM_DEBUG(dbgs() << "Fed by:\n");
4991 LLVM_DEBUG(DefMI.dump());
4992
4993 // Update the base reg first.
4994 MI.getOperand(i: III.OpNoForForwarding).ChangeToRegister(Reg: RegMO->getReg(),
4995 isDef: false, isImp: false,
4996 isKill: RegMO->isKill());
4997
4998 // Then, update the imm.
4999 if (ImmMO->isImm()) {
5000 // If the ImmMO is Imm, change the operand that has ZERO to that Imm
5001 // directly.
5002 replaceInstrOperandWithImm(MI, OpNo: III.ZeroIsSpecialOrig, Imm);
  } else {
    // Otherwise, it is a Constant Pool Index (CPI) or a Global, which is in
    // fact a relocation. We need to replace the special zero register with
    // ImmMO.
    // Before that, we need to fix up the target flags for the imm.
    // For some reason, we fail to set the flag for the ImmMO if it is a CPI.
5010 if (DefMI.getOpcode() == PPC::ADDItocL8)
5011 ImmMO->setTargetFlags(PPCII::MO_TOC_LO);
5012
    // MI doesn't have an interface such as MI.setOperand(i), though it has
    // MI.getOperand(i). To replace the ZERO MachineOperand with ImmMO, we
    // need to remove the ZERO operand and all the operands behind it, add
    // the ImmMO, and then move back all the operands that were behind ZERO.
5017 SmallVector<MachineOperand, 2> MOps;
5018 for (unsigned i = MI.getNumOperands() - 1; i >= III.ZeroIsSpecialOrig; i--) {
5019 MOps.push_back(Elt: MI.getOperand(i));
5020 MI.removeOperand(OpNo: i);
5021 }
5022
5023 // Remove the last MO in the list, which is ZERO operand in fact.
5024 MOps.pop_back();
5025 // Add the imm operand.
5026 MI.addOperand(Op: *ImmMO);
5027 // Now add the rest back.
5028 for (auto &MO : MOps)
5029 MI.addOperand(Op: MO);
5030 }
5031
5032 // Update the opcode.
5033 MI.setDesc(get(Opcode: III.ImmOpcode));
5034
5035 if (PostRA)
5036 recomputeLivenessFlags(MBB&: *MI.getParent());
5037 LLVM_DEBUG(dbgs() << "With:\n");
5038 LLVM_DEBUG(MI.dump());
5039
5040 return true;
5041}
5042
5043bool PPCInstrInfo::transformToImmFormFedByLI(MachineInstr &MI,
5044 const ImmInstrInfo &III,
5045 unsigned ConstantOpNo,
5046 MachineInstr &DefMI) const {
5047 // DefMI must be LI or LI8.
5048 if ((DefMI.getOpcode() != PPC::LI && DefMI.getOpcode() != PPC::LI8) ||
5049 !DefMI.getOperand(i: 1).isImm())
5050 return false;
5051
5052 // Get Imm operand and Sign-extend to 64-bits.
5053 int64_t Imm = SignExtend64<16>(x: DefMI.getOperand(i: 1).getImm());
5054
5055 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
5056 bool PostRA = !MRI.isSSA();
5057 // Exit early if we can't convert this.
5058 if ((ConstantOpNo != III.OpNoForForwarding) && !III.IsCommutative)
5059 return false;
5060 if (Imm % III.ImmMustBeMultipleOf)
5061 return false;
5062 if (III.TruncateImmTo)
5063 Imm &= ((1 << III.TruncateImmTo) - 1);
5064 if (III.SignedImm) {
5065 APInt ActualValue(64, Imm, true);
5066 if (!ActualValue.isSignedIntN(N: III.ImmWidth))
5067 return false;
5068 } else {
5069 uint64_t UnsignedMax = (1 << III.ImmWidth) - 1;
5070 if ((uint64_t)Imm > UnsignedMax)
5071 return false;
5072 }
5073
  // If we're post-RA and the instructions don't agree on whether register
  // zero is special, we can transform this as long as the register operand
  // that will end up in the location where zero is special isn't R0.
5077 if (PostRA && III.ZeroIsSpecialOrig != III.ZeroIsSpecialNew) {
5078 unsigned PosForOrigZero = III.ZeroIsSpecialOrig ? III.ZeroIsSpecialOrig :
5079 III.ZeroIsSpecialNew + 1;
5080 Register OrigZeroReg = MI.getOperand(i: PosForOrigZero).getReg();
5081 Register NewZeroReg = MI.getOperand(i: III.ZeroIsSpecialNew).getReg();
5082 // If R0 is in the operand where zero is special for the new instruction,
5083 // it is unsafe to transform if the constant operand isn't that operand.
5084 if ((NewZeroReg == PPC::R0 || NewZeroReg == PPC::X0) &&
5085 ConstantOpNo != III.ZeroIsSpecialNew)
5086 return false;
5087 if ((OrigZeroReg == PPC::R0 || OrigZeroReg == PPC::X0) &&
5088 ConstantOpNo != PosForOrigZero)
5089 return false;
5090 }
5091
5092 unsigned Opc = MI.getOpcode();
5093 bool SpecialShift32 = Opc == PPC::SLW || Opc == PPC::SLW_rec ||
5094 Opc == PPC::SRW || Opc == PPC::SRW_rec ||
5095 Opc == PPC::SLW8 || Opc == PPC::SLW8_rec ||
5096 Opc == PPC::SRW8 || Opc == PPC::SRW8_rec;
5097 bool SpecialShift64 = Opc == PPC::SLD || Opc == PPC::SLD_rec ||
5098 Opc == PPC::SRD || Opc == PPC::SRD_rec;
5099 bool SetCR = Opc == PPC::SLW_rec || Opc == PPC::SRW_rec ||
5100 Opc == PPC::SLD_rec || Opc == PPC::SRD_rec;
5101 bool RightShift = Opc == PPC::SRW || Opc == PPC::SRW_rec || Opc == PPC::SRD ||
5102 Opc == PPC::SRD_rec;
5103
5104 LLVM_DEBUG(dbgs() << "Replacing reg+reg instruction: ");
5105 LLVM_DEBUG(MI.dump());
5106 LLVM_DEBUG(dbgs() << "Fed by load-immediate: ");
5107 LLVM_DEBUG(DefMI.dump());
5108 MI.setDesc(get(Opcode: III.ImmOpcode));
5109 if (ConstantOpNo == III.OpNoForForwarding) {
5110 // Converting shifts to immediate form is a bit tricky since they may do
5111 // one of three things:
5112 // 1. If the shift amount is between OpSize and 2*OpSize, the result is zero
5113 // 2. If the shift amount is zero, the result is unchanged (save for maybe
5114 // setting CR0)
5115 // 3. If the shift amount is in [1, OpSize), it's just a shift
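    // For example (illustrative): srw fed by li 40 shifts a 32-bit value
    // right by 40, which is >= 32, so the result is known to be zero and we
    // emit li 0 instead.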
5116 if (SpecialShift32 || SpecialShift64) {
5117 LoadImmediateInfo LII;
5118 LII.Imm = 0;
5119 LII.SetCR = SetCR;
5120 LII.Is64Bit = SpecialShift64;
5121 uint64_t ShAmt = Imm & (SpecialShift32 ? 0x1F : 0x3F);
5122 if (Imm & (SpecialShift32 ? 0x20 : 0x40))
5123 replaceInstrWithLI(MI, LII);
5124 // Shifts by zero don't change the value. If we don't need to set CR0,
5125 // just convert this to a COPY. Can't do this post-RA since we've already
5126 // cleaned up the copies.
5127 else if (!SetCR && ShAmt == 0 && !PostRA) {
5128 MI.removeOperand(OpNo: 2);
5129 MI.setDesc(get(Opcode: PPC::COPY));
5130 } else {
5131 // The 32 bit and 64 bit instructions are quite different.
5132 if (SpecialShift32) {
5133 // Left shifts use (N, 0, 31-N).
        // Right shifts use (32-N, N, 31) if 0 < N < 32,
        // and (0, 0, 31) if N == 0.
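        // For example (illustrative): a right shift by N = 8 becomes
        // rlwinm with (SH, MB, ME) = (24, 8, 31).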
5136 uint64_t SH = ShAmt == 0 ? 0 : RightShift ? 32 - ShAmt : ShAmt;
5137 uint64_t MB = RightShift ? ShAmt : 0;
5138 uint64_t ME = RightShift ? 31 : 31 - ShAmt;
5139 replaceInstrOperandWithImm(MI, OpNo: III.OpNoForForwarding, Imm: SH);
5140 MachineInstrBuilder(*MI.getParent()->getParent(), MI).addImm(Val: MB)
5141 .addImm(Val: ME);
5142 } else {
5143 // Left shifts use (N, 63-N).
        // Right shifts use (64-N, N) if 0 < N < 64,
        // and (0, 0) if N == 0.
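        // For example (illustrative): a right shift by N = 8 becomes
        // rldicl with (SH, ME) = (56, 8).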
5146 uint64_t SH = ShAmt == 0 ? 0 : RightShift ? 64 - ShAmt : ShAmt;
5147 uint64_t ME = RightShift ? ShAmt : 63 - ShAmt;
5148 replaceInstrOperandWithImm(MI, OpNo: III.OpNoForForwarding, Imm: SH);
5149 MachineInstrBuilder(*MI.getParent()->getParent(), MI).addImm(Val: ME);
5150 }
5151 }
5152 } else
5153 replaceInstrOperandWithImm(MI, OpNo: ConstantOpNo, Imm);
5154 }
  // Convert commutative instructions (switch the operands and convert the
  // desired one to an immediate).
5157 else if (III.IsCommutative) {
5158 replaceInstrOperandWithImm(MI, OpNo: ConstantOpNo, Imm);
5159 swapMIOperands(MI, Op1: ConstantOpNo, Op2: III.OpNoForForwarding);
5160 } else
5161 llvm_unreachable("Should have exited early!");
5162
5163 // For instructions for which the constant register replaces a different
5164 // operand than where the immediate goes, we need to swap them.
5165 if (III.OpNoForForwarding != III.ImmOpNo)
5166 swapMIOperands(MI, Op1: III.OpNoForForwarding, Op2: III.ImmOpNo);
5167
  // If the special R0/X0 register indices differ between the original and
  // the new instruction, we need to fix up the register class in the new
  // instruction.
5171 if (!PostRA && III.ZeroIsSpecialOrig != III.ZeroIsSpecialNew) {
5172 if (III.ZeroIsSpecialNew) {
      // If the operand at III.ZeroIsSpecialNew is a physical register
      // (e.g. ZERO/ZERO8), there is no need to fix up the register class.
5175 Register RegToModify = MI.getOperand(i: III.ZeroIsSpecialNew).getReg();
5176 if (RegToModify.isVirtual()) {
5177 const TargetRegisterClass *NewRC =
5178 MRI.getRegClass(Reg: RegToModify)->hasSuperClassEq(RC: &PPC::GPRCRegClass) ?
5179 &PPC::GPRC_and_GPRC_NOR0RegClass : &PPC::G8RC_and_G8RC_NOX0RegClass;
5180 MRI.setRegClass(Reg: RegToModify, RC: NewRC);
5181 }
5182 }
5183 }
5184
5185 if (PostRA)
5186 recomputeLivenessFlags(MBB&: *MI.getParent());
5187
5188 LLVM_DEBUG(dbgs() << "With: ");
5189 LLVM_DEBUG(MI.dump());
5190 LLVM_DEBUG(dbgs() << "\n");
5191 return true;
5192}
5193
5194const TargetRegisterClass *
5195PPCInstrInfo::updatedRC(const TargetRegisterClass *RC) const {
5196 if (Subtarget.hasVSX() && RC == &PPC::VRRCRegClass)
5197 return &PPC::VSRCRegClass;
5198 return RC;
5199}
5200
5201int PPCInstrInfo::getRecordFormOpcode(unsigned Opcode) {
5202 return PPC::getRecordFormOpcode(Opcode);
5203}
5204
5205static bool isOpZeroOfSubwordPreincLoad(int Opcode) {
5206 return (Opcode == PPC::LBZU || Opcode == PPC::LBZUX || Opcode == PPC::LBZU8 ||
5207 Opcode == PPC::LBZUX8 || Opcode == PPC::LHZU ||
5208 Opcode == PPC::LHZUX || Opcode == PPC::LHZU8 ||
5209 Opcode == PPC::LHZUX8);
5210}
5211
5212// This function checks for sign extension from 32 bits to 64 bits.
5213static bool definedBySignExtendingOp(const unsigned Reg,
5214 const MachineRegisterInfo *MRI) {
5215 if (!Register::isVirtualRegister(Reg))
5216 return false;
5217
5218 MachineInstr *MI = MRI->getVRegDef(Reg);
5219 if (!MI)
5220 return false;
5221
5222 int Opcode = MI->getOpcode();
5223 const PPCInstrInfo *TII =
5224 MI->getMF()->getSubtarget<PPCSubtarget>().getInstrInfo();
5225 if (TII->isSExt32To64(Opcode))
5226 return true;
5227
5228 // The first def of LBZU/LHZU is sign extended.
5229 if (isOpZeroOfSubwordPreincLoad(Opcode) && MI->getOperand(i: 0).getReg() == Reg)
5230 return true;
5231
5232 // RLDICL generates sign-extended output if it clears at least
5233 // 33 bits from the left (MSB).
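  // For example (illustrative): rldicl x, y, 0, 33 clears bits 0-32 (the
  // high 33 bits), so the sign bit of the low 32-bit word is zero and the
  // result is sign extended.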
5234 if (Opcode == PPC::RLDICL && MI->getOperand(i: 3).getImm() >= 33)
5235 return true;
5236
  // If at least one bit from the left in the lower word is masked out,
  // all of bits 0 to 32 of the output are cleared.
  // Hence the output is already sign extended.
5240 if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
5241 Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec) &&
5242 MI->getOperand(i: 3).getImm() > 0 &&
5243 MI->getOperand(i: 3).getImm() <= MI->getOperand(i: 4).getImm())
5244 return true;
5245
  // If the most significant bit of the immediate in ANDIS is zero,
  // all of bits 0 to 32 are cleared.
5248 if (Opcode == PPC::ANDIS_rec || Opcode == PPC::ANDIS8_rec) {
5249 uint16_t Imm = MI->getOperand(i: 2).getImm();
5250 if ((Imm & 0x8000) == 0)
5251 return true;
5252 }
5253
5254 return false;
5255}
5256
5257// This function checks the machine instruction that defines the input register
5258// Reg. If that machine instruction always outputs a value that has only zeros
5259// in the higher 32 bits then this function will return true.
5260static bool definedByZeroExtendingOp(const unsigned Reg,
5261 const MachineRegisterInfo *MRI) {
5262 if (!Register::isVirtualRegister(Reg))
5263 return false;
5264
5265 MachineInstr *MI = MRI->getVRegDef(Reg);
5266 if (!MI)
5267 return false;
5268
5269 int Opcode = MI->getOpcode();
5270 const PPCInstrInfo *TII =
5271 MI->getMF()->getSubtarget<PPCSubtarget>().getInstrInfo();
5272 if (TII->isZExt32To64(Opcode))
5273 return true;
5274
  // The first def of LBZU/LHZU/LWZU is zero extended.
5276 if ((isOpZeroOfSubwordPreincLoad(Opcode) || Opcode == PPC::LWZU ||
5277 Opcode == PPC::LWZUX || Opcode == PPC::LWZU8 || Opcode == PPC::LWZUX8) &&
5278 MI->getOperand(i: 0).getReg() == Reg)
5279 return true;
5280
5281 // The 16-bit immediate is sign-extended in li/lis.
5282 // If the most significant bit is zero, all higher bits are zero.
5283 if (Opcode == PPC::LI || Opcode == PPC::LI8 ||
5284 Opcode == PPC::LIS || Opcode == PPC::LIS8) {
5285 int64_t Imm = MI->getOperand(i: 1).getImm();
5286 if (((uint64_t)Imm & ~0x7FFFuLL) == 0)
5287 return true;
5288 }
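  // For example (illustrative): li 100 has all higher bits zero, whereas
  // li -1 sign-extends to all ones and is therefore not zero extended.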
5289
  // We have some variations of rotate-and-mask instructions
  // that clear the upper 32 bits.
5292 if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICL_rec ||
5293 Opcode == PPC::RLDCL || Opcode == PPC::RLDCL_rec ||
5294 Opcode == PPC::RLDICL_32_64) &&
5295 MI->getOperand(i: 3).getImm() >= 32)
5296 return true;
5297
5298 if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDIC_rec) &&
5299 MI->getOperand(i: 3).getImm() >= 32 &&
5300 MI->getOperand(i: 3).getImm() <= 63 - MI->getOperand(i: 2).getImm())
5301 return true;
5302
5303 if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
5304 Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec ||
5305 Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) &&
5306 MI->getOperand(i: 3).getImm() <= MI->getOperand(i: 4).getImm())
5307 return true;
5308
5309 return false;
5310}
5311
5312// This function returns true if the input MachineInstr is a TOC save
5313// instruction.
5314bool PPCInstrInfo::isTOCSaveMI(const MachineInstr &MI) const {
5315 if (!MI.getOperand(i: 1).isImm() || !MI.getOperand(i: 2).isReg())
5316 return false;
5317 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5318 unsigned StackOffset = MI.getOperand(i: 1).getImm();
5319 Register StackReg = MI.getOperand(i: 2).getReg();
5320 Register SPReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
5321 if (StackReg == SPReg && StackOffset == TOCSaveOffset)
5322 return true;
5323
5324 return false;
5325}
5326
5327// We limit the max depth to track incoming values of PHIs or binary ops
5328// (e.g. AND) to avoid excessive cost.
5329const unsigned MAX_BINOP_DEPTH = 1;
5330
5331// This function will promote the instruction which defines the register `Reg`
5332// in the parameter from a 32-bit to a 64-bit instruction if needed. The logic
5333// used to check whether an instruction needs to be promoted or not is similar
5334// to the logic used to check whether or not a defined register is sign or zero
5335// extended within the function PPCInstrInfo::isSignOrZeroExtended.
5336// Additionally, the `promoteInstr32To64ForElimEXTSW` function is recursive.
5337// BinOpDepth does not count all of the recursions. The parameter BinOpDepth is
5338// incremented only when `promoteInstr32To64ForElimEXTSW` calls itself more
5339// than once. This is done to prevent exponential recursion.
5340void PPCInstrInfo::promoteInstr32To64ForElimEXTSW(const Register &Reg,
5341 MachineRegisterInfo *MRI,
5342 unsigned BinOpDepth,
5343 LiveVariables *LV) const {
5344 if (!Reg.isVirtual())
5345 return;
5346
5347 MachineInstr *MI = MRI->getVRegDef(Reg);
5348 if (!MI)
5349 return;
5350
5351 unsigned Opcode = MI->getOpcode();
5352
5353 switch (Opcode) {
5354 case PPC::OR:
5355 case PPC::ISEL:
5356 case PPC::OR8:
5357 case PPC::PHI: {
5358 if (BinOpDepth >= MAX_BINOP_DEPTH)
5359 break;
5360 unsigned OperandEnd = 3, OperandStride = 1;
5361 if (Opcode == PPC::PHI) {
5362 OperandEnd = MI->getNumOperands();
5363 OperandStride = 2;
5364 }
5365
5366 for (unsigned I = 1; I < OperandEnd; I += OperandStride) {
5367 assert(MI->getOperand(I).isReg() && "Operand must be register");
5368 promoteInstr32To64ForElimEXTSW(Reg: MI->getOperand(i: I).getReg(), MRI,
5369 BinOpDepth: BinOpDepth + 1, LV);
5370 }
5371
5372 break;
5373 }
5374 case PPC::COPY: {
    // This mirrors the logic of the `case PPC::COPY` statement in
    // PPCInstrInfo::isSignOrZeroExtended().
5377
5378 Register SrcReg = MI->getOperand(i: 1).getReg();
5379 // In both ELFv1 and v2 ABI, method parameters and the return value
5380 // are sign- or zero-extended.
5381 const MachineFunction *MF = MI->getMF();
5382 if (!MF->getSubtarget<PPCSubtarget>().isSVR4ABI()) {
5383 // If this is a copy from another register, we recursively promote the
5384 // source.
5385 promoteInstr32To64ForElimEXTSW(Reg: SrcReg, MRI, BinOpDepth, LV);
5386 return;
5387 }
5388
    // From here on everything is SVR4ABI. COPY will be eliminated in another
    // pass, so we do not need to promote the COPY pseudo opcode.
5391
5392 if (SrcReg != PPC::X3)
5393 // If this is a copy from another register, we recursively promote the
5394 // source.
5395 promoteInstr32To64ForElimEXTSW(Reg: SrcReg, MRI, BinOpDepth, LV);
5396 return;
5397 }
5398 case PPC::ORI:
5399 case PPC::XORI:
5400 case PPC::ORIS:
5401 case PPC::XORIS:
5402 case PPC::ORI8:
5403 case PPC::XORI8:
5404 case PPC::ORIS8:
5405 case PPC::XORIS8:
5406 promoteInstr32To64ForElimEXTSW(Reg: MI->getOperand(i: 1).getReg(), MRI, BinOpDepth,
5407 LV);
5408 break;
5409 case PPC::AND:
5410 case PPC::AND8:
5411 if (BinOpDepth >= MAX_BINOP_DEPTH)
5412 break;
5413
5414 promoteInstr32To64ForElimEXTSW(Reg: MI->getOperand(i: 1).getReg(), MRI,
5415 BinOpDepth: BinOpDepth + 1, LV);
5416 promoteInstr32To64ForElimEXTSW(Reg: MI->getOperand(i: 2).getReg(), MRI,
5417 BinOpDepth: BinOpDepth + 1, LV);
5418 break;
5419 }
5420
5421 const TargetRegisterClass *RC = MRI->getRegClass(Reg);
5422 if (RC == &PPC::G8RCRegClass || RC == &PPC::G8RC_and_G8RC_NOX0RegClass)
5423 return;
5424
5425 const PPCInstrInfo *TII =
5426 MI->getMF()->getSubtarget<PPCSubtarget>().getInstrInfo();
5427
  // Map the 32-bit opcodes to their 64-bit counterparts for instructions
  // that are not sign- or zero-extending themselves, but whose operands may
  // be the destination registers of sign- or zero-extending instructions.
5431 std::unordered_map<unsigned, unsigned> OpcodeMap = {
5432 {PPC::OR, PPC::OR8}, {PPC::ISEL, PPC::ISEL8},
5433 {PPC::ORI, PPC::ORI8}, {PPC::XORI, PPC::XORI8},
5434 {PPC::ORIS, PPC::ORIS8}, {PPC::XORIS, PPC::XORIS8},
5435 {PPC::AND, PPC::AND8}};
5436
5437 int NewOpcode = -1;
5438 auto It = OpcodeMap.find(x: Opcode);
5439 if (It != OpcodeMap.end()) {
5440 // Set the new opcode to the mapped 64-bit version.
5441 NewOpcode = It->second;
5442 } else {
5443 if (!TII->isSExt32To64(Opcode))
5444 return;
5445
    // The TableGen function `get64BitInstrFromSignedExt32BitInstr` is used
    // to map a 32-bit instruction carrying the `SExt32To64` flag to its
    // corresponding 64-bit instruction.
5449 NewOpcode = PPC::get64BitInstrFromSignedExt32BitInstr(Opcode);
5450 }
5451
5452 assert(NewOpcode != -1 &&
5453 "Must have a 64-bit opcode to map the 32-bit opcode!");
5454
5455 const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
5456 const MCInstrDesc &MCID = TII->get(Opcode: NewOpcode);
5457 const TargetRegisterClass *NewRC =
5458 TRI->getRegClass(i: MCID.operands()[0].RegClass);
5459
5460 Register SrcReg = MI->getOperand(i: 0).getReg();
5461 const TargetRegisterClass *SrcRC = MRI->getRegClass(Reg: SrcReg);
5462
5463 // If the register class of the defined register in the 32-bit instruction
5464 // is the same as the register class of the defined register in the promoted
5465 // 64-bit instruction, we do not need to promote the instruction.
5466 if (NewRC == SrcRC)
5467 return;
5468
5469 DebugLoc DL = MI->getDebugLoc();
5470 auto MBB = MI->getParent();
5471
  // Since the opcode of the instruction is promoted from 32-bit to 64-bit,
  // if the source reg class of the original instruction belongs to
  // PPC::GPRCRegClass or PPC::GPRC_and_GPRC_NOR0RegClass, we need to promote
  // the operand to PPC::G8RCRegClass or PPC::G8RC_and_G8RC_NOR0RegClass,
  // respectively.
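  // For example (an illustrative sketch of the rewrite):
  //   gprc = OR gprc, gprc
  // becomes
  //   g8rc = IMPLICIT_DEF
  //   g8rc = INSERT_SUBREG g8rc, gprc, sub_32   (once per 32-bit operand)
  //   g8rc = OR8 g8rc, g8rc
  //   gprc = COPY g8rc.sub_32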
5477 DenseMap<unsigned, Register> PromoteRegs;
5478 for (unsigned i = 1; i < MI->getNumOperands(); i++) {
5479 MachineOperand &Operand = MI->getOperand(i);
5480 if (!Operand.isReg())
5481 continue;
5482
5483 Register OperandReg = Operand.getReg();
5484 if (!OperandReg.isVirtual())
5485 continue;
5486
5487 const TargetRegisterClass *NewUsedRegRC =
5488 TRI->getRegClass(i: MCID.operands()[i].RegClass);
5489 const TargetRegisterClass *OrgRC = MRI->getRegClass(Reg: OperandReg);
5490 if (NewUsedRegRC != OrgRC && (OrgRC == &PPC::GPRCRegClass ||
5491 OrgRC == &PPC::GPRC_and_GPRC_NOR0RegClass)) {
5492 // Promote the used 32-bit register to 64-bit register.
5493 Register TmpReg = MRI->createVirtualRegister(RegClass: NewUsedRegRC);
5494 Register DstTmpReg = MRI->createVirtualRegister(RegClass: NewUsedRegRC);
5495 BuildMI(BB&: *MBB, I: MI, MIMD: DL, MCID: TII->get(Opcode: PPC::IMPLICIT_DEF), DestReg: TmpReg);
5496 BuildMI(BB&: *MBB, I: MI, MIMD: DL, MCID: TII->get(Opcode: PPC::INSERT_SUBREG), DestReg: DstTmpReg)
5497 .addReg(RegNo: TmpReg)
5498 .addReg(RegNo: OperandReg)
5499 .addImm(Val: PPC::sub_32);
5500 PromoteRegs[i] = DstTmpReg;
5501 }
5502 }
5503
5504 Register NewDefinedReg = MRI->createVirtualRegister(RegClass: NewRC);
5505
5506 BuildMI(BB&: *MBB, I: MI, MIMD: DL, MCID: TII->get(Opcode: NewOpcode), DestReg: NewDefinedReg);
5507 MachineBasicBlock::instr_iterator Iter(MI);
5508 --Iter;
5509 MachineInstrBuilder MIBuilder(*Iter->getMF(), Iter);
5510 for (unsigned i = 1; i < MI->getNumOperands(); i++) {
5511 if (auto It = PromoteRegs.find(Val: i); It != PromoteRegs.end())
5512 MIBuilder.addReg(RegNo: It->second, Flags: RegState::Kill);
5513 else
5514 Iter->addOperand(Op: MI->getOperand(i));
5515 }
5516
5517 for (unsigned i = 1; i < Iter->getNumOperands(); i++) {
5518 MachineOperand &Operand = Iter->getOperand(i);
5519 if (!Operand.isReg())
5520 continue;
5521 Register OperandReg = Operand.getReg();
5522 if (!OperandReg.isVirtual())
5523 continue;
5524 LV->recomputeForSingleDefVirtReg(Reg: OperandReg);
5525 }
5526
5527 MI->eraseFromParent();
5528
5529 // A defined register may be used by other instructions that are 32-bit.
5530 // After the defined register is promoted to 64-bit for the promoted
5531 // instruction, we need to demote the 64-bit defined register back to a
// 32-bit register.
5533 BuildMI(BB&: *MBB, I: ++Iter, MIMD: DL, MCID: TII->get(Opcode: PPC::COPY), DestReg: SrcReg)
5534 .addReg(RegNo: NewDefinedReg, Flags: RegState::Kill, SubReg: PPC::sub_32);
5535 LV->recomputeForSingleDefVirtReg(Reg: NewDefinedReg);
5536}
5537
// The isSignOrZeroExtended function is recursive. The parameter BinOpDepth
// does not count all of the recursions; it is incremented only when
// isSignOrZeroExtended calls itself more than once. This is done to prevent
// exponential recursion. There is no parameter to track linear recursion.
5543std::pair<bool, bool>
5544PPCInstrInfo::isSignOrZeroExtended(const unsigned Reg,
5545 const unsigned BinOpDepth,
5546 const MachineRegisterInfo *MRI) const {
5547 if (!Register::isVirtualRegister(Reg))
5548 return std::pair<bool, bool>(false, false);
5549
5550 MachineInstr *MI = MRI->getVRegDef(Reg);
5551 if (!MI)
5552 return std::pair<bool, bool>(false, false);
5553
5554 bool IsSExt = definedBySignExtendingOp(Reg, MRI);
5555 bool IsZExt = definedByZeroExtendingOp(Reg, MRI);
5556
5557 // If we know the instruction always returns sign- and zero-extended result,
5558 // return here.
5559 if (IsSExt && IsZExt)
5560 return std::pair<bool, bool>(IsSExt, IsZExt);
5561
5562 switch (MI->getOpcode()) {
5563 case PPC::COPY: {
5564 Register SrcReg = MI->getOperand(i: 1).getReg();
5565
5566 // In both ELFv1 and v2 ABI, method parameters and the return value
5567 // are sign- or zero-extended.
5568 const MachineFunction *MF = MI->getMF();
5569
5570 if (!MF->getSubtarget<PPCSubtarget>().isSVR4ABI()) {
5571 // If this is a copy from another register, we recursively check source.
5572 auto SrcExt = isSignOrZeroExtended(Reg: SrcReg, BinOpDepth, MRI);
5573 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5574 SrcExt.second || IsZExt);
5575 }
5576
5577 // From here on everything is SVR4ABI
5578 const PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>();
5579 // We check the ZExt/SExt flags for a method parameter.
5580 if (MI->getParent()->getBasicBlock() ==
5581 &MF->getFunction().getEntryBlock()) {
5582 Register VReg = MI->getOperand(i: 0).getReg();
5583 if (MF->getRegInfo().isLiveIn(Reg: VReg)) {
5584 IsSExt |= FuncInfo->isLiveInSExt(VReg);
5585 IsZExt |= FuncInfo->isLiveInZExt(VReg);
5586 return std::pair<bool, bool>(IsSExt, IsZExt);
5587 }
5588 }
5589
5590 if (SrcReg != PPC::X3) {
5591 // If this is a copy from another register, we recursively check source.
5592 auto SrcExt = isSignOrZeroExtended(Reg: SrcReg, BinOpDepth, MRI);
5593 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5594 SrcExt.second || IsZExt);
5595 }
5596
    // For a method return value, we check the ZExt/SExt flags in the
    // attribute. We assume the following code sequence for a method call.
5599 // ADJCALLSTACKDOWN 32, implicit dead %r1, implicit %r1
5600 // BL8_NOP @func,...
5601 // ADJCALLSTACKUP 32, 0, implicit dead %r1, implicit %r1
5602 // %5 = COPY %x3; G8RC:%5
5603 const MachineBasicBlock *MBB = MI->getParent();
5604 std::pair<bool, bool> IsExtendPair = std::pair<bool, bool>(IsSExt, IsZExt);
5605 MachineBasicBlock::const_instr_iterator II =
5606 MachineBasicBlock::const_instr_iterator(MI);
5607 if (II == MBB->instr_begin() || (--II)->getOpcode() != PPC::ADJCALLSTACKUP)
5608 return IsExtendPair;
5609
5610 const MachineInstr &CallMI = *(--II);
5611 if (!CallMI.isCall() || !CallMI.getOperand(i: 0).isGlobal())
5612 return IsExtendPair;
5613
5614 const Function *CalleeFn =
5615 dyn_cast_if_present<Function>(Val: CallMI.getOperand(i: 0).getGlobal());
5616 if (!CalleeFn)
5617 return IsExtendPair;
5618 const IntegerType *IntTy = dyn_cast<IntegerType>(Val: CalleeFn->getReturnType());
5619 if (IntTy && IntTy->getBitWidth() <= 32) {
5620 const AttributeSet &Attrs = CalleeFn->getAttributes().getRetAttrs();
5621 IsSExt |= Attrs.hasAttribute(Kind: Attribute::SExt);
5622 IsZExt |= Attrs.hasAttribute(Kind: Attribute::ZExt);
5623 return std::pair<bool, bool>(IsSExt, IsZExt);
5624 }
5625
5626 return IsExtendPair;
5627 }
5628
5629 // OR, XOR with 16-bit immediate does not change the upper 48 bits.
5630 // So, we track the operand register as we do for register copy.
5631 case PPC::ORI:
5632 case PPC::XORI:
5633 case PPC::ORI8:
5634 case PPC::XORI8: {
5635 Register SrcReg = MI->getOperand(i: 1).getReg();
5636 auto SrcExt = isSignOrZeroExtended(Reg: SrcReg, BinOpDepth, MRI);
5637 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5638 SrcExt.second || IsZExt);
5639 }
5640
5641 // OR, XOR with shifted 16-bit immediate does not change the upper
5642 // 32 bits. So, we track the operand register for zero extension.
5643 // For sign extension when the MSB of the immediate is zero, we also
5644 // track the operand register since the upper 33 bits are unchanged.
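  // For example (illustrative): xoris z, x, 0x8000 flips bit 32 (the sign
  // bit of the low word), so sign extension is not preserved, whereas an
  // immediate of 0x7FFF or below leaves bits 0-32 unchanged.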
5645 case PPC::ORIS:
5646 case PPC::XORIS:
5647 case PPC::ORIS8:
5648 case PPC::XORIS8: {
5649 Register SrcReg = MI->getOperand(i: 1).getReg();
5650 auto SrcExt = isSignOrZeroExtended(Reg: SrcReg, BinOpDepth, MRI);
5651 uint16_t Imm = MI->getOperand(i: 2).getImm();
5652 if (Imm & 0x8000)
5653 return std::pair<bool, bool>(false, SrcExt.second || IsZExt);
5654 else
5655 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5656 SrcExt.second || IsZExt);
5657 }
5658
5659 // If all incoming values are sign-/zero-extended,
5660 // the output of OR, ISEL or PHI is also sign-/zero-extended.
5661 case PPC::OR:
5662 case PPC::OR8:
5663 case PPC::ISEL:
5664 case PPC::PHI: {
5665 if (BinOpDepth >= MAX_BINOP_DEPTH)
5666 return std::pair<bool, bool>(false, false);
5667
5668 // The input registers for PHI are operand 1, 3, ...
5669 // The input registers for others are operand 1 and 2.
5670 unsigned OperandEnd = 3, OperandStride = 1;
5671 if (MI->getOpcode() == PPC::PHI) {
5672 OperandEnd = MI->getNumOperands();
5673 OperandStride = 2;
5674 }
5675
5676 IsSExt = true;
5677 IsZExt = true;
5678 for (unsigned I = 1; I != OperandEnd; I += OperandStride) {
5679 if (!MI->getOperand(i: I).isReg())
5680 return std::pair<bool, bool>(false, false);
5681
5682 Register SrcReg = MI->getOperand(i: I).getReg();
5683 auto SrcExt = isSignOrZeroExtended(Reg: SrcReg, BinOpDepth: BinOpDepth + 1, MRI);
5684 IsSExt &= SrcExt.first;
5685 IsZExt &= SrcExt.second;
5686 }
5687 return std::pair<bool, bool>(IsSExt, IsZExt);
5688 }
5689
5690 // If at least one of the incoming values of an AND is zero extended
5691 // then the output is also zero-extended. If both of the incoming values
5692 // are sign-extended then the output is also sign extended.
5693 case PPC::AND:
5694 case PPC::AND8: {
5695 if (BinOpDepth >= MAX_BINOP_DEPTH)
5696 return std::pair<bool, bool>(false, false);
5697
5698 Register SrcReg1 = MI->getOperand(i: 1).getReg();
5699 Register SrcReg2 = MI->getOperand(i: 2).getReg();
5700 auto Src1Ext = isSignOrZeroExtended(Reg: SrcReg1, BinOpDepth: BinOpDepth + 1, MRI);
5701 auto Src2Ext = isSignOrZeroExtended(Reg: SrcReg2, BinOpDepth: BinOpDepth + 1, MRI);
5702 return std::pair<bool, bool>(Src1Ext.first && Src2Ext.first,
5703 Src1Ext.second || Src2Ext.second);
5704 }
5705
5706 default:
5707 break;
5708 }
5709 return std::pair<bool, bool>(IsSExt, IsZExt);
5710}
5711
5712bool PPCInstrInfo::isBDNZ(unsigned Opcode) const {
5713 return (Opcode == (Subtarget.isPPC64() ? PPC::BDNZ8 : PPC::BDNZ));
5714}
5715
5716namespace {
5717class PPCPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
5718 MachineInstr *Loop, *EndLoop, *LoopCount;
5719 MachineFunction *MF;
5720 const TargetInstrInfo *TII;
5721 int64_t TripCount;
5722
5723public:
5724 PPCPipelinerLoopInfo(MachineInstr *Loop, MachineInstr *EndLoop,
5725 MachineInstr *LoopCount)
5726 : Loop(Loop), EndLoop(EndLoop), LoopCount(LoopCount),
5727 MF(Loop->getParent()->getParent()),
5728 TII(MF->getSubtarget().getInstrInfo()) {
5729 // Inspect the Loop instruction up-front, as it may be deleted when we call
5730 // createTripCountGreaterCondition.
5731 if (LoopCount->getOpcode() == PPC::LI8 || LoopCount->getOpcode() == PPC::LI)
5732 TripCount = LoopCount->getOperand(i: 1).getImm();
5733 else
5734 TripCount = -1;
5735 }
5736
5737 bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
5738 // Only ignore the terminator.
5739 return MI == EndLoop;
5740 }
5741
5742 std::optional<bool> createTripCountGreaterCondition(
5743 int TC, MachineBasicBlock &MBB,
5744 SmallVectorImpl<MachineOperand> &Cond) override {
5745 if (TripCount == -1) {
      // Since the BDZ/BDZ8 that we will insert also decrements the CTR by 1,
      // we don't need to generate anything extra here.
5748 Cond.push_back(Elt: MachineOperand::CreateImm(Val: 0));
5749 Cond.push_back(Elt: MachineOperand::CreateReg(
5750 Reg: MF->getSubtarget<PPCSubtarget>().isPPC64() ? PPC::CTR8 : PPC::CTR,
5751 isDef: true));
5752 return {};
5753 }
5754
5755 return TripCount > TC;
5756 }
5757
5758 void setPreheader(MachineBasicBlock *NewPreheader) override {
5759 // Do nothing. We want the LOOP setup instruction to stay in the *old*
5760 // preheader, so we can use BDZ in the prologs to adapt the loop trip count.
5761 }
5762
5763 void adjustTripCount(int TripCountAdjust) override {
5764 // If the loop trip count is a compile-time value, then just change the
5765 // value.
5766 if (LoopCount->getOpcode() == PPC::LI8 ||
5767 LoopCount->getOpcode() == PPC::LI) {
5768 int64_t TripCount = LoopCount->getOperand(i: 1).getImm() + TripCountAdjust;
5769 LoopCount->getOperand(i: 1).setImm(TripCount);
5770 return;
5771 }
5772
    // Since the BDZ/BDZ8 that we will insert also decrements the CTR by 1,
    // we don't need to generate anything extra here.
5775 }
5776
5777 void disposed(LiveIntervals *LIS) override {
5778 if (LIS) {
5779 LIS->RemoveMachineInstrFromMaps(MI&: *Loop);
5780 LIS->RemoveMachineInstrFromMaps(MI&: *LoopCount);
5781 }
5782 Loop->eraseFromParent();
5783 // Ensure the loop setup instruction is deleted too.
5784 LoopCount->eraseFromParent();
5785 }
5786};
5787} // namespace
5788
5789std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
5790PPCInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
5791 // We really "analyze" only hardware loops right now.
5792 MachineBasicBlock::iterator I = LoopBB->getFirstTerminator();
5793 MachineBasicBlock *Preheader = *LoopBB->pred_begin();
5794 if (Preheader == LoopBB)
5795 Preheader = *std::next(x: LoopBB->pred_begin());
5796 MachineFunction *MF = Preheader->getParent();
5797
5798 if (I != LoopBB->end() && isBDNZ(Opcode: I->getOpcode())) {
5799 SmallPtrSet<MachineBasicBlock *, 8> Visited;
5800 if (MachineInstr *LoopInst = findLoopInstr(PreHeader&: *Preheader, Visited)) {
5801 Register LoopCountReg = LoopInst->getOperand(i: 0).getReg();
5802 MachineRegisterInfo &MRI = MF->getRegInfo();
5803 MachineInstr *LoopCount = MRI.getUniqueVRegDef(Reg: LoopCountReg);
5804 return std::make_unique<PPCPipelinerLoopInfo>(args&: LoopInst, args: &*I, args&: LoopCount);
5805 }
5806 }
5807 return nullptr;
5808}
5809
5810MachineInstr *PPCInstrInfo::findLoopInstr(
5811 MachineBasicBlock &PreHeader,
5812 SmallPtrSet<MachineBasicBlock *, 8> &Visited) const {
5813
5814 unsigned LOOPi = (Subtarget.isPPC64() ? PPC::MTCTR8loop : PPC::MTCTRloop);
5815
  // The loop set-up instruction should be in the preheader.
5817 for (auto &I : PreHeader.instrs())
5818 if (I.getOpcode() == LOOPi)
5819 return &I;
5820 return nullptr;
5821}
5822
// Return true if we can determine the base operand and byte offset of the
// instruction, as well as the memory width. Width is the size of memory that
// is being loaded/stored.
5825bool PPCInstrInfo::getMemOperandWithOffsetWidth(
5826 const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
5827 LocationSize &Width, const TargetRegisterInfo *TRI) const {
5828 if (!LdSt.mayLoadOrStore() || LdSt.getNumExplicitOperands() != 3)
5829 return false;
5830
5831 // Handle only loads/stores with base register followed by immediate offset.
5832 if (!LdSt.getOperand(i: 1).isImm() ||
5833 (!LdSt.getOperand(i: 2).isReg() && !LdSt.getOperand(i: 2).isFI()))
5834 return false;
5835
5836 if (!LdSt.hasOneMemOperand())
5837 return false;
5838
5839 Width = (*LdSt.memoperands_begin())->getSize();
5840 Offset = LdSt.getOperand(i: 1).getImm();
5841 BaseReg = &LdSt.getOperand(i: 2);
5842 return true;
5843}
5844
5845bool PPCInstrInfo::areMemAccessesTriviallyDisjoint(
5846 const MachineInstr &MIa, const MachineInstr &MIb) const {
5847 assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
5848 assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
5849
5850 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
5851 MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
5852 return false;
5853
  // Retrieve the base register, the offset from the base register, and the
  // width. Width is the size of memory that is being loaded/stored (e.g. 1,
  // 2, 4). If the base registers are identical, and the offset of the lower
  // memory access plus its width does not extend past the offset of the
  // higher memory access, then the memory accesses are disjoint.
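  // For example (illustrative): lwz 0(r3) with width 4 and lwz 8(r3) are
  // trivially disjoint because 0 + 4 <= 8.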
5859 const TargetRegisterInfo *TRI = &getRegisterInfo();
5860 const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
5861 int64_t OffsetA = 0, OffsetB = 0;
5862 LocationSize WidthA = LocationSize::precise(Value: 0),
5863 WidthB = LocationSize::precise(Value: 0);
5864 if (getMemOperandWithOffsetWidth(LdSt: MIa, BaseReg&: BaseOpA, Offset&: OffsetA, Width&: WidthA, TRI) &&
5865 getMemOperandWithOffsetWidth(LdSt: MIb, BaseReg&: BaseOpB, Offset&: OffsetB, Width&: WidthB, TRI)) {
5866 if (BaseOpA->isIdenticalTo(Other: *BaseOpB)) {
5867 int LowOffset = std::min(a: OffsetA, b: OffsetB);
5868 int HighOffset = std::max(a: OffsetA, b: OffsetB);
5869 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
5870 if (LowWidth.hasValue() &&
5871 LowOffset + (int)LowWidth.getValue() <= HighOffset)
5872 return true;
5873 }
5874 }
5875 return false;
5876}
5877