//===-- PPCInstrInfo.h - PowerPC Instruction Information --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the PowerPC implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H
#define LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H

#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "PPC.h"
#include "PPCRegisterInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
#include "PPCGenInstrInfo.inc"

namespace llvm {

// Instructions that have an immediate form might be convertible to that
// form if the correct input is a result of a load immediate. In order to
// know whether the transformation is special, we might need to know some
// of the details of the two forms.
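//
// As an illustrative sketch (hypothetical virtual registers; the actual
// conversion is driven by the fields below): an x-form load fed by a load
// immediate, e.g.
//
//   %5 = LI 100
//   %6 = LWZX %4, %5
//
// may be rewritten to the equivalent r+i (D-form) instruction
//
//   %6 = LWZ 100, %4
//
// when the immediate satisfies the signedness, width, and multiple-of
// constraints recorded here.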
struct ImmInstrInfo {
  // Is the immediate field in the immediate form signed or unsigned?
  uint64_t SignedImm : 1;
  // Does the immediate need to be a multiple of some value?
  uint64_t ImmMustBeMultipleOf : 5;
  // Is R0/X0 treated specially by the original r+r instruction?
  // If so, in which operand?
  uint64_t ZeroIsSpecialOrig : 3;
  // Is R0/X0 treated specially by the new r+i instruction?
  // If so, in which operand?
  uint64_t ZeroIsSpecialNew : 3;
  // Is the operation commutative?
  uint64_t IsCommutative : 1;
  // The operand number to check for an add-immediate def.
  uint64_t OpNoForForwarding : 3;
  // The operand number for the immediate.
  uint64_t ImmOpNo : 3;
  // The opcode of the new instruction.
  uint64_t ImmOpcode : 16;
  // The size of the immediate.
  uint64_t ImmWidth : 5;
  // The immediate should be truncated to N bits.
  uint64_t TruncateImmTo : 5;
  // Is the instruction summing the operands?
  uint64_t IsSummingOperands : 1;
};

// Information required to convert an instruction to just a materialized
// immediate.
struct LoadImmediateInfo {
  unsigned Imm : 16;
  unsigned Is64Bit : 1;
  unsigned SetCR : 1;
};
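
// A minimal usage sketch (the value 44 is hypothetical): to rewrite an
// instruction whose result is known to be the 64-bit constant 44 without
// setting CR0, replaceInstrWithLI() below would be handed:
//
//   LoadImmediateInfo LII = {/*Imm=*/44, /*Is64Bit=*/1, /*SetCR=*/0};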

// Index into the OpcodesForSpill array.
enum SpillOpcodeKey {
  SOK_Int4Spill,
  SOK_Int8Spill,
  SOK_Float8Spill,
  SOK_Float4Spill,
  SOK_CRSpill,
  SOK_CRBitSpill,
  SOK_VRVectorSpill,
  SOK_VSXVectorSpill,
  SOK_VectorFloat8Spill,
  SOK_VectorFloat4Spill,
  SOK_SpillToVSR,
  SOK_PairedVecSpill,
  SOK_AccumulatorSpill,
  SOK_UAccumulatorSpill,
  SOK_WAccumulatorSpill,
  SOK_SPESpill,
  SOK_PairedG8Spill,
  SOK_LastOpcodeSpill // This must be last on the enum.
};

// PPC MachineCombiner patterns.
enum PPCMachineCombinerPattern : unsigned {
  // These are patterns matched by the PowerPC to reassociate FMA chains.
  REASSOC_XY_AMM_BMM = MachineCombinerPattern::TARGET_PATTERN_START,
  REASSOC_XMM_AMM_BMM,

  // These are patterns matched by the PowerPC to reassociate FMA and FSUB to
  // reduce register pressure.
  REASSOC_XY_BCA,
  REASSOC_XY_BAC,
};
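
// As a rough illustration of the first pattern (shape only; the exact
// matching is implemented in reassociateFMA(), and FMA is written
// addend-first here, i.e. FMA A, B, C == A + B * C), REASSOC_XY_AMM_BMM
// rewrites a serial chain such as
//
//   A = FADD X, Y
//   B = FMA  A, M21, M22
//   C = FMA  B, M31, M32
//
// into two independent FMAs feeding a final add, shortening the dependence
// chain:
//
//   A = FMA  X, M21, M22
//   B = FMA  Y, M31, M32
//   C = FADD A, B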

// Define list of load and store spill opcodes.
#define NoInstr PPC::INSTRUCTION_LIST_END
#define Pwr8LoadOpcodes \
  { \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
    PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXVD2X, PPC::LXSDX, PPC::LXSSPX, \
    PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, NoInstr, PPC::EVLDD, \
    PPC::RESTORE_QUADWORD \
  }

#define Pwr9LoadOpcodes \
  { \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
    PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
    PPC::DFLOADf32, PPC::SPILLTOVSR_LD, NoInstr, NoInstr, NoInstr, \
    NoInstr, NoInstr, PPC::RESTORE_QUADWORD \
  }

#define Pwr10LoadOpcodes \
  { \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
    PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
    PPC::DFLOADf32, PPC::SPILLTOVSR_LD, PPC::LXVP, PPC::RESTORE_ACC, \
    PPC::RESTORE_UACC, NoInstr, NoInstr, PPC::RESTORE_QUADWORD \
  }

#define FutureLoadOpcodes \
  { \
    PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR, \
    PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, \
    PPC::DFLOADf32, PPC::SPILLTOVSR_LD, PPC::LXVP, PPC::RESTORE_ACC, \
    PPC::RESTORE_UACC, PPC::RESTORE_WACC, NoInstr, PPC::RESTORE_QUADWORD \
  }

#define Pwr8StoreOpcodes \
  { \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
    PPC::STVX, PPC::STXVD2X, PPC::STXSDX, PPC::STXSSPX, \
    PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, NoInstr, PPC::EVSTDD, \
    PPC::SPILL_QUADWORD \
  }

#define Pwr9StoreOpcodes \
  { \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
    PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
    PPC::SPILLTOVSR_ST, NoInstr, NoInstr, NoInstr, NoInstr, NoInstr, \
    PPC::SPILL_QUADWORD \
  }

#define Pwr10StoreOpcodes \
  { \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
    PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
    PPC::SPILLTOVSR_ST, PPC::STXVP, PPC::SPILL_ACC, PPC::SPILL_UACC, \
    NoInstr, NoInstr, PPC::SPILL_QUADWORD \
  }

#define FutureStoreOpcodes \
  { \
    PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR, PPC::SPILL_CRBIT, \
    PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32, \
    PPC::SPILLTOVSR_ST, PPC::STXVP, PPC::SPILL_ACC, PPC::SPILL_UACC, \
    PPC::SPILL_WACC, NoInstr, PPC::SPILL_QUADWORD \
  }

// Initialize arrays for load and store spill opcodes on supported subtargets.
#define StoreOpcodesForSpill \
  { Pwr8StoreOpcodes, Pwr9StoreOpcodes, Pwr10StoreOpcodes, FutureStoreOpcodes }
#define LoadOpcodesForSpill \
  { Pwr8LoadOpcodes, Pwr9LoadOpcodes, Pwr10LoadOpcodes, FutureLoadOpcodes }
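
// For example (conceptually; the subtarget row is chosen by getSpillTarget()
// in the class below), the opcode used to spill a 64-bit GPR on a Power9
// subtarget is found as:
//
//   StoreSpillOpcodesArray[/*Pwr9*/ 1][SOK_Int8Spill]   // == PPC::STD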

class PPCSubtarget;
class PPCInstrInfo : public PPCGenInstrInfo {
  PPCSubtarget &Subtarget;
  const PPCRegisterInfo RI;
  const unsigned StoreSpillOpcodesArray[4][SOK_LastOpcodeSpill] =
      StoreOpcodesForSpill;
  const unsigned LoadSpillOpcodesArray[4][SOK_LastOpcodeSpill] =
      LoadOpcodesForSpill;

  void StoreRegToStackSlot(MachineFunction &MF, unsigned SrcReg, bool isKill,
                           int FrameIdx, const TargetRegisterClass *RC,
                           SmallVectorImpl<MachineInstr *> &NewMIs) const;
  void LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
                            unsigned DestReg, int FrameIdx,
                            const TargetRegisterClass *RC,
                            SmallVectorImpl<MachineInstr *> &NewMIs) const;

  // Replace the instruction with a single LI if possible. \p DefMI must be LI
  // or LI8.
  bool simplifyToLI(MachineInstr &MI, MachineInstr &DefMI,
                    unsigned OpNoForForwarding, MachineInstr **KilledDef) const;
  // If the inst is imm-form and its register operand is produced by an ADDI,
  // put the imm into the inst directly and remove the ADDI if possible.
  bool transformToNewImmFormFedByAdd(MachineInstr &MI, MachineInstr &DefMI,
                                     unsigned OpNoForForwarding) const;
  // If the inst is x-form, has an imm-form, and one of its operands is
  // produced by an LI, put the imm into the inst directly and remove the LI
  // if possible.
  bool transformToImmFormFedByLI(MachineInstr &MI, const ImmInstrInfo &III,
                                 unsigned ConstantOpNo,
                                 MachineInstr &DefMI) const;
  // If the inst is x-form, has an imm-form, and one of its operands is
  // produced by an add-immediate, try to transform it when possible.
  bool transformToImmFormFedByAdd(MachineInstr &MI, const ImmInstrInfo &III,
                                  unsigned ConstantOpNo, MachineInstr &DefMI,
                                  bool KillDefMI) const;
  // Determine whether the instruction 'MI' has an operand that could be
  // forwarded from an instruction that feeds it. If so, return the Def of
  // that operand, and set OpNoForForwarding to the index of that operand
  // in 'MI'. If another use of this Def is seen between the Def and 'MI',
  // SeenIntermediateUse is set to true.
  MachineInstr *getForwardingDefMI(MachineInstr &MI,
                                   unsigned &OpNoForForwarding,
                                   bool &SeenIntermediateUse) const;

  // Can the user MI have its source at index \p OpNoForForwarding
  // forwarded from an add-immediate that feeds it?
  bool isUseMIElgibleForForwarding(MachineInstr &MI, const ImmInstrInfo &III,
                                   unsigned OpNoForForwarding) const;
  bool isDefMIElgibleForForwarding(MachineInstr &DefMI,
                                   const ImmInstrInfo &III,
                                   MachineOperand *&ImmMO,
                                   MachineOperand *&RegMO) const;
  bool isImmElgibleForForwarding(const MachineOperand &ImmMO,
                                 const MachineInstr &DefMI,
                                 const ImmInstrInfo &III,
                                 int64_t &Imm,
                                 int64_t BaseImm = 0) const;
  bool isRegElgibleForForwarding(const MachineOperand &RegMO,
                                 const MachineInstr &DefMI,
                                 const MachineInstr &MI, bool KillDefMI,
                                 bool &IsFwdFeederRegKilled,
                                 bool &SeenIntermediateUse) const;
  unsigned getSpillTarget() const;
  ArrayRef<unsigned> getStoreOpcodesForSpillArray() const;
  ArrayRef<unsigned> getLoadOpcodesForSpillArray() const;
  unsigned getSpillIndex(const TargetRegisterClass *RC) const;
  int16_t getFMAOpIdxInfo(unsigned Opcode) const;
  void reassociateFMA(MachineInstr &Root, unsigned Pattern,
                      SmallVectorImpl<MachineInstr *> &InsInstrs,
                      SmallVectorImpl<MachineInstr *> &DelInstrs,
                      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
  Register
  generateLoadForNewConst(unsigned Idx, MachineInstr *MI, Type *Ty,
                          SmallVectorImpl<MachineInstr *> &InsInstrs) const;
  virtual void anchor();

protected:
  /// Commutes the operands in the given instruction.
  /// The commutable operands are specified by their indices OpIdx1 and OpIdx2.
  ///
  /// Do not call this method for a non-commutable instruction or for a
  /// non-commutable pair of operand indices OpIdx1 and OpIdx2.
  /// Even though the instruction is commutable, the method may still fail to
  /// commute the operands; a null pointer is returned in such cases.
  ///
  /// For example, we can commute rlwimi instructions, but only if the
  /// rotate amt is zero. We also have to munge the immediates a bit.
  MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                       unsigned OpIdx1,
                                       unsigned OpIdx2) const override;
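
  // An illustrative sketch of the rlwimi case mentioned above (hypothetical
  // operands; the exact mask adjustment lives in the implementation): with a
  // zero rotate amount,
  //
  //   rlwimi rA, rS, 0, MB, ME   ; insert bits MB..ME of rS into rA
  //
  // the operands can be commuted by swapping the two register roles and
  // replacing the insert mask with its (wrapped) complement.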

public:
  explicit PPCInstrInfo(PPCSubtarget &STI);

  bool isLoadFromConstantPool(MachineInstr *I) const;
  const Constant *getConstantFromConstantPool(MachineInstr *I) const;

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  ///
  const PPCRegisterInfo &getRegisterInfo() const { return RI; }

  bool isXFormMemOp(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::XFormMemOp;
  }
  bool isPrefixed(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::Prefixed;
  }
  bool isSExt32To64(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::SExt32To64;
  }
  bool isZExt32To64(unsigned Opcode) const {
    return get(Opcode).TSFlags & PPCII::ZExt32To64;
  }

  static bool isSameClassPhysRegCopy(unsigned Opcode) {
    unsigned CopyOpcodes[] = {PPC::OR,        PPC::OR8,   PPC::FMR,
                              PPC::VOR,       PPC::XXLOR, PPC::XXLORf,
                              PPC::XSCPSGNDP, PPC::MCRF,  PPC::CROR,
                              PPC::EVOR,      -1U};
    for (int i = 0; CopyOpcodes[i] != -1U; i++)
      if (Opcode == CopyOpcodes[i])
        return true;
    return false;
  }

  static bool hasPCRelFlag(unsigned TF) {
    return TF == PPCII::MO_PCREL_FLAG || TF == PPCII::MO_GOT_TLSGD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TLSLD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TPREL_PCREL_FLAG ||
           TF == PPCII::MO_TPREL_PCREL_FLAG || TF == PPCII::MO_TLS_PCREL_FLAG ||
           TF == PPCII::MO_GOT_PCREL_FLAG;
  }

  static bool hasGOTFlag(unsigned TF) {
    return TF == PPCII::MO_GOT_FLAG || TF == PPCII::MO_GOT_TLSGD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TLSLD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TPREL_PCREL_FLAG ||
           TF == PPCII::MO_GOT_PCREL_FLAG;
  }

  static bool hasTLSFlag(unsigned TF) {
    return TF == PPCII::MO_TLSGD_FLAG || TF == PPCII::MO_TPREL_FLAG ||
           TF == PPCII::MO_TLSLD_FLAG || TF == PPCII::MO_TLSGDM_FLAG ||
           TF == PPCII::MO_GOT_TLSGD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TLSLD_PCREL_FLAG ||
           TF == PPCII::MO_GOT_TPREL_PCREL_FLAG || TF == PPCII::MO_TPREL_LO ||
           TF == PPCII::MO_TPREL_HA || TF == PPCII::MO_DTPREL_LO ||
           TF == PPCII::MO_TLSLD_LO || TF == PPCII::MO_TLS ||
           TF == PPCII::MO_TPREL_PCREL_FLAG || TF == PPCII::MO_TLS_PCREL_FLAG;
  }

  ScheduleHazardRecognizer *
  CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                               const ScheduleDAG *DAG) const override;
  ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                     const ScheduleDAG *DAG) const override;

  unsigned getInstrLatency(const InstrItineraryData *ItinData,
                           const MachineInstr &MI,
                           unsigned *PredCost = nullptr) const override;

  std::optional<unsigned> getOperandLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &DefMI,
                                            unsigned DefIdx,
                                            const MachineInstr &UseMI,
                                            unsigned UseIdx) const override;
  std::optional<unsigned> getOperandLatency(const InstrItineraryData *ItinData,
                                            SDNode *DefNode, unsigned DefIdx,
                                            SDNode *UseNode,
                                            unsigned UseIdx) const override {
    return PPCGenInstrInfo::getOperandLatency(ItinData, DefNode, DefIdx,
                                              UseNode, UseIdx);
  }

  bool hasLowDefLatency(const TargetSchedModel &SchedModel,
                        const MachineInstr &DefMI,
                        unsigned DefIdx) const override {
    // Machine LICM should hoist all instructions in low-register-pressure
    // situations; none are sufficiently free to justify leaving in a loop
    // body.
    return false;
  }

  bool useMachineCombiner() const override {
    return true;
  }

  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, unsigned Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;

  /// Return true when there is potentially a faster code sequence for an FMA
  /// chain ending in \p Root. All potential patterns are output in the
  /// \p Patterns array.
  bool getFMAPatterns(MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
                      bool DoRegPressureReduce) const;

  CombinerObjective getCombinerObjective(unsigned Pattern) const override;

  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in \p Root. All potential patterns are
  /// output in the \p Patterns array.
  bool getMachineCombinerPatterns(MachineInstr &Root,
                                  SmallVectorImpl<unsigned> &Patterns,
                                  bool DoRegPressureReduce) const override;

  /// On PowerPC, we leverage the machine combiner pass to reduce register
  /// pressure when it is high for a basic block.
  /// Return true if register pressure for \p MBB is high and the ABI is
  /// supported for reducing register pressure; otherwise return false.
  bool shouldReduceRegisterPressure(
      const MachineBasicBlock *MBB,
      const RegisterClassInfo *RegClassInfo) const override;

  /// Fixup the placeholders we put in genAlternativeCodeSequence() for
  /// MachineCombiner.
  void
  finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern,
                    SmallVectorImpl<MachineInstr *> &InsInstrs) const override;

  bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                   bool Invert) const override;

  /// On PowerPC, we try to reassociate FMA chains, which will increase
  /// instruction size. Set the extension resource length limit to 1 for this
  /// edge case. Resource length is calculated by scaled resource usage in
  /// getCycles(). Because of the division in getCycles(), the returned cycle
  /// count may differ from the legacy scaled resource usage, so the new
  /// resource length may be the same as the legacy one or 1 larger.
  /// We need to exclude the 1-larger case, even though the resource length is
  /// not preserved, to allow more FMA chain reassociations on PowerPC.
  int getExtendResourceLenLimit() const override { return 1; }

  // PowerPC specific version of setSpecialOperandAttr that copies Flags to MI
  // and clears nuw, nsw, and exact flags.
  using TargetInstrInfo::setSpecialOperandAttr;
  void setSpecialOperandAttr(MachineInstr &MI, uint32_t Flags) const;

  bool isCoalescableExtInstr(const MachineInstr &MI,
                             Register &SrcReg, Register &DstReg,
                             unsigned &SubIdx) const override;
  Register isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;
  Register isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1,
                             unsigned &SrcOpIdx2) const override;

  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;

  // Branch analysis.
  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;

  // Select analysis.
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;

  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI,
                           Register VReg) const override;

  // Emits a register spill without updating the register class for vector
  // registers. This ensures that when we spill a vector register the
  // element order in the register is the same as it was in memory.
  void storeRegToStackSlotNoUpd(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                unsigned SrcReg, bool isKill, int FrameIndex,
                                const TargetRegisterClass *RC,
                                const TargetRegisterInfo *TRI) const;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI,
                            Register VReg) const override;

  // Emits a register reload without updating the register class for vector
  // registers. This ensures that when we reload a vector register the
  // element order in the register is the same as it was in memory.
  void loadRegFromStackSlotNoUpd(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 unsigned DestReg, int FrameIndex,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI) const;

  unsigned getStoreOpcodeForSpill(const TargetRegisterClass *RC) const;

  unsigned getLoadOpcodeForSpill(const TargetRegisterClass *RC) const;

  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;

  bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
                     MachineRegisterInfo *MRI) const override;

  bool onlyFoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                         Register Reg) const;
  // If-conversion by predication (only supported by some branch instructions).
  // All of the profitability checks always return true; it is always
  // profitable to use the predicated branches.
  bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                           unsigned ExtraPredCycles,
                           BranchProbability Probability) const override {
    return true;
  }

  bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
                           unsigned ExtraT, MachineBasicBlock &FMBB,
                           unsigned NumF, unsigned ExtraF,
                           BranchProbability Probability) const override;

  bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                                 BranchProbability Probability) const override {
    return true;
  }

  bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                 MachineBasicBlock &FMBB) const override {
    return false;
  }

  // Predication support.
  bool isPredicated(const MachineInstr &MI) const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  bool PredicateInstruction(MachineInstr &MI,
                            ArrayRef<MachineOperand> Pred) const override;

  bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                         ArrayRef<MachineOperand> Pred2) const override;

  bool ClobbersPredicate(MachineInstr &MI, std::vector<MachineOperand> &Pred,
                         bool SkipDead) const override;

  // Comparison optimization.

  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &Mask,
                      int64_t &Value) const override;

  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t Mask, int64_t Value,
                            const MachineRegisterInfo *MRI) const override;

  /// Return true if we can determine the base operand and byte offset of an
  /// instruction, as well as the memory width. Width is the size of memory
  /// that is being loaded/stored (e.g. 1, 2, 4, 8).
  bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, LocationSize &Width,
                                    const TargetRegisterInfo *TRI) const;

  bool optimizeCmpPostRA(MachineInstr &MI) const;

  /// Get the base operand and byte offset of an instruction that reads/writes
  /// memory.
  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &LdSt,
      SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
      bool &OffsetIsScalable, LocationSize &Width,
      const TargetRegisterInfo *TRI) const override;

  /// Returns true if the two given memory operations should be scheduled
  /// adjacent.
  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           int64_t Offset1, bool OffsetIsScalable1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           int64_t Offset2, bool OffsetIsScalable2,
                           unsigned ClusterSize,
                           unsigned NumBytes) const override;

  /// Return true if two MIs access different memory addresses and false
  /// otherwise.
  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  /// GetInstSize - Return the number of bytes of code the specified
  /// instruction may be. This returns the maximum number of bytes.
  ///
  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  MCInst getNop() const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;

  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;

  // Expand a VSX memory pseudo instruction to either a VSX or an FP
  // instruction.
  bool expandVSXMemPseudo(MachineInstr &MI) const;

  // Lower pseudo instructions after register allocation.
  bool expandPostRAPseudo(MachineInstr &MI) const override;

  const TargetRegisterClass *updatedRC(const TargetRegisterClass *RC) const;
  static int getRecordFormOpcode(unsigned Opcode);

  bool isTOCSaveMI(const MachineInstr &MI) const;

  std::pair<bool, bool>
  isSignOrZeroExtended(const unsigned Reg, const unsigned BinOpDepth,
                       const MachineRegisterInfo *MRI) const;

  // Return true if the register is sign-extended from 32 to 64 bits.
  bool isSignExtended(const unsigned Reg,
                      const MachineRegisterInfo *MRI) const {
    return isSignOrZeroExtended(Reg, 0, MRI).first;
  }

  // Return true if the register is zero-extended from 32 to 64 bits.
  bool isZeroExtended(const unsigned Reg,
                      const MachineRegisterInfo *MRI) const {
    return isSignOrZeroExtended(Reg, 0, MRI).second;
  }

  bool convertToImmediateForm(MachineInstr &MI,
                              SmallSet<Register, 4> &RegsToUpdate,
                              MachineInstr **KilledDef = nullptr) const;
  bool foldFrameOffset(MachineInstr &MI) const;
  bool combineRLWINM(MachineInstr &MI, MachineInstr **ToErase = nullptr) const;
  bool isADDIInstrEligibleForFolding(MachineInstr &ADDIMI, int64_t &Imm) const;
  bool isADDInstrEligibleForFolding(MachineInstr &ADDMI) const;
  bool isImmInstrEligibleForFolding(MachineInstr &MI, unsigned &BaseReg,
                                    unsigned &XFormOpcode,
                                    int64_t &OffsetOfImmInstr,
                                    ImmInstrInfo &III) const;
  bool isValidToBeChangedReg(MachineInstr *ADDMI, unsigned Index,
                             MachineInstr *&ADDIMI, int64_t &OffsetAddi,
                             int64_t OffsetImm) const;

  void replaceInstrWithLI(MachineInstr &MI, const LoadImmediateInfo &LII) const;
  void replaceInstrOperandWithImm(MachineInstr &MI, unsigned OpNo,
                                  int64_t Imm) const;

  bool instrHasImmForm(unsigned Opc, bool IsVFReg, ImmInstrInfo &III,
                       bool PostRA) const;

  // In the PostRA phase, try to find the instruction that defines \p Reg
  // before \p MI. \p SeenIntermediateUse is set to true if uses exist between
  // DefMI and \p MI.
  MachineInstr *getDefMIPostRA(unsigned Reg, MachineInstr &MI,
                               bool &SeenIntermediateUse) const;

  // Materialize an immediate after RA.
  void materializeImmPostRA(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register Reg,
                            int64_t Imm) const;

  /// Check if \p Opcode is BDNZ (decrement CTR and branch if it is still
  /// nonzero).
  bool isBDNZ(unsigned Opcode) const;

  /// Find the hardware loop instruction used to set up the specified loop.
  /// On PPC, we have two instructions used to set up the hardware loop
  /// (MTCTRloop, MTCTR8loop) with corresponding endloop (BDNZ, BDNZ8)
  /// instructions to indicate the end of a loop.
  MachineInstr *
  findLoopInstr(MachineBasicBlock &,
                SmallPtrSet<MachineBasicBlock *, 8> &Visited) const;

  /// Analyze loop L, which must be a single-basic-block loop, and if the
  /// conditions can be understood well enough, produce a PipelinerLoopInfo
  /// object.
  std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
  analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H