//===-- X86InstrInfo.h - X86 Instruction Information ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86INSTRINFO_H
#define LLVM_LIB_TARGET_X86_X86INSTRINFO_H

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrFMA3Info.h"
#include "X86RegisterInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include <vector>

#define GET_INSTRINFO_HEADER
#include "X86GenInstrInfo.inc"

namespace llvm {
class X86Subtarget;

// X86 MachineCombiner patterns
enum X86MachineCombinerPattern : unsigned {
  // X86 VNNI
  DPWSSD = MachineCombinerPattern::TARGET_PATTERN_START,
};

namespace X86 {

enum {
  // For instr that was compressed from EVEX to LEGACY.
  AC_EVEX_2_LEGACY = MachineInstr::TAsmComments,
  // For instr that was compressed from EVEX to VEX.
  AC_EVEX_2_VEX = AC_EVEX_2_LEGACY << 1,
  // For instr that was compressed from EVEX to EVEX.
  AC_EVEX_2_EVEX = AC_EVEX_2_VEX << 1
};

/// Return a pair of the condition code for the given predicate and whether
/// the instruction operands should be swapped to match the condition code.
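/// e.g. CmpInst::FCMP_OLT yields {X86::COND_A, /*NeedSwap=*/true}: a < b is
/// evaluated as b > a with the operands swapped.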
std::pair<CondCode, bool> getX86ConditionCode(CmpInst::Predicate Predicate);

/// Return a cmov opcode for the given register size in bytes and operand type.
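/// For instance, a 4-byte register-to-register cmov is CMOV32rr; with a
/// memory operand it is CMOV32rm.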
unsigned getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand = false,
                       bool HasNDD = false);

/// Return the index of the source operand holding the condition code in
/// \p MCID. If the instruction doesn't have a condition code, return -1.
int getCondSrcNoFromDesc(const MCInstrDesc &MCID);

/// Return the condition code of the instruction. If the instruction doesn't
/// have a condition code, return X86::COND_INVALID.
CondCode getCondFromMI(const MachineInstr &MI);

// Turn JCC instruction into condition code.
CondCode getCondFromBranch(const MachineInstr &MI);

// Turn SETCC instruction into condition code.
CondCode getCondFromSETCC(const MachineInstr &MI);

// Turn CMOV instruction into condition code.
CondCode getCondFromCMov(const MachineInstr &MI);

// Turn CFCMOV instruction into condition code.
CondCode getCondFromCFCMov(const MachineInstr &MI);

// Turn CCMP instruction into condition code.
CondCode getCondFromCCMP(const MachineInstr &MI);

// Turn condition code into condition flags for CCMP/CTEST.
int getCCMPCondFlagsFromCondCode(CondCode CC);

// Get the opcode of the corresponding NF (no-flags) variant.
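// NF variants are the APX forms that do not update EFLAGS; e.g. an NF ADD
// performs the addition without clobbering EFLAGS.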
unsigned getNFVariant(unsigned Opc);

// Get the opcode of the corresponding NonND variant.
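// That is, map an APX new-data-destination (NDD) instruction, which writes
// its result to a separate destination register, back to the classic
// two-address form.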
unsigned getNonNDVariant(unsigned Opc);

/// GetOppositeBranchCondition - Return the inverse of the specified cond,
/// e.g. turning COND_E to COND_NE.
CondCode GetOppositeBranchCondition(CondCode CC);

/// Get the VPCMP immediate for the given condition.
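/// e.g. ISD::SETLT maps to 1 (LT) and ISD::SETGT maps to 6 (NLE).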
unsigned getVPCMPImmForCond(ISD::CondCode CC);

/// Get the VPCMP immediate if the operands are swapped.
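/// e.g. LT (1) becomes NLE (6) when the operands are commuted; EQ and NE are
/// unchanged.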
unsigned getSwappedVPCMPImm(unsigned Imm);

/// Get the VPCOM immediate if the operands are swapped.
unsigned getSwappedVPCOMImm(unsigned Imm);

/// Get the VCMP immediate if the operands are swapped.
unsigned getSwappedVCMPImm(unsigned Imm);

/// Get the width of the vector register operand.
unsigned getVectorRegisterWidth(const MCOperandInfo &Info);

/// Check if the instruction is an X87 instruction.
bool isX87Instruction(MachineInstr &MI);

/// Return the index of the instruction's first address operand, if it has a
/// memory reference, or -1 if it has none. Unlike X86II::getMemoryOperandNo(),
/// this also works for pseudo instructions (e.g., TCRETURNmi) as well as real
/// instructions (e.g., JMP64m).
int getFirstAddrOperandIdx(const MachineInstr &MI);

/// Find any constant pool entry associated with a specific instruction
/// operand.
const Constant *getConstantFromPool(const MachineInstr &MI, unsigned OpNo);

} // namespace X86

/// isGlobalStubReference - Return true if the specified TargetFlag operand is
/// a reference to a stub for a global, not the global itself.
inline static bool isGlobalStubReference(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_DLLIMPORT:               // dllimport stub.
  case X86II::MO_GOTPCREL:                // rip-relative GOT reference.
  case X86II::MO_GOTPCREL_NORELAX:        // rip-relative GOT reference.
  case X86II::MO_GOT:                     // normal GOT reference.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: // Normal $non_lazy_ptr ref.
  case X86II::MO_DARWIN_NONLAZY:          // Normal $non_lazy_ptr ref.
  case X86II::MO_COFFSTUB:                // COFF .refptr stub.
    return true;
  default:
    return false;
  }
}

/// isGlobalRelativeToPICBase - Return true if the specified global value
/// reference is relative to a 32-bit PIC base (X86ISD::GlobalBaseReg). If this
/// is true, the addressing mode has the PIC base register added in (e.g. EBX).
inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_GOTOFF:                  // isPICStyleGOT: local global.
  case X86II::MO_GOT:                     // isPICStyleGOT: other global.
  case X86II::MO_PIC_BASE_OFFSET:         // Darwin local global.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: // Darwin/32 external global.
  case X86II::MO_TLVP:                    // ??? Pretty sure..
    return true;
  default:
    return false;
  }
}

inline static bool isScale(const MachineOperand &MO) {
  return MO.isImm() && (MO.getImm() == 1 || MO.getImm() == 2 ||
                        MO.getImm() == 4 || MO.getImm() == 8);
}

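// An X86 memory reference is five consecutive operands -- base register,
// scale amount, index register, displacement, and segment register -- laid
// out at offsets X86::AddrBaseReg (0) through X86::AddrSegmentReg (4) from
// the first address operand Op. The helpers below pattern-match that layout.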
inline static bool isLeaMem(const MachineInstr &MI, unsigned Op) {
  if (MI.getOperand(Op).isFI())
    return true;
  return Op + X86::AddrSegmentReg <= MI.getNumOperands() &&
         MI.getOperand(Op + X86::AddrBaseReg).isReg() &&
         isScale(MI.getOperand(Op + X86::AddrScaleAmt)) &&
         MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
         (MI.getOperand(Op + X86::AddrDisp).isImm() ||
          MI.getOperand(Op + X86::AddrDisp).isGlobal() ||
          MI.getOperand(Op + X86::AddrDisp).isCPI() ||
          MI.getOperand(Op + X86::AddrDisp).isJTI());
}

inline static bool isMem(const MachineInstr &MI, unsigned Op) {
  if (MI.getOperand(Op).isFI())
    return true;
  return Op + X86::AddrNumOperands <= MI.getNumOperands() &&
         MI.getOperand(Op + X86::AddrSegmentReg).isReg() && isLeaMem(MI, Op);
}

class X86InstrInfo final : public X86GenInstrInfo {
  X86Subtarget &Subtarget;
  const X86RegisterInfo RI;

  virtual void anchor();

  bool analyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                         MachineBasicBlock *&FBB,
                         SmallVectorImpl<MachineOperand> &Cond,
                         SmallVectorImpl<MachineInstr *> &CondBranches,
                         bool AllowModify) const;

  bool foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI, Register Reg,
                         int64_t ImmVal, MachineRegisterInfo *MRI,
                         bool MakeChange) const;

public:
  explicit X86InstrInfo(X86Subtarget &STI);

  /// Given a machine instruction descriptor, returns the register
  /// class constraint for OpNum, or NULL. Returned register class
  /// may be different from the definition in the TD file, e.g.
  /// GR*RegClass (definition in TD file)
  /// ->
  /// GR*_NOREX2RegClass (Returned register class)
  const TargetRegisterClass *
  getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
              const TargetRegisterInfo *TRI,
              const MachineFunction &MF) const override;

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  ///
  const X86RegisterInfo &getRegisterInfo() const { return RI; }

  /// Returns the stack pointer adjustment that happens inside the frame
  /// setup..destroy sequence (e.g. by pushes, or inside the callee).
  int64_t getFrameAdjustment(const MachineInstr &I) const {
    assert(isFrameInstr(I));
    if (isFrameSetup(I))
      return I.getOperand(2).getImm();
    return I.getOperand(1).getImm();
  }

  /// Sets the stack pointer adjustment made inside the frame made up by this
  /// instruction.
  void setFrameAdjustment(MachineInstr &I, int64_t V) const {
    assert(isFrameInstr(I));
    if (isFrameSetup(I))
      I.getOperand(2).setImm(V);
    else
      I.getOperand(1).setImm(V);
  }

  /// getSPAdjust - This returns the stack pointer adjustment made by
  /// this instruction. For x86, we need to handle more complex call
  /// sequences involving PUSHes.
  int getSPAdjust(const MachineInstr &MI) const override;

  /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
  /// extension instruction. That is, it's like a copy where it's legal for the
  /// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
  /// true, then it's expected the pre-extension value is available as a subreg
  /// of the result register. This also returns the sub-register index in
  /// SubIdx.
  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  /// Returns true if the instruction has no behavior (specified or otherwise)
  /// that is based on the value of any of its register operands.
  ///
  /// Instructions are considered data invariant even if they set EFLAGS.
  ///
  /// A classical example of something that is inherently not data invariant is
  /// an indirect jump -- the destination is loaded into icache based on the
  /// bits set in the jump destination register.
  ///
  /// FIXME: This should become part of our instruction tables.
  static bool isDataInvariant(MachineInstr &MI);

  /// Returns true if the instruction has no behavior (specified or otherwise)
  /// that is based on the value loaded from memory or the value of any
  /// non-address register operands.
  ///
  /// For example, the instruction's latency must not depend on the particular
  /// bits set in any of the registers *or* on any of the bits loaded from
  /// memory.
  ///
  /// Instructions are considered data invariant even if they set EFLAGS.
  ///
  /// A classical example of something that is inherently not data invariant is
  /// an indirect jump -- the destination is loaded into icache based on the
  /// bits set in the jump destination register.
  ///
  /// FIXME: This should become part of our instruction tables.
  static bool isDataInvariantLoad(MachineInstr &MI);

  Register isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  Register isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex,
                               unsigned &MemBytes) const override;
  /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  Register isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                     int &FrameIndex) const override;

  Register isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;
  Register isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex,
                              unsigned &MemBytes) const override;
  /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  Register isStoreToStackSlotPostFE(const MachineInstr &MI,
                                    int &FrameIndex) const override;

  bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;
  void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                     Register DestReg, unsigned SubIdx,
                     const MachineInstr &Orig,
                     const TargetRegisterInfo &TRI) const override;

  /// Given an operand within a MachineInstr, insert preceding code to put it
  /// into the right format for a particular kind of LEA instruction. This may
  /// involve using an appropriate super-register instead (with an implicit use
  /// of the original) or creating a new virtual register and inserting COPY
  /// instructions to get the data into the right class.
  ///
  /// Reference parameters are set to indicate how caller should add this
  /// operand to the LEA instruction.
  bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
                      unsigned LEAOpcode, bool AllowSP, Register &NewSrc,
                      bool &isKill, MachineOperand &ImplicitOp,
                      LiveVariables *LV, LiveIntervals *LIS) const;

  /// convertToThreeAddress - This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
  /// may be able to convert a two-address instruction into a true
  /// three-address instruction on demand. This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if they
  /// would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the new instruction.
  ///
  MachineInstr *convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
                                      LiveIntervals *LIS) const override;

  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments. Their
  /// input values can be re-defined in this method only if the input values
  /// are not pre-defined, which is designated by the special value
  /// 'CommuteAnyOperandIndex'.
  /// If both indices are pre-defined and refer to some operands, then the
  /// method simply returns true if the corresponding operands are commutable
  /// and returns false otherwise.
  ///
  /// For example, calling this method this way:
  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///     findCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with operand #1.
  bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1,
                             unsigned &SrcOpIdx2) const override;

  /// Returns true if we have a preference on the operand order in MI; the
  /// commute decision is returned in Commute.
  bool hasCommutePreference(MachineInstr &MI, bool &Commute) const override;

  /// Returns an adjusted FMA opcode that must be used in the FMA instruction
  /// that performs the same computations as the given \p MI but which has the
  /// operands \p SrcOpIdx1 and \p SrcOpIdx2 commuted.
  /// It may return 0 if it is unsafe to commute the operands.
  /// Note that a machine instruction (instead of its opcode) is passed as the
  /// first parameter to make it possible to analyze the instruction's uses and
  /// commute the first operand of FMA even when it seems unsafe when you look
  /// at the opcode. For example, it is Ok to commute the first operand of
  /// VFMADD*SD_Int, if ONLY the lowest 64-bit element of the result is used.
  ///
  /// The returned FMA opcode may differ from the opcode in the given \p MI.
  /// For example, commuting the operands #1 and #3 in the following FMA
  ///     FMA213 #1, #2, #3
  /// results in an instruction with an adjusted opcode:
  ///     FMA231 #3, #2, #1
  unsigned
  getFMA3OpcodeToCommuteOperands(const MachineInstr &MI, unsigned SrcOpIdx1,
                                 unsigned SrcOpIdx2,
                                 const X86InstrFMA3Group &FMA3Group) const;

  // Branch analysis.
  bool isUnconditionalTailCall(const MachineInstr &MI) const override;
  bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
                                  const MachineInstr &TailCall) const override;
  void replaceBranchWithTailCall(MachineBasicBlock &MBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 const MachineInstr &TailCall) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify) const override;

  int getJumpTableIndex(const MachineInstr &MI) const override;

  std::optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

  bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg,
                               int64_t &ImmVal) const override;

  bool preservesZeroValueInReg(const MachineInstr *MI,
                               const Register NullValueReg,
                               const TargetRegisterInfo *TRI) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &LdSt,
      SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
      bool &OffsetIsScalable, LocationSize &Width,
      const TargetRegisterInfo *TRI) const override;
  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              TargetInstrInfo::MachineBranchPredicate &MBP,
                              bool AllowModify = false) const override;

  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;
  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI,
                           Register VReg) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI,
                            Register VReg) const override;

  void loadStoreTileReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                        unsigned Opc, Register Reg, int FrameIdx,
                        bool isKill = false) const;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  /// Check whether the target can fold a load that feeds a subreg operand
  /// (or a subreg operand that feeds a store).
  bool isSubregFoldable() const override { return true; }

  /// Fold a load or store of the specified stack slot into the specified
  /// machine instruction for the specified operand(s). If folding happens, it
  /// is likely that the referenced instruction has been changed.
  ///
  /// \returns the new instruction on success, or nullptr otherwise.
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const override;

  /// Same as the previous version except it allows folding of any load and
  /// store from / to any address, not just from a specific stack slot.
  MachineInstr *foldMemoryOperandImpl(
      MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
      MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
      LiveIntervals *LIS = nullptr) const override;

  bool
  unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
                      bool UnfoldLoad, bool UnfoldStore,
                      SmallVectorImpl<MachineInstr *> &NewMIs) const override;

  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                           SmallVectorImpl<SDNode *> &NewNodes) const override;

  unsigned
  getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
                             unsigned *LoadRegIndex = nullptr) const override;

  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
                               int64_t &Offset2) const override;

  /// Overrides isSchedulingBoundary from CodeGen/TargetInstrInfo.cpp to make
  /// it capable of identifying ENDBR instructions and preventing them from
  /// being re-scheduled.
  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// This is used by the pre-regalloc scheduler to determine (in conjunction
  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
  /// On some targets if two loads are loading from addresses in the same cache
  /// line, it's better if they are scheduled together. This function takes two
  /// integers that represent the load offsets from the common base address. It
  /// returns true if it decides it's desirable to schedule the two loads
  /// together. "NumLoads" is the number of loads that have already been
  /// scheduled after Load1.
  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1,
                               int64_t Offset2,
                               unsigned NumLoads) const override;

  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;

  MCInst getNop() const override;

  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;

  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;

  /// True if MI has a condition code def, e.g. EFLAGS, that is
  /// not marked dead.
  bool hasLiveCondCodeDef(MachineInstr &MI) const;

  /// getGlobalBaseReg - Return a virtual register initialized with the
  /// global base register value. Output instructions required to
  /// initialize the register in the function entry block, if necessary.
  ///
  unsigned getGlobalBaseReg(MachineFunction *MF) const;

  std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr &MI) const override;

  uint16_t getExecutionDomainCustom(const MachineInstr &MI) const;

  void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override;

  bool setExecutionDomainCustom(MachineInstr &MI, unsigned Domain) const;

  unsigned
  getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
                               const TargetRegisterInfo *TRI) const override;
  unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
                                const TargetRegisterInfo *TRI) const override;
  void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
                                 const TargetRegisterInfo *TRI) const override;

  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                                      unsigned OpNum,
                                      ArrayRef<MachineOperand> MOs,
                                      MachineBasicBlock::iterator InsertPt,
                                      unsigned Size, Align Alignment,
                                      bool AllowCommute) const;

  bool isHighLatencyDef(int opc) const override;

  bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr &DefMI, unsigned DefIdx,
                             const MachineInstr &UseMI,
                             unsigned UseIdx) const override;

  bool useMachineCombiner() const override { return true; }

  bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                   bool Invert) const override;

  bool hasReassociableOperands(const MachineInstr &Inst,
                               const MachineBasicBlock *MBB) const override;

  void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
                             MachineInstr &NewMI1,
                             MachineInstr &NewMI2) const override;

  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &CmpMask,
                      int64_t &CmpValue) const override;

  /// Check if there exists an earlier instruction that operates on the same
  /// source operands and sets EFLAGS in the same way as this CMP, and remove
  /// the CMP if possible.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
                            const MachineRegisterInfo *MRI) const override;

  MachineInstr *optimizeLoadInstr(MachineInstr &MI,
                                  const MachineRegisterInfo *MRI,
                                  Register &FoldAsLoadDefReg,
                                  MachineInstr *&DefMI) const override;

  bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
                     MachineRegisterInfo *MRI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;

  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;

  std::optional<outliner::OutlinedFunction> getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;

  outliner::InstrType
  getOutliningTypeImpl(MachineBasicBlock::iterator &MIT,
                       unsigned Flags) const override;

  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;

  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     outliner::Candidate &C) const override;

  void buildClearRegister(Register Reg, MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator Iter, DebugLoc &DL,
                          bool AllowSideEffects = true) const override;

  bool verifyInstruction(const MachineInstr &MI,
                         StringRef &ErrInfo) const override;
#define GET_INSTRINFO_HELPER_DECLS
#include "X86GenInstrInfo.inc"

  static bool hasLockPrefix(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & X86II::LOCK;
  }

  std::optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

protected:
  MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                       unsigned CommuteOpIdx1,
                                       unsigned CommuteOpIdx2) const override;

  std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

  bool getMachineCombinerPatterns(MachineInstr &Root,
                                  SmallVectorImpl<unsigned> &Patterns,
                                  bool DoRegPressureReduce) const override;

  /// When getMachineCombinerPatterns() finds potential patterns,
  /// this function generates the instructions that could replace the
  /// original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, unsigned Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;

  /// Whether, when calculating the latency of the root instruction, to
  /// accumulate the latency of the whole instruction sequence into the root
  /// latency.
  /// \param Root - Instruction that could be combined with one of its operands.
  /// For the X86 combine (vpmaddwd + vpmaddwd) -> vpdpwssd, the vpmaddwd is
  /// not on the critical path, so the root latency only includes vpmaddwd.
  bool accumulateInstrSeqToRootLatency(MachineInstr &Root) const override {
    return false;
  }

  void getFrameIndexOperands(SmallVectorImpl<MachineOperand> &Ops,
                             int FI) const override;

private:
  /// This is a helper for convertToThreeAddress for 8 and 16-bit instructions.
  /// We use a 32-bit LEA to form 3-address code by promoting to a 32-bit
  /// super-register and then truncating back down to an 8/16-bit sub-register.
  MachineInstr *convertToThreeAddressWithLEA(unsigned MIOpc, MachineInstr &MI,
                                             LiveVariables *LV,
                                             LiveIntervals *LIS,
                                             bool Is8BitOp) const;

  /// Handles memory folding for special case instructions, for instance those
  /// requiring custom manipulation of the address.
  MachineInstr *foldMemoryOperandCustom(MachineFunction &MF, MachineInstr &MI,
                                        unsigned OpNum,
                                        ArrayRef<MachineOperand> MOs,
                                        MachineBasicBlock::iterator InsertPt,
                                        unsigned Size, Align Alignment) const;

  MachineInstr *foldMemoryBroadcast(MachineFunction &MF, MachineInstr &MI,
                                    unsigned OpNum,
                                    ArrayRef<MachineOperand> MOs,
                                    MachineBasicBlock::iterator InsertPt,
                                    unsigned BitsSize, bool AllowCommute) const;

  /// isFrameOperand - Return true and the FrameIndex if the specified
  /// operand and the following operands form a reference to the stack frame.
  bool isFrameOperand(const MachineInstr &MI, unsigned int Op,
                      int &FrameIndex) const;

  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction with 3 vector inputs.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments. Their
  /// input values can be re-defined in this method only if the input values
  /// are not pre-defined, which is designated by the special value
  /// 'CommuteAnyOperandIndex'.
  /// If both indices are pre-defined and refer to some operands, then the
  /// method simply returns true if the corresponding operands are commutable
  /// and returns false otherwise.
  ///
  /// For example, calling this method this way:
  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///     findThreeSrcCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with operand #1.
  ///
  /// If IsIntrinsic is set, operand 1 will be ignored for commuting.
  bool findThreeSrcCommutedOpIndices(const MachineInstr &MI,
                                     unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2,
                                     bool IsIntrinsic = false) const;

  /// Returns true when instruction \p FlagI produces the same flags as \p OI.
  /// The caller should pass in the results of calling analyzeCompare on \p OI:
  /// \p SrcReg, \p SrcReg2, \p ImmMask, \p ImmValue.
  /// If the flags match \p OI as if it had the input operands swapped, then
  /// the function succeeds and sets \p IsSwapped to true.
  ///
  /// Examples of OI, FlagI pairs returning true:
  ///     CMP %1, 42   and  CMP %1, 42
  ///     CMP %1, %2   and  %3 = SUB %1, %2
  ///     TEST %1, %1  and  %2 = SUB %1, 0
  ///     CMP %1, %2   and  %3 = SUB %2, %1  ; IsSwapped=true
  bool isRedundantFlagInstr(const MachineInstr &FlagI, Register SrcReg,
                            Register SrcReg2, int64_t ImmMask,
                            int64_t ImmValue, const MachineInstr &OI,
                            bool *IsSwapped, int64_t *ImmDelta) const;

  /// Commute operands of \p MI for memory fold.
  ///
  /// \param Idx1 the index of the operand to be commuted.
  ///
  /// \returns the index of the operand that is commuted with \p Idx1. If the
  /// method fails to commute the operands, it will return \p Idx1.
  unsigned commuteOperandsForFold(MachineInstr &MI, unsigned Idx1) const;
};
} // namespace llvm

#endif