//===-- X86InstrInfo.h - X86 Instruction Information ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86INSTRINFO_H
#define LLVM_LIB_TARGET_X86_X86INSTRINFO_H

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrFMA3Info.h"
#include "X86RegisterInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include <vector>

#define GET_INSTRINFO_HEADER
#include "X86GenInstrInfo.inc"

namespace llvm {
class X86Subtarget;

// X86 MachineCombiner patterns
enum X86MachineCombinerPattern : unsigned {
  // X86 VNNI
  DPWSSD = MachineCombinerPattern::TARGET_PATTERN_START,
};
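
// The DPWSSD pattern matches a vpmaddwd feeding an integer add and rewrites
// the pair as a single dot-product instruction on VNNI targets, roughly
// (an illustrative MIR sketch; the exact opcodes depend on the vector width
// and on whether AVX-VNNI or AVX512-VNNI encodings are in use):
//
//   %m = VPMADDWDZ128rr %a, %b
//   %r = VPADDDZ128rr %acc, %m
//   -->
//   %r = VPDPWSSDZ128r %acc, %a, %b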

namespace X86 {

enum {
  // For instr that was compressed from EVEX to LEGACY.
  AC_EVEX_2_LEGACY = MachineInstr::TAsmComments,
  // For instr that was compressed from EVEX to VEX.
  AC_EVEX_2_VEX = AC_EVEX_2_LEGACY << 1,
  // For instr that was compressed from EVEX to EVEX.
  AC_EVEX_2_EVEX = AC_EVEX_2_VEX << 1
};
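
// These bits live in the MachineInstr asm-printer flags, so a compression pass
// can tag an instruction and have a note printed next to it in the asm output,
// e.g. (a sketch):
//
//   MI.setAsmPrinterFlag(X86::AC_EVEX_2_VEX);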

/// Return a pair of condition code for the given predicate and whether
/// the instruction operands should be swapped to match the condition code.
std::pair<CondCode, bool> getX86ConditionCode(CmpInst::Predicate Predicate);
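// For example (a sketch of the expected mapping, not an exhaustive list):
//   getX86ConditionCode(CmpInst::ICMP_EQ)  -> {X86::COND_E, false}
//   getX86ConditionCode(CmpInst::ICMP_SGT) -> {X86::COND_G, false}
// Some floating-point predicates only map to a condition code after the
// operands are exchanged, in which case the bool member is true.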

/// Return a cmov opcode for the given register size in bytes, and operand type.
unsigned getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand = false,
                       bool HasNDD = false);

/// Return the source operand # for condition code by \p MCID. If the
/// instruction doesn't have a condition code, return -1.
int getCondSrcNoFromDesc(const MCInstrDesc &MCID);

/// Return the condition code of the instruction. If the instruction doesn't
/// have a condition code, return X86::COND_INVALID.
CondCode getCondFromMI(const MachineInstr &MI);

// Turn JCC instruction into condition code.
CondCode getCondFromBranch(const MachineInstr &MI);

// Turn SETCC instruction into condition code.
CondCode getCondFromSETCC(const MachineInstr &MI);

// Turn CMOV instruction into condition code.
CondCode getCondFromCMov(const MachineInstr &MI);

// Turn CFCMOV instruction into condition code.
CondCode getCondFromCFCMov(const MachineInstr &MI);

// Turn CCMP instruction into condition code.
CondCode getCondFromCCMP(const MachineInstr &MI);

// Turn condition code into condition flags for CCMP/CTEST.
int getCCMPCondFlagsFromCondCode(CondCode CC);

// Get the opcode of corresponding NF variant.
unsigned getNFVariant(unsigned Opc);

// Get the opcode of corresponding NonND variant.
unsigned getNonNDVariant(unsigned Opc);

/// GetOppositeBranchCondition - Return the inverse of the specified cond,
/// e.g. turning COND_E to COND_NE.
CondCode GetOppositeBranchCondition(CondCode CC);

/// Get the VPCMP immediate for the given condition.
unsigned getVPCMPImmForCond(ISD::CondCode CC);

/// Get the VPCMP immediate if the operands are swapped.
unsigned getSwappedVPCMPImm(unsigned Imm);

/// Get the VPCOM immediate if the operands are swapped.
unsigned getSwappedVPCOMImm(unsigned Imm);

/// Get the VCMP immediate if the operands are swapped.
unsigned getSwappedVCMPImm(unsigned Imm);
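// As an illustration (using the AVX-512 VPCMP immediate encoding, where
// 1 = LT and 6 = NLE, i.e. GT): "a < b" is "b > a" once the operands are
// exchanged, so getSwappedVPCMPImm(1) is expected to return 6.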

/// Get the width of the vector register operand.
unsigned getVectorRegisterWidth(const MCOperandInfo &Info);

/// Check if the instruction is an X87 instruction.
bool isX87Instruction(MachineInstr &MI);

/// Return the index of the instruction's first address operand, if it has a
/// memory reference, or -1 if it has none. Unlike X86II::getMemoryOperandNo(),
/// this also works for pseudo instructions (e.g., TCRETURNmi) as well as
/// real instructions (e.g., JMP64m).
int getFirstAddrOperandIdx(const MachineInstr &MI);

/// Find any constant pool entry associated with a specific instruction operand.
const Constant *getConstantFromPool(const MachineInstr &MI, unsigned OpNo);

} // namespace X86

/// isGlobalStubReference - Return true if the specified TargetFlag operand is
/// a reference to a stub for a global, not the global itself.
inline static bool isGlobalStubReference(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_DLLIMPORT:               // dllimport stub.
  case X86II::MO_GOTPCREL:                // rip-relative GOT reference.
  case X86II::MO_GOTPCREL_NORELAX:        // rip-relative GOT reference.
  case X86II::MO_GOT:                     // normal GOT reference.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: // Normal $non_lazy_ptr ref.
  case X86II::MO_DARWIN_NONLAZY:          // Normal $non_lazy_ptr ref.
  case X86II::MO_COFFSTUB:                // COFF .refptr stub.
    return true;
  default:
    return false;
  }
}
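
// Typical use is on a machine operand's target flags, e.g. (a sketch; OpNo is
// a hypothetical operand index):
//
//   const MachineOperand &MO = MI.getOperand(OpNo);
//   if (isGlobalStubReference(MO.getTargetFlags()))
//     ...; // MO names a stub (GOT slot, $non_lazy_ptr, .refptr), not the
//          // global itself.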

/// isGlobalRelativeToPICBase - Return true if the specified global value
/// reference is relative to a 32-bit PIC base (X86ISD::GlobalBaseReg). If this
/// is true, the addressing mode has the PIC base register added in (e.g. EBX).
inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_GOTOFF:                  // isPICStyleGOT: local global.
  case X86II::MO_GOT:                     // isPICStyleGOT: other global.
  case X86II::MO_PIC_BASE_OFFSET:         // Darwin local global.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: // Darwin/32 external global.
  case X86II::MO_TLVP:                    // ??? Pretty sure..
    return true;
  default:
    return false;
  }
}

inline static bool isScale(const MachineOperand &MO) {
  return MO.isImm() && (MO.getImm() == 1 || MO.getImm() == 2 ||
                        MO.getImm() == 4 || MO.getImm() == 8);
}

inline static bool isLeaMem(const MachineInstr &MI, unsigned Op) {
  if (MI.getOperand(Op).isFI())
    return true;
  return Op + X86::AddrSegmentReg <= MI.getNumOperands() &&
         MI.getOperand(Op + X86::AddrBaseReg).isReg() &&
         isScale(MI.getOperand(Op + X86::AddrScaleAmt)) &&
         MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
         (MI.getOperand(Op + X86::AddrDisp).isImm() ||
          MI.getOperand(Op + X86::AddrDisp).isGlobal() ||
          MI.getOperand(Op + X86::AddrDisp).isCPI() ||
          MI.getOperand(Op + X86::AddrDisp).isJTI());
}

inline static bool isMem(const MachineInstr &MI, unsigned Op) {
  if (MI.getOperand(Op).isFI())
    return true;
  return Op + X86::AddrNumOperands <= MI.getNumOperands() &&
         MI.getOperand(Op + X86::AddrSegmentReg).isReg() && isLeaMem(MI, Op);
}
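
// The address operands checked above are always a contiguous 5-operand group
// starting at index Op: base register, scale amount, index register,
// displacement, and segment register (X86::AddrBaseReg through
// X86::AddrSegmentReg). For example, a load such as
// "mov eax, dword ptr [rdi + 4*rcx + 8]" appears in MIR roughly as (sketch):
//
//   $eax = MOV32rm $rdi, 4, $rcx, 8, $noreg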

inline static bool isAddMemInstrWithRelocation(const MachineInstr &MI) {
  unsigned Op = MI.getOpcode();
  if (Op == X86::ADD64rm || Op == X86::ADD64mr_ND || Op == X86::ADD64rm_ND) {
    int MemOpNo = X86II::getMemoryOperandNo(MI.getDesc().TSFlags) +
                  X86II::getOperandBias(MI.getDesc());
    const MachineOperand &MO = MI.getOperand(X86::AddrDisp + MemOpNo);
    if (MO.getTargetFlags() == X86II::MO_GOTTPOFF)
      return true;
  }

  return false;
}

inline static bool isMemInstrWithGOTPCREL(const MachineInstr &MI) {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case X86::TEST32mr:
  case X86::TEST64mr:
  case X86::CMP32rm:
  case X86::CMP64rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::ADC32rm:
  case X86::ADD32rm:
  case X86::AND32rm:
  case X86::OR32rm:
  case X86::SBB32rm:
  case X86::SUB32rm:
  case X86::XOR32rm:
  case X86::ADC64rm:
  case X86::ADD64rm:
  case X86::AND64rm:
  case X86::OR64rm:
  case X86::SBB64rm:
  case X86::SUB64rm:
  case X86::XOR64rm: {
    int MemOpNo = X86II::getMemoryOperandNo(MI.getDesc().TSFlags) +
                  X86II::getOperandBias(MI.getDesc());
    const MachineOperand &MO = MI.getOperand(X86::AddrDisp + MemOpNo);
    if (MO.getTargetFlags() == X86II::MO_GOTPCREL)
      return true;
    break;
  }
  }
  return false;
}

class X86InstrInfo final : public X86GenInstrInfo {
  X86Subtarget &Subtarget;
  const X86RegisterInfo RI;

  LLVM_DECLARE_VIRTUAL_ANCHOR_FUNCTION();

  bool analyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                         MachineBasicBlock *&FBB,
                         SmallVectorImpl<MachineOperand> &Cond,
                         SmallVectorImpl<MachineInstr *> &CondBranches,
                         bool AllowModify) const;

  bool foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI, Register Reg,
                         int64_t ImmVal, MachineRegisterInfo *MRI,
                         bool MakeChange) const;

public:
  explicit X86InstrInfo(X86Subtarget &STI);

  /// Given a machine instruction descriptor, returns the register
  /// class constraint for OpNum, or NULL. Returned register class
  /// may be different from the definition in the TD file, e.g.
  /// GR*RegClass (definition in TD file)
  /// ->
  /// GR*_NOREX2RegClass (Returned register class)
  const TargetRegisterClass *
  getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
              const TargetRegisterInfo *TRI,
              const MachineFunction &MF) const override;

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const X86RegisterInfo &getRegisterInfo() const { return RI; }

  /// Returns the stack pointer adjustment that happens inside the frame
  /// setup..destroy sequence (e.g. by pushes, or inside the callee).
  int64_t getFrameAdjustment(const MachineInstr &I) const {
    assert(isFrameInstr(I));
    if (isFrameSetup(I))
      return I.getOperand(2).getImm();
    return I.getOperand(1).getImm();
  }
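
  // For example, a call-frame setup pseudo that allocates 16 bytes of argument
  // space and whose setup..destroy region pushes another 8 bytes might look
  // like this (a sketch; operand 2 is what getFrameAdjustment() reads):
  //
  //   ADJCALLSTACKDOWN64 16, 0, 8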

  /// Sets the stack pointer adjustment made inside the frame made up by this
  /// instruction.
  void setFrameAdjustment(MachineInstr &I, int64_t V) const {
    assert(isFrameInstr(I));
    if (isFrameSetup(I))
      I.getOperand(2).setImm(V);
    else
      I.getOperand(1).setImm(V);
  }

  /// getSPAdjust - This returns the stack pointer adjustment made by
  /// this instruction. For x86, we need to handle more complex call
  /// sequences involving PUSHes.
  int getSPAdjust(const MachineInstr &MI) const override;

  /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
  /// extension instruction. That is, it's like a copy where it's legal for the
  /// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
  /// true, then it's expected the pre-extension value is available as a subreg
  /// of the result register. This also returns the sub-register index in
  /// SubIdx.
  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  /// Returns true if the instruction has no behavior (specified or otherwise)
  /// that is based on the value of any of its register operands.
  ///
  /// Instructions are considered data invariant even if they set EFLAGS.
  ///
  /// A classical example of something that is inherently not data invariant is
  /// an indirect jump -- the destination is loaded into icache based on the
  /// bits set in the jump destination register.
  ///
  /// FIXME: This should become part of our instruction tables.
  static bool isDataInvariant(MachineInstr &MI);

  /// Returns true if the instruction has no behavior (specified or otherwise)
  /// that is based on the value loaded from memory or the value of any
  /// non-address register operands.
  ///
  /// For example, if the latency of the instruction is dependent on the
  /// particular bits set in any of the registers *or* any of the bits loaded
  /// from memory.
  ///
  /// Instructions are considered data invariant even if they set EFLAGS.
  ///
  /// A classical example of something that is inherently not data invariant is
  /// an indirect jump -- the destination is loaded into icache based on the
  /// bits set in the jump destination register.
  ///
  /// FIXME: This should become part of our instruction tables.
  static bool isDataInvariantLoad(MachineInstr &MI);

  Register isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex,
                               TypeSize &MemBytes) const override;
  /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  Register isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                     int &FrameIndex) const override;

  Register isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;
  Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex,
                              TypeSize &MemBytes) const override;
  /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  Register isStoreToStackSlotPostFE(const MachineInstr &MI,
                                    int &FrameIndex) const override;

  bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;
  void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                     Register DestReg, unsigned SubIdx,
                     const MachineInstr &Orig,
                     const TargetRegisterInfo &TRI) const override;

  /// Given an operand within a MachineInstr, insert preceding code to put it
  /// into the right format for a particular kind of LEA instruction. This may
  /// involve using an appropriate super-register instead (with an implicit use
  /// of the original) or creating a new virtual register and inserting COPY
  /// instructions to get the data into the right class.
  ///
  /// Reference parameters are set to indicate how caller should add this
  /// operand to the LEA instruction.
  bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
                      unsigned LEAOpcode, bool AllowSP, Register &NewSrc,
                      unsigned &NewSrcSubReg, bool &isKill,
                      MachineOperand &ImplicitOp, LiveVariables *LV,
                      LiveIntervals *LIS) const;

  /// convertToThreeAddress - This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
  /// may be able to convert a two-address instruction into a true
  /// three-address instruction on demand. This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if they
  /// would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the new instruction.
  MachineInstr *convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
                                      LiveIntervals *LIS) const override;

  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments. Their
  /// input values can be re-defined in this method only if the input values
  /// are not pre-defined, which is designated by the special value
  /// 'CommuteAnyOperandIndex' assigned to it.
  /// If both indices are pre-defined and refer to some operands, then the
  /// method simply returns true if the corresponding operands are commutable
  /// and returns false otherwise.
  ///
  /// For example, calling this method this way:
  ///   unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///   findCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with the operand#1.
  bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1,
                             unsigned &SrcOpIdx2) const override;

  /// Returns true if we have a preference for the operand order in MI; the
  /// commute decision is returned in Commute.
  bool hasCommutePreference(MachineInstr &MI, bool &Commute) const override;

  /// Returns an adjusted FMA opcode that must be used in an FMA instruction
  /// that performs the same computations as the given \p MI but which has the
  /// operands \p SrcOpIdx1 and \p SrcOpIdx2 commuted.
  /// It may return 0 if it is unsafe to commute the operands.
  /// Note that a machine instruction (instead of its opcode) is passed as the
  /// first parameter to make it possible to analyze the instruction's uses and
  /// commute the first operand of FMA even when it seems unsafe when you look
  /// at the opcode. For example, it is OK to commute the first operand of
  /// VFMADD*SD_Int, if ONLY the lowest 64-bit element of the result is used.
  ///
  /// The returned FMA opcode may differ from the opcode in the given \p MI.
  /// For example, commuting the operands #1 and #3 in the following FMA
  ///   FMA213 #1, #2, #3
  /// results in an instruction with the adjusted opcode:
  ///   FMA231 #3, #2, #1
  unsigned
  getFMA3OpcodeToCommuteOperands(const MachineInstr &MI, unsigned SrcOpIdx1,
                                 unsigned SrcOpIdx2,
                                 const X86InstrFMA3Group &FMA3Group) const;
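
  // Concretely (a sketch, not an exhaustive mapping): with the Intel FMA3
  // semantics fma213(x, y, z) = y*x + z and fma231(x, y, z) = y*z + x,
  // commuting operands #1 and #3 of a VFMADD213PSr is expected to yield
  // VFMADD231PSr, because fma231(z, y, x) == fma213(x, y, z).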

  // Branch analysis.
  bool isUnconditionalTailCall(const MachineInstr &MI) const override;
  bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
                                  const MachineInstr &TailCall) const override;
  void replaceBranchWithTailCall(MachineBasicBlock &MBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 const MachineInstr &TailCall) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify) const override;

  int getJumpTableIndex(const MachineInstr &MI) const override;

  std::optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

  bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg,
                               int64_t &ImmVal) const override;

  bool preservesZeroValueInReg(const MachineInstr *MI,
                               const Register NullValueReg,
                               const TargetRegisterInfo *TRI) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &LdSt,
      SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
      bool &OffsetIsScalable, LocationSize &Width,
      const TargetRegisterInfo *TRI) const override;

  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              TargetInstrInfo::MachineBranchPredicate &MBP,
                              bool AllowModify = false) const override;

  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                   const DebugLoc &DL, Register DestReg, Register SrcReg,
                   bool KillSrc, bool RenamableDest = false,
                   bool RenamableSrc = false) const override;
  void storeRegToStackSlot(
      MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
      bool isKill, int FrameIndex, const TargetRegisterClass *RC,
      const TargetRegisterInfo *TRI, Register VReg,
      MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;

  void loadRegFromStackSlot(
      MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
      int FrameIndex, const TargetRegisterClass *RC,
      const TargetRegisterInfo *TRI, Register VReg,
      MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;

  void loadStoreTileReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                        unsigned Opc, Register Reg, int FrameIdx,
                        bool isKill = false) const;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  /// Check whether the target can fold a load that feeds a subreg operand
  /// (or a subreg operand that feeds a store).
  bool isSubregFoldable() const override { return true; }

  /// Fold a load or store of the specified stack slot into the specified
  /// machine instruction for the specified operand(s). If folding happens, it
  /// is likely that the referenced instruction has been changed.
  ///
  /// \returns the new instruction on success, or nullptr otherwise.
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const override;

  /// Same as the previous version except it allows folding of any load and
  /// store from / to any address, not just from a specific stack slot.
  MachineInstr *foldMemoryOperandImpl(
      MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
      MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
      LiveIntervals *LIS = nullptr) const override;

  bool
  unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, Register Reg,
                      bool UnfoldLoad, bool UnfoldStore,
                      SmallVectorImpl<MachineInstr *> &NewMIs) const override;

  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                           SmallVectorImpl<SDNode *> &NewNodes) const override;

  unsigned
  getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
                             unsigned *LoadRegIndex = nullptr) const override;

  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
                               int64_t &Offset2) const override;

  /// Overrides the isSchedulingBoundary from Codegen/TargetInstrInfo.cpp to
  /// make it capable of identifying ENDBR instructions and prevent them from
  /// being re-scheduled.
  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// This is used by the pre-regalloc scheduler to determine (in conjunction
  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
  /// On some targets, if two loads are loading from addresses in the same
  /// cache line, it's better if they are scheduled together. This function
  /// takes two integers that represent the load offsets from the common base
  /// address. It returns true if it decides it's desirable to schedule the two
  /// loads together. "NumLoads" is the number of loads that have already been
  /// scheduled after Load1.
  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1,
                               int64_t Offset2,
                               unsigned NumLoads) const override;

  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;

  MCInst getNop() const override;

  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;

  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;

  /// True if MI has a condition code def, e.g. EFLAGS, that is
  /// not marked dead.
  bool hasLiveCondCodeDef(MachineInstr &MI) const;

  /// getGlobalBaseReg - Return a virtual register initialized with the
  /// global base register value. Output instructions required to
  /// initialize the register in the function entry block, if necessary.
  Register getGlobalBaseReg(MachineFunction *MF) const;

  std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr &MI) const override;

  uint16_t getExecutionDomainCustom(const MachineInstr &MI) const;

  void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override;

  bool setExecutionDomainCustom(MachineInstr &MI, unsigned Domain) const;

  unsigned
  getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
                               const TargetRegisterInfo *TRI) const override;
  unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
                                const TargetRegisterInfo *TRI) const override;
  void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
                                 const TargetRegisterInfo *TRI) const override;

  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                                      unsigned OpNum,
                                      ArrayRef<MachineOperand> MOs,
                                      MachineBasicBlock::iterator InsertPt,
                                      unsigned Size, Align Alignment,
                                      bool AllowCommute) const;

  bool isHighLatencyDef(int opc) const override;

  bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr &DefMI, unsigned DefIdx,
                             const MachineInstr &UseMI,
                             unsigned UseIdx) const override;

  bool useMachineCombiner() const override { return true; }

  bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                   bool Invert) const override;

  bool hasReassociableOperands(const MachineInstr &Inst,
                               const MachineBasicBlock *MBB) const override;

  void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
                             MachineInstr &NewMI1,
                             MachineInstr &NewMI2) const override;

  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &CmpMask,
                      int64_t &CmpValue) const override;

  /// Check if there exists an earlier instruction that operates on the same
  /// source operands and sets EFLAGS in the same way as CMP and remove CMP if
  /// possible.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
                            const MachineRegisterInfo *MRI) const override;

  bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
                     MachineRegisterInfo *MRI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;

  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;

  std::optional<std::unique_ptr<outliner::OutlinedFunction>>
  getOutliningCandidateInfo(
      const MachineModuleInfo &MMI,
      std::vector<outliner::Candidate> &RepeatedSequenceLocs,
      unsigned MinRepeats) const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;

  outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI,
                                           MachineBasicBlock::iterator &MIT,
                                           unsigned Flags) const override;

  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;

  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     outliner::Candidate &C) const override;

  void buildClearRegister(Register Reg, MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator Iter, DebugLoc &DL,
                          bool AllowSideEffects = true) const override;

  bool verifyInstruction(const MachineInstr &MI,
                         StringRef &ErrInfo) const override;

#define GET_INSTRINFO_HELPER_DECLS
#include "X86GenInstrInfo.inc"

  static bool hasLockPrefix(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & X86II::LOCK;
  }

  std::optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

protected:
  MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                       unsigned CommuteOpIdx1,
                                       unsigned CommuteOpIdx2) const override;

  std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

  bool getMachineCombinerPatterns(MachineInstr &Root,
                                  SmallVectorImpl<unsigned> &Patterns,
                                  bool DoRegPressureReduce) const override;

  /// When getMachineCombinerPatterns() finds potential patterns,
  /// this function generates the instructions that could replace the
  /// original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, unsigned Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<Register, unsigned> &InstrIdxForVirtReg) const override;

  /// When calculating the latency of the root instruction, accumulate the
  /// latency of the sequence into the root latency.
  /// \param Root - Instruction that could be combined with one of its operands
  /// For the X86 combine (vpmaddwd + vpaddd) -> vpdpwssd, the vpmaddwd is not
  /// on the critical path, so the root latency only includes the vpaddd.
  bool accumulateInstrSeqToRootLatency(MachineInstr &Root) const override {
    return false;
  }

  void getFrameIndexOperands(SmallVectorImpl<MachineOperand> &Ops,
                             int FI) const override;

private:
  /// This is a helper for convertToThreeAddress for 8- and 16-bit instructions.
  /// We use 32-bit LEA to form 3-address code by promoting to a 32-bit
  /// super-register and then truncating back down to an 8/16-bit sub-register.
  MachineInstr *convertToThreeAddressWithLEA(unsigned MIOpc, MachineInstr &MI,
                                             LiveVariables *LV,
                                             LiveIntervals *LIS,
                                             bool Is8BitOp) const;

  /// Handles memory folding for special case instructions, for instance those
  /// requiring custom manipulation of the address.
  MachineInstr *foldMemoryOperandCustom(MachineFunction &MF, MachineInstr &MI,
                                        unsigned OpNum,
                                        ArrayRef<MachineOperand> MOs,
                                        MachineBasicBlock::iterator InsertPt,
                                        unsigned Size, Align Alignment) const;

  MachineInstr *foldMemoryBroadcast(MachineFunction &MF, MachineInstr &MI,
                                    unsigned OpNum,
                                    ArrayRef<MachineOperand> MOs,
                                    MachineBasicBlock::iterator InsertPt,
                                    unsigned BitsSize, bool AllowCommute) const;

  /// isFrameOperand - Return true and the FrameIndex if the specified
  /// operand and following operands form a reference to the stack frame.
  bool isFrameOperand(const MachineInstr &MI, unsigned int Op,
                      int &FrameIndex) const;

  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction with 3 vector inputs.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments. Their
  /// input values can be re-defined in this method only if the input values
  /// are not pre-defined, which is designated by the special value
  /// 'CommuteAnyOperandIndex' assigned to it.
  /// If both indices are pre-defined and refer to some operands, then the
  /// method simply returns true if the corresponding operands are commutable
  /// and returns false otherwise.
  ///
  /// For example, calling this method this way:
  ///   unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///   findThreeSrcCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with the operand#1.
  ///
  /// If IsIntrinsic is set, operand 1 will be ignored for commuting.
  bool findThreeSrcCommutedOpIndices(const MachineInstr &MI,
                                     unsigned &SrcOpIdx1, unsigned &SrcOpIdx2,
                                     bool IsIntrinsic = false) const;

  /// Returns true when instruction \p FlagI produces the same flags as \p OI.
  /// The caller should pass in the results of calling analyzeCompare on \p OI:
  /// \p SrcReg, \p SrcReg2, \p ImmMask, \p ImmValue.
  /// If the flags match \p OI as if it had the input operands swapped then the
  /// function succeeds and sets \p IsSwapped to true.
  ///
  /// Examples of OI, FlagI pairs returning true:
  ///   CMP %1, 42   and  CMP %1, 42
  ///   CMP %1, %2   and  %3 = SUB %1, %2
  ///   TEST %1, %1  and  %2 = SUB %1, 0
  ///   CMP %1, %2   and  %3 = SUB %2, %1  ; IsSwapped=true
  bool isRedundantFlagInstr(const MachineInstr &FlagI, Register SrcReg,
                            Register SrcReg2, int64_t ImmMask, int64_t ImmValue,
                            const MachineInstr &OI, bool *IsSwapped,
                            int64_t *ImmDelta) const;

  /// Commute operands of \p MI for memory fold.
  ///
  /// \param Idx1 the index of operand to be commuted.
  ///
  /// \returns the index of operand that is commuted with \p Idx1. If the method
  /// fails to commute the operands, it will return \p Idx1.
  unsigned commuteOperandsForFold(MachineInstr &MI, unsigned Idx1) const;
};
} // namespace llvm

#endif