1 | //===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the SystemZ implementation of the TargetInstrInfo class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "SystemZInstrInfo.h" |
14 | #include "MCTargetDesc/SystemZMCTargetDesc.h" |
15 | #include "SystemZ.h" |
16 | #include "SystemZInstrBuilder.h" |
17 | #include "SystemZSubtarget.h" |
18 | #include "llvm/ADT/Statistic.h" |
19 | #include "llvm/CodeGen/LiveInterval.h" |
20 | #include "llvm/CodeGen/LiveIntervals.h" |
21 | #include "llvm/CodeGen/LiveRegUnits.h" |
22 | #include "llvm/CodeGen/LiveVariables.h" |
23 | #include "llvm/CodeGen/MachineBasicBlock.h" |
24 | #include "llvm/CodeGen/MachineFrameInfo.h" |
25 | #include "llvm/CodeGen/MachineFunction.h" |
26 | #include "llvm/CodeGen/MachineInstr.h" |
27 | #include "llvm/CodeGen/MachineMemOperand.h" |
28 | #include "llvm/CodeGen/MachineOperand.h" |
29 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
30 | #include "llvm/CodeGen/SlotIndexes.h" |
31 | #include "llvm/CodeGen/StackMaps.h" |
32 | #include "llvm/CodeGen/TargetInstrInfo.h" |
33 | #include "llvm/CodeGen/TargetOpcodes.h" |
34 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
35 | #include "llvm/CodeGen/VirtRegMap.h" |
36 | #include "llvm/MC/MCInstrDesc.h" |
37 | #include "llvm/MC/MCRegisterInfo.h" |
38 | #include "llvm/Support/BranchProbability.h" |
39 | #include "llvm/Support/ErrorHandling.h" |
40 | #include "llvm/Support/MathExtras.h" |
41 | #include "llvm/Target/TargetMachine.h" |
42 | #include <cassert> |
43 | #include <cstdint> |
44 | #include <iterator> |
45 | |
46 | using namespace llvm; |
47 | |
48 | #define GET_INSTRINFO_CTOR_DTOR |
49 | #define GET_INSTRMAP_INFO |
50 | #include "SystemZGenInstrInfo.inc" |
51 | |
52 | #define DEBUG_TYPE "systemz-II" |
53 | |
// Return a mask with Count low bits set (Count may be 0..64 inclusive).
static uint64_t allOnes(unsigned int Count) {
  if (Count == 0)
    return 0;
  // Shift in two steps so that Count == 64 does not shift a 64-bit value
  // by 64 bits, which would be undefined behavior.
  return ((uint64_t(1) << (Count - 1)) << 1) - 1;
}
58 | |
// Pin the vtable to this file: defining this otherwise-empty virtual method
// out of line gives the class a single home translation unit for its vtable.
void SystemZInstrInfo::anchor() {}
61 | |
// Construct the SystemZ instruction info.  The two -1 arguments tell the
// generated base class that there are no separate call-frame setup/destroy
// opcodes to track.  The register info (RI) is initialized from the
// subtarget's special-register set and hardware mode.
SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
    : SystemZGenInstrInfo(-1, -1),
      RI(sti.getSpecialRegisters()->getReturnFunctionAddressRegister(),
         sti.getHwMode()),
      STI(sti) {}
67 | |
// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
// Expected operand layout of MI: operand 0 is the 128-bit register,
// operands 1-3 form the address (base register, displacement immediate,
// index register).
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions.  Use the original instruction for
  // one of them and create a clone for the other.
  MachineInstr *HighPartMI = MF.CloneMachineInstr(Orig: &*MI);
  MachineInstr *LowPartMI = &*MI;
  MBB->insert(I: LowPartMI, MI: HighPartMI);

  // Set up the two 64-bit registers and remember super reg and its flags.
  MachineOperand &HighRegOp = HighPartMI->getOperand(i: 0);
  MachineOperand &LowRegOp = LowPartMI->getOperand(i: 0);
  Register Reg128 = LowRegOp.getReg();
  unsigned Reg128Killed = getKillRegState(B: LowRegOp.isKill());
  unsigned Reg128Undef = getUndefRegState(B: LowRegOp.isUndef());
  HighRegOp.setReg(RI.getSubReg(Reg: HighRegOp.getReg(), Idx: SystemZ::subreg_h64));
  LowRegOp.setReg(RI.getSubReg(Reg: LowRegOp.getReg(), Idx: SystemZ::subreg_l64));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = HighPartMI->getOperand(i: 2);
  MachineOperand &LowOffsetOp = LowPartMI->getOperand(i: 2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Set the opcodes.  Each half independently picks the opcode variant
  // whose displacement range covers its (possibly adjusted) offset.
  unsigned HighOpcode = getOpcodeForOffset(Opcode: NewOpcode, Offset: HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(Opcode: NewOpcode, Offset: LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range" );
  HighPartMI->setDesc(get(Opcode: HighOpcode));
  LowPartMI->setDesc(get(Opcode: LowOpcode));

  MachineInstr *FirstMI = HighPartMI;
  if (MI->mayStore()) {
    FirstMI->getOperand(i: 0).setIsKill(false);
    // Add implicit uses of the super register in case one of the subregs is
    // undefined. We could track liveness and skip storing an undefined
    // subreg, but this is hopefully rare (discovered with llvm-stress).
    // If Reg128 was killed, set kill flag on MI.
    unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
    MachineInstrBuilder(MF, HighPartMI).addReg(RegNo: Reg128, flags: Reg128UndefImpl);
    MachineInstrBuilder(MF, LowPartMI).addReg(RegNo: Reg128, flags: (Reg128UndefImpl | Reg128Killed));
  } else {
    // If HighPartMI clobbers any of the address registers, it needs to come
    // after LowPartMI.
    auto overlapsAddressReg = [&](Register Reg) -> bool {
      return RI.regsOverlap(RegA: Reg, RegB: MI->getOperand(i: 1).getReg()) ||
             RI.regsOverlap(RegA: Reg, RegB: MI->getOperand(i: 3).getReg());
    };
    if (overlapsAddressReg(HighRegOp.getReg())) {
      assert(!overlapsAddressReg(LowRegOp.getReg()) &&
             "Both loads clobber address!" );
      MBB->splice(Where: HighPartMI, Other: MBB, From: LowPartMI);
      FirstMI = LowPartMI;
    }
  }

  // Clear the kill flags on the address registers in the first instruction,
  // since the second instruction still uses the same base and index.
  FirstMI->getOperand(i: 1).setIsKill(false);
  FirstMI->getOperand(i: 3).setIsKill(false);
}
132 | |
// Split ADJDYNALLOC instruction MI: fold the frame-layout components into
// the instruction's displacement and lower it to an LA-style address
// computation.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(i: 2);
  SystemZCallingConventionRegisters *Regs = STI.getSpecialRegisters();

  // Combine the incoming displacement with the outgoing-args area size,
  // the ABI call frame size and the stack pointer bias.
  uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
                     Regs->getCallFrameSize() +
                     Regs->getStackPointerBias() +
                     OffsetMO.getImm());
  // Pick the LA-family opcode whose displacement range covers Offset.
  unsigned NewOpcode = getOpcodeForOffset(Opcode: SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet" );
  MI->setDesc(get(Opcode: NewOpcode));
  OffsetMO.setImm(Offset);
}
150 | |
151 | // MI is an RI-style pseudo instruction. Replace it with LowOpcode |
152 | // if the first operand is a low GR32 and HighOpcode if the first operand |
153 | // is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand |
154 | // and HighOpcode takes an unsigned 32-bit operand. In those cases, |
155 | // MI has the same kind of operand as LowOpcode, so needs to be converted |
156 | // if HighOpcode is used. |
157 | void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode, |
158 | unsigned HighOpcode, |
159 | bool ConvertHigh) const { |
160 | Register Reg = MI.getOperand(i: 0).getReg(); |
161 | bool IsHigh = SystemZ::isHighReg(Reg); |
162 | MI.setDesc(get(Opcode: IsHigh ? HighOpcode : LowOpcode)); |
163 | if (IsHigh && ConvertHigh) |
164 | MI.getOperand(i: 1).setImm(uint32_t(MI.getOperand(i: 1).getImm())); |
165 | } |
166 | |
// MI is a three-operand RIE-style pseudo instruction. Replace it with
// LowOpcodeK if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned LowOpcodeK,
                                       unsigned HighOpcode) const {
  Register DestReg = MI.getOperand(i: 0).getReg();
  Register SrcReg = MI.getOperand(i: 1).getReg();
  bool DestIsHigh = SystemZ::isHighReg(Reg: DestReg);
  bool SrcIsHigh = SystemZ::isHighReg(Reg: SrcReg);
  if (!DestIsHigh && !SrcIsHigh)
    // Both low: the three-operand (distinct-operands) form works directly.
    MI.setDesc(get(Opcode: LowOpcodeK));
  else {
    // A high register is involved, so fall back to the two-operand form:
    // first move the source into the destination (if needed), then apply
    // the operation in place with operands 0 and 1 tied.
    if (DestReg != SrcReg) {
      emitGRX32Move(MBB&: *MI.getParent(), MBBI: MI, DL: MI.getDebugLoc(), DestReg, SrcReg,
                    LowLowOpcode: SystemZ::LR, Size: 32, KillSrc: MI.getOperand(i: 1).isKill(),
                    UndefSrc: MI.getOperand(i: 1).isUndef());
      MI.getOperand(i: 1).setReg(DestReg);
    }
    MI.setDesc(get(Opcode: DestIsHigh ? HighOpcode : LowOpcode));
    MI.tieOperands(DefIdx: 0, UseIdx: 1);
  }
}
191 | |
192 | // MI is an RXY-style pseudo instruction. Replace it with LowOpcode |
193 | // if the first operand is a low GR32 and HighOpcode if the first operand |
194 | // is a high GR32. |
195 | void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode, |
196 | unsigned HighOpcode) const { |
197 | Register Reg = MI.getOperand(i: 0).getReg(); |
198 | unsigned Opcode = getOpcodeForOffset( |
199 | Opcode: SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode, |
200 | Offset: MI.getOperand(i: 2).getImm()); |
201 | MI.setDesc(get(Opcode)); |
202 | } |
203 | |
204 | // MI is a load-on-condition pseudo instruction with a single register |
205 | // (source or destination) operand. Replace it with LowOpcode if the |
206 | // register is a low GR32 and HighOpcode if the register is a high GR32. |
207 | void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode, |
208 | unsigned HighOpcode) const { |
209 | Register Reg = MI.getOperand(i: 0).getReg(); |
210 | unsigned Opcode = SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode; |
211 | MI.setDesc(get(Opcode)); |
212 | } |
213 | |
// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another. Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
                                        unsigned Size) const {
  // Emit the replacement move before MI, preserving kill/undef flags of the
  // source operand.
  MachineInstrBuilder MIB =
    emitGRX32Move(MBB&: *MI.getParent(), MBBI: MI, DL: MI.getDebugLoc(),
                  DestReg: MI.getOperand(i: 0).getReg(), SrcReg: MI.getOperand(i: 1).getReg(), LowLowOpcode: LowOpcode,
                  Size, KillSrc: MI.getOperand(i: 1).isKill(), UndefSrc: MI.getOperand(i: 1).isUndef());

  // Keep the remaining operands as-is: everything past the two register
  // operands is transferred onto the new instruction.
  for (const MachineOperand &MO : llvm::drop_begin(RangeOrContainer: MI.operands(), N: 2))
    MIB.add(MO);

  // The pseudo has been fully replaced; remove it.
  MI.eraseFromParent();
}
230 | |
// Expand a LOAD_STACK_GUARD pseudo: build a 64-bit pointer from the access
// registers %a0/%a1 and load the guard value from offset 40 off it.
// NOTE(review): offset 40 presumably matches the s390 TLS/stack-guard
// layout — confirm against the ABI if changing.
void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  const Register Reg64 = MI->getOperand(i: 0).getReg();
  const Register Reg32 = RI.getSubReg(Reg: Reg64, Idx: SystemZ::subreg_l32);

  // EAR can only load the low subregister so use a shift for %a0 to produce
  // the GR containing %a0 and %a1.

  // ear <reg>, %a0
  // The implicit def of Reg64 keeps the full 64-bit register live even
  // though EAR only writes the low 32 bits.
  BuildMI(BB&: *MBB, I: MI, MIMD: MI->getDebugLoc(), MCID: get(Opcode: SystemZ::EAR), DestReg: Reg32)
    .addReg(RegNo: SystemZ::A0)
    .addReg(RegNo: Reg64, flags: RegState::ImplicitDefine);

  // sllg <reg>, <reg>, 32
  BuildMI(BB&: *MBB, I: MI, MIMD: MI->getDebugLoc(), MCID: get(Opcode: SystemZ::SLLG), DestReg: Reg64)
    .addReg(RegNo: Reg64)
    .addReg(RegNo: 0)
    .addImm(Val: 32);

  // ear <reg>, %a1
  BuildMI(BB&: *MBB, I: MI, MIMD: MI->getDebugLoc(), MCID: get(Opcode: SystemZ::EAR), DestReg: Reg32)
    .addReg(RegNo: SystemZ::A1);

  // lg <reg>, 40(<reg>) — reuse MI itself as the final load.
  MI->setDesc(get(Opcode: SystemZ::LG));
  MachineInstrBuilder(MF, MI).addReg(RegNo: Reg64).addImm(Val: 40).addReg(RegNo: 0);
}
259 | |
260 | // Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR |
261 | // DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg |
262 | // are low registers, otherwise use RISB[LH]G. Size is the number of bits |
263 | // taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR). |
264 | // KillSrc is true if this move is the last use of SrcReg. |
265 | MachineInstrBuilder |
266 | SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB, |
267 | MachineBasicBlock::iterator MBBI, |
268 | const DebugLoc &DL, unsigned DestReg, |
269 | unsigned SrcReg, unsigned LowLowOpcode, |
270 | unsigned Size, bool KillSrc, |
271 | bool UndefSrc) const { |
272 | unsigned Opcode; |
273 | bool DestIsHigh = SystemZ::isHighReg(Reg: DestReg); |
274 | bool SrcIsHigh = SystemZ::isHighReg(Reg: SrcReg); |
275 | if (DestIsHigh && SrcIsHigh) |
276 | Opcode = SystemZ::RISBHH; |
277 | else if (DestIsHigh && !SrcIsHigh) |
278 | Opcode = SystemZ::RISBHL; |
279 | else if (!DestIsHigh && SrcIsHigh) |
280 | Opcode = SystemZ::RISBLH; |
281 | else { |
282 | return BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: LowLowOpcode), DestReg) |
283 | .addReg(RegNo: SrcReg, flags: getKillRegState(B: KillSrc) | getUndefRegState(B: UndefSrc)); |
284 | } |
285 | unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0); |
286 | return BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode), DestReg) |
287 | .addReg(RegNo: DestReg, flags: RegState::Undef) |
288 | .addReg(RegNo: SrcReg, flags: getKillRegState(B: KillSrc) | getUndefRegState(B: UndefSrc)) |
289 | .addImm(Val: 32 - Size).addImm(Val: 128 + 31).addImm(Val: Rotate); |
290 | } |
291 | |
// Commute operands OpIdx1 and OpIdx2 of MI, cloning first if NewMI is set.
// For select / load-on-condition instructions, swapping the two value
// operands also requires inverting the condition, which is done by XORing
// the CC mask (operand 4) with the valid-CC mask (operand 3) before
// delegating the operand swap to the generic implementation.
MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                       bool NewMI,
                                                       unsigned OpIdx1,
                                                       unsigned OpIdx2) const {
  // Return MI itself, or a fresh clone when the caller asked for a new
  // instruction.
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(Orig: &MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case SystemZ::SELRMux:
  case SystemZ::SELFHR:
  case SystemZ::SELR:
  case SystemZ::SELGR:
  case SystemZ::LOCRMux:
  case SystemZ::LOCFHR:
  case SystemZ::LOCR:
  case SystemZ::LOCGR: {
    auto &WorkingMI = cloneIfNew(MI);
    // Invert condition.
    unsigned CCValid = WorkingMI.getOperand(i: 3).getImm();
    unsigned CCMask = WorkingMI.getOperand(i: 4).getImm();
    WorkingMI.getOperand(i: 4).setImm(CCMask ^ CCValid);
    // WorkingMI is already the clone (if any), so pass NewMI = false.
    return TargetInstrInfo::commuteInstructionImpl(MI&: WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  default:
    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  }
}
323 | |
// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
// A "simple" move has operand 1 as a frame index, a zero displacement in
// operand 2, and no index register in operand 3.
static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI.getDesc();
  if ((MCID.TSFlags & Flag) && MI.getOperand(i: 1).isFI() &&
      MI.getOperand(i: 2).getImm() == 0 && MI.getOperand(i: 3).getReg() == 0) {
    FrameIndex = MI.getOperand(i: 1).getIndex();
    return MI.getOperand(i: 0).getReg();
  }
  return 0;
}
339 | |
// Return the destination register if MI is a simple load from a stack
// slot (setting FrameIndex to that slot), or 0 otherwise.
Register SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, Flag: SystemZII::SimpleBDXLoad);
}
344 | |
// Return the source register if MI is a simple store to a stack slot
// (setting FrameIndex to that slot), or 0 otherwise.
Register SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, Flag: SystemZII::SimpleBDXStore);
}
349 | |
// Post-frame-index-elimination variant of isLoadFromStackSlot: since the
// frame-index operand has already been rewritten, the slot is recovered
// from the instruction's memory operands instead.  Returns the loaded
// register and sets FrameIndex on success, or returns 0.
Register SystemZInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                     int &FrameIndex) const {
  // if this is not a simple load from memory, it's not a load from stack slot
  // either.
  const MCInstrDesc &MCID = MI.getDesc();
  if (!(MCID.TSFlags & SystemZII::SimpleBDXLoad))
    return 0;

  // This version of isLoadFromStackSlot should only be used post frame-index
  // elimination.
  assert(!MI.getOperand(1).isFI());

  // Now attempt to derive frame index from MachineMemOperands.
  SmallVector<const MachineMemOperand *, 1> Accesses;
  if (hasLoadFromStackSlot(MI, Accesses)) {
    FrameIndex =
        cast<FixedStackPseudoSourceValue>(Val: Accesses.front()->getPseudoValue())
            ->getFrameIndex();
    return MI.getOperand(i: 0).getReg();
  }
  return 0;
}
372 | |
// Post-frame-index-elimination variant of isStoreToStackSlot; mirrors
// isLoadFromStackSlotPostFE but for stores.  Returns the stored register
// and sets FrameIndex on success, or returns 0.
Register SystemZInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                    int &FrameIndex) const {
  // if this is not a simple store to memory, it's not a store to stack slot
  // either.
  const MCInstrDesc &MCID = MI.getDesc();
  if (!(MCID.TSFlags & SystemZII::SimpleBDXStore))
    return 0;

  // This version of isStoreToStackSlot should only be used post frame-index
  // elimination.
  assert(!MI.getOperand(1).isFI());

  // Now attempt to derive frame index from MachineMemOperands.
  SmallVector<const MachineMemOperand *, 1> Accesses;
  if (hasStoreToStackSlot(MI, Accesses)) {
    FrameIndex =
        cast<FixedStackPseudoSourceValue>(Val: Accesses.front()->getPseudoValue())
            ->getFrameIndex();
    return MI.getOperand(i: 0).getReg();
  }
  return 0;
}
395 | |
// Return true if MI is an MVC that copies the entire contents of one stack
// slot to another, setting DestFrameIndex and SrcFrameIndex to the two
// slots.  The copy only qualifies when both displacements are zero and the
// MVC length exactly matches both objects' sizes.
bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
  if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(i: 0).isFI() ||
      MI.getOperand(i: 1).getImm() != 0 || !MI.getOperand(i: 3).isFI() ||
      MI.getOperand(i: 4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI.getOperand(i: 2).getImm();
  unsigned FI1 = MI.getOperand(i: 0).getIndex();
  unsigned FI2 = MI.getOperand(i: 3).getIndex();
  if (MFI.getObjectSize(ObjectIdx: FI1) != Length ||
      MFI.getObjectSize(ObjectIdx: FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}
418 | |
// Implements TargetInstrInfo::analyzeBranch: inspect MBB's terminators and
// fill in TBB/FBB/Cond.  Returns true when the branch sequence cannot be
// analyzed.  When AllowModify is set, dead code after an unconditional
// branch and fall-through branches may be deleted.
bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(MI: *I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(MI: *I));
    if (!Branch.hasMBBTarget())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.getMBBTarget();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      MBB.erase(I: std::next(x: I), E: MBB.end());

      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(MBB: Branch.getMBBTarget())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.getMBBTarget();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.getMBBTarget();
      // Cond holds (CCValid, CCMask) describing the branch condition.
      Cond.push_back(Elt: MachineOperand::CreateImm(Val: Branch.CCValid));
      Cond.push_back(Elt: MachineOperand::CreateImm(Val: Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch" );

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.getMBBTarget())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does.  Should be easy on Z!
    return false;
  }

  return false;
}
509 | |
// Remove the branch instructions at the end of MBB, working upwards until
// a non-branch (or indirect branch) is found.  Returns the number of
// instructions removed.  BytesRemoved is not supported and must be null.
unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled" );

  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (!I->isBranch())
      break;
    // Leave branches without a direct MBB target (indirect branches) alone.
    if (!getBranchInfo(MI: *I).hasMBBTarget())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}
534 | |
535 | bool SystemZInstrInfo:: |
536 | reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { |
537 | assert(Cond.size() == 2 && "Invalid condition" ); |
538 | Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm()); |
539 | return false; |
540 | } |
541 | |
// Insert branch code at the end of MBB: an unconditional J when Cond is
// empty, otherwise a BRC (and an extra J to FBB for two-way branches).
// Returns the number of instructions inserted.  BytesAdded is not
// supported and must be null.
unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  // In this function we output 32-bit branches, which should always
  // have enough range.  They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough" );
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!" );
  assert(!BytesAdded && "code size not handled" );

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!" );
    BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: SystemZ::J)).addMBB(MBB: TBB);
    return 1;
  }

  // Conditional branch.  Cond is the (CCValid, CCMask) pair from
  // analyzeBranch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: SystemZ::BRC))
    .addImm(Val: CCValid).addImm(Val: CCMask).addMBB(MBB: TBB);
  ++Count;

  if (FBB) {
    // Two-way Conditional branch. Insert the second branch.
    BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: SystemZ::J)).addMBB(MBB: FBB);
    ++Count;
  }
  return Count;
}
580 | |
581 | bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, |
582 | Register &SrcReg2, int64_t &Mask, |
583 | int64_t &Value) const { |
584 | assert(MI.isCompare() && "Caller should have checked for a comparison" ); |
585 | |
586 | if (MI.getNumExplicitOperands() == 2 && MI.getOperand(i: 0).isReg() && |
587 | MI.getOperand(i: 1).isImm()) { |
588 | SrcReg = MI.getOperand(i: 0).getReg(); |
589 | SrcReg2 = 0; |
590 | Value = MI.getOperand(i: 1).getImm(); |
591 | Mask = ~0; |
592 | return true; |
593 | } |
594 | |
595 | return false; |
596 | } |
597 | |
// Return true if a select of TrueReg/FalseReg under condition Pred can be
// materialized (via LOCR-family instructions), filling in the cycle-count
// estimates used by the if-converter.
bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                       ArrayRef<MachineOperand> Pred,
                                       Register DstReg, Register TrueReg,
                                       Register FalseReg, int &CondCycles,
                                       int &TrueCycles,
                                       int &FalseCycles) const {
  // Not all subtargets have LOCR instructions.
  if (!STI.hasLoadStoreOnCond())
    return false;
  if (Pred.size() != 2)
    return false;

  // Check register classes: both inputs must share a common subclass.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
    RI.getCommonSubClass(A: MRI.getRegClass(Reg: TrueReg), B: MRI.getRegClass(Reg: FalseReg));
  if (!RC)
    return false;

  // We have LOCR instructions for 32 and 64 bit general purpose registers.
  // GRX32 (high/low mux) additionally requires load/store-on-condition 2.
  if ((STI.hasLoadStoreOnCond2() &&
       SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
      SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    CondCycles = 2;
    TrueCycles = 2;
    FalseCycles = 2;
    return true;
  }

  // Can't do anything else.
  return false;
}
631 | |
// Insert a select of TrueReg/FalseReg into DstReg before I, conditioned on
// Pred (a CCValid/CCMask pair).  Chooses SELR/SELGR when available,
// otherwise a LOCR-family instruction; callers must have validated the
// operation with canInsertSelect.
void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, Register DstReg,
                                    ArrayRef<MachineOperand> Pred,
                                    Register TrueReg,
                                    Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(Reg: DstReg);

  assert(Pred.size() == 2 && "Invalid condition" );
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();

  unsigned Opc;
  if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELRMux;
    else if (STI.hasLoadStoreOnCond2())
      Opc = SystemZ::LOCRMux;
    else {
      // No mux form available: LOCR only works on low GR32s, so constrain
      // the destination and copy both inputs into fresh GR32 vregs first.
      Opc = SystemZ::LOCR;
      MRI.constrainRegClass(Reg: DstReg, RC: &SystemZ::GR32BitRegClass);
      Register TReg = MRI.createVirtualRegister(RegClass: &SystemZ::GR32BitRegClass);
      Register FReg = MRI.createVirtualRegister(RegClass: &SystemZ::GR32BitRegClass);
      BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: TargetOpcode::COPY), DestReg: TReg).addReg(RegNo: TrueReg);
      BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: TargetOpcode::COPY), DestReg: FReg).addReg(RegNo: FalseReg);
      TrueReg = TReg;
      FalseReg = FReg;
    }
  } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELGR;
    else
      Opc = SystemZ::LOCGR;
  } else
    llvm_unreachable("Invalid register class" );

  // Note the operand order: the false value comes first, so the CC mask
  // selects TrueReg when the condition holds.
  BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: Opc), DestReg: DstReg)
    .addReg(RegNo: FalseReg).addReg(RegNo: TrueReg)
    .addImm(Val: CCValid).addImm(Val: CCMask);
}
673 | |
// Try to fold the immediate defined by DefMI (which defines Reg) into
// UseMI.  Two patterns are handled:
//  1. A zero VGBM feeding a COPY into a GR128: rewritten into a
//     REG_SEQUENCE of two LGHI-loaded GR64 halves.
//  2. An LHI/LHIMux/LGHI feeding a select / load-on-condition: rewritten
//     into the corresponding load-halfword-immediate-on-condition form.
// Returns true and erases DefMI (when it has no remaining uses) on success.
bool SystemZInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                     Register Reg,
                                     MachineRegisterInfo *MRI) const {
  unsigned DefOpc = DefMI.getOpcode();

  if (DefOpc == SystemZ::VGBM) {
    int64_t ImmVal = DefMI.getOperand(i: 1).getImm();
    if (ImmVal != 0) // TODO: Handle other values
      return false;

    // Fold gr128 = COPY (vr128 VGBM imm)
    //
    // %tmp:gr64 = LGHI 0
    // to gr128 = REG_SEQUENCE %tmp, %tmp
    assert(DefMI.getOperand(0).getReg() == Reg);

    if (!UseMI.isCopy())
      return false;

    Register CopyDstReg = UseMI.getOperand(i: 0).getReg();
    if (CopyDstReg.isVirtual() &&
        MRI->getRegClass(Reg: CopyDstReg) == &SystemZ::GR128BitRegClass &&
        MRI->hasOneNonDBGUse(RegNo: Reg)) {
      // TODO: Handle physical registers
      // TODO: Handle gr64 uses with subregister indexes
      // TODO: Should this multi-use cases?
      Register TmpReg = MRI->createVirtualRegister(RegClass: &SystemZ::GR64BitRegClass);
      MachineBasicBlock &MBB = *UseMI.getParent();

      loadImmediate(MBB, MBBI: UseMI.getIterator(), Reg: TmpReg, Value: ImmVal);

      // Turn the COPY into REG_SEQUENCE %tmp, subreg_h64, %tmp, subreg_l64.
      UseMI.setDesc(get(Opcode: SystemZ::REG_SEQUENCE));
      UseMI.getOperand(i: 1).setReg(TmpReg);
      MachineInstrBuilder(*MBB.getParent(), &UseMI)
          .addImm(Val: SystemZ::subreg_h64)
          .addReg(RegNo: TmpReg)
          .addImm(Val: SystemZ::subreg_l64);

      if (MRI->use_nodbg_empty(RegNo: Reg))
        DefMI.eraseFromParent();
      return true;
    }

    return false;
  }

  if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
      DefOpc != SystemZ::LGHI)
    return false;
  if (DefMI.getOperand(i: 0).getReg() != Reg)
    return false;
  int32_t ImmVal = (int32_t)DefMI.getOperand(i: 1).getImm();

  // Map the select / load-on-condition use onto its immediate form.  When
  // Reg appears as operand 1 rather than operand 2, the instruction must
  // be commuted first (CommuteIdx records this).
  unsigned UseOpc = UseMI.getOpcode();
  unsigned NewUseOpc;
  unsigned UseIdx;
  int CommuteIdx = -1;
  bool TieOps = false;
  switch (UseOpc) {
  case SystemZ::SELRMux:
    TieOps = true;
    [[fallthrough]];
  case SystemZ::LOCRMux:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCHIMux;
    if (UseMI.getOperand(i: 2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(i: 1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  case SystemZ::SELGR:
    TieOps = true;
    [[fallthrough]];
  case SystemZ::LOCGR:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCGHI;
    if (UseMI.getOperand(i: 2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(i: 1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  default:
    return false;
  }

  if (CommuteIdx != -1)
    if (!commuteInstruction(MI&: UseMI, NewMI: false, OpIdx1: CommuteIdx, OpIdx2: UseIdx))
      return false;

  // Capture single-use status before mutating UseMI, then rewrite it.
  bool DeleteDef = MRI->hasOneNonDBGUse(RegNo: Reg);
  UseMI.setDesc(get(Opcode: NewUseOpc));
  if (TieOps)
    UseMI.tieOperands(DefIdx: 0, UseIdx: 1);
  UseMI.getOperand(i: UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}
779 | |
780 | bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const { |
781 | unsigned Opcode = MI.getOpcode(); |
782 | if (Opcode == SystemZ::Return || |
783 | Opcode == SystemZ::Return_XPLINK || |
784 | Opcode == SystemZ::Trap || |
785 | Opcode == SystemZ::CallJG || |
786 | Opcode == SystemZ::CallBR) |
787 | return true; |
788 | return false; |
789 | } |
790 | |
791 | bool SystemZInstrInfo:: |
792 | isProfitableToIfCvt(MachineBasicBlock &MBB, |
793 | unsigned NumCycles, unsigned , |
794 | BranchProbability Probability) const { |
795 | // Avoid using conditional returns at the end of a loop (since then |
796 | // we'd need to emit an unconditional branch to the beginning anyway, |
797 | // making the loop body longer). This doesn't apply for low-probability |
798 | // loops (eg. compare-and-swap retry), so just decide based on branch |
799 | // probability instead of looping structure. |
800 | // However, since Compare and Trap instructions cost the same as a regular |
801 | // Compare instruction, we should allow the if conversion to convert this |
802 | // into a Conditional Compare regardless of the branch probability. |
803 | if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap && |
804 | MBB.succ_empty() && Probability < BranchProbability(1, 8)) |
805 | return false; |
806 | // For now only convert single instructions. |
807 | return NumCycles == 1; |
808 | } |
809 | |
810 | bool SystemZInstrInfo:: |
811 | isProfitableToIfCvt(MachineBasicBlock &TMBB, |
812 | unsigned NumCyclesT, unsigned , |
813 | MachineBasicBlock &FMBB, |
814 | unsigned NumCyclesF, unsigned , |
815 | BranchProbability Probability) const { |
816 | // For now avoid converting mutually-exclusive cases. |
817 | return false; |
818 | } |
819 | |
820 | bool SystemZInstrInfo:: |
821 | isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, |
822 | BranchProbability Probability) const { |
823 | // For now only duplicate single instructions. |
824 | return NumCycles == 1; |
825 | } |
826 | |
827 | bool SystemZInstrInfo::PredicateInstruction( |
828 | MachineInstr &MI, ArrayRef<MachineOperand> Pred) const { |
829 | assert(Pred.size() == 2 && "Invalid condition" ); |
830 | unsigned CCValid = Pred[0].getImm(); |
831 | unsigned CCMask = Pred[1].getImm(); |
832 | assert(CCMask > 0 && CCMask < 15 && "Invalid predicate" ); |
833 | unsigned Opcode = MI.getOpcode(); |
834 | if (Opcode == SystemZ::Trap) { |
835 | MI.setDesc(get(Opcode: SystemZ::CondTrap)); |
836 | MachineInstrBuilder(*MI.getParent()->getParent(), MI) |
837 | .addImm(Val: CCValid).addImm(Val: CCMask) |
838 | .addReg(RegNo: SystemZ::CC, flags: RegState::Implicit); |
839 | return true; |
840 | } |
841 | if (Opcode == SystemZ::Return || Opcode == SystemZ::Return_XPLINK) { |
842 | MI.setDesc(get(Opcode: Opcode == SystemZ::Return ? SystemZ::CondReturn |
843 | : SystemZ::CondReturn_XPLINK)); |
844 | MachineInstrBuilder(*MI.getParent()->getParent(), MI) |
845 | .addImm(Val: CCValid) |
846 | .addImm(Val: CCMask) |
847 | .addReg(RegNo: SystemZ::CC, flags: RegState::Implicit); |
848 | return true; |
849 | } |
850 | if (Opcode == SystemZ::CallJG) { |
851 | MachineOperand FirstOp = MI.getOperand(i: 0); |
852 | const uint32_t *RegMask = MI.getOperand(i: 1).getRegMask(); |
853 | MI.removeOperand(OpNo: 1); |
854 | MI.removeOperand(OpNo: 0); |
855 | MI.setDesc(get(Opcode: SystemZ::CallBRCL)); |
856 | MachineInstrBuilder(*MI.getParent()->getParent(), MI) |
857 | .addImm(Val: CCValid) |
858 | .addImm(Val: CCMask) |
859 | .add(MO: FirstOp) |
860 | .addRegMask(Mask: RegMask) |
861 | .addReg(RegNo: SystemZ::CC, flags: RegState::Implicit); |
862 | return true; |
863 | } |
864 | if (Opcode == SystemZ::CallBR) { |
865 | MachineOperand Target = MI.getOperand(i: 0); |
866 | const uint32_t *RegMask = MI.getOperand(i: 1).getRegMask(); |
867 | MI.removeOperand(OpNo: 1); |
868 | MI.removeOperand(OpNo: 0); |
869 | MI.setDesc(get(Opcode: SystemZ::CallBCR)); |
870 | MachineInstrBuilder(*MI.getParent()->getParent(), MI) |
871 | .addImm(Val: CCValid).addImm(Val: CCMask) |
872 | .add(MO: Target) |
873 | .addRegMask(Mask: RegMask) |
874 | .addReg(RegNo: SystemZ::CC, flags: RegState::Implicit); |
875 | return true; |
876 | } |
877 | return false; |
878 | } |
879 | |
// Emit a physical-register copy from SrcReg to DestReg before MBBI.
// Handles same-class copies as well as the SystemZ cross-bank cases
// (GPR pair <-> vector, FP128 <-> vector, GPR pair -> FP128, GPR -> CC),
// splitting into multiple instructions where no single copy exists.
void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   const DebugLoc &DL, Register DestReg,
                                   Register SrcReg, bool KillSrc,
                                   bool RenamableDest,
                                   bool RenamableSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
  // super register in case one of the subregs is undefined.
  // This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(Reg1: DestReg, Reg2: SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, DestReg: RI.getSubReg(Reg: DestReg, Idx: SystemZ::subreg_h64),
                SrcReg: RI.getSubReg(Reg: SrcReg, Idx: SystemZ::subreg_h64), KillSrc);
    // std::prev(MBBI) is the copy just emitted; attach the implicit use.
    MachineInstrBuilder(*MBB.getParent(), std::prev(x: MBBI))
      .addReg(RegNo: SrcReg, flags: RegState::Implicit);
    copyPhysReg(MBB, MBBI, DL, DestReg: RI.getSubReg(Reg: DestReg, Idx: SystemZ::subreg_l64),
                SrcReg: RI.getSubReg(Reg: SrcReg, Idx: SystemZ::subreg_l64), KillSrc);
    // The kill of the full 128-bit source goes on the second (last) copy.
    MachineInstrBuilder(*MBB.getParent(), std::prev(x: MBBI))
      .addReg(RegNo: SrcReg, flags: (getKillRegState(B: KillSrc) | RegState::Implicit));
    return;
  }

  if (SystemZ::GRX32BitRegClass.contains(Reg1: DestReg, Reg2: SrcReg)) {
    // GRX32 covers both low and high 32-bit GPR halves; emitGRX32Move picks
    // the right opcode for each low/high combination.
    emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, LowLowOpcode: SystemZ::LR, Size: 32, KillSrc,
                  UndefSrc: false);
    return;
  }

  // Move 128-bit floating-point values between VR128 and FP128.
  if (SystemZ::VR128BitRegClass.contains(Reg: DestReg) &&
      SystemZ::FP128BitRegClass.contains(Reg: SrcReg)) {
    // Find the VR128 registers that overlap the two 64-bit halves of the
    // FP128 pair, then merge them into the destination vector register.
    MCRegister SrcRegHi =
      RI.getMatchingSuperReg(Reg: RI.getSubReg(Reg: SrcReg, Idx: SystemZ::subreg_h64),
                             SubIdx: SystemZ::subreg_h64, RC: &SystemZ::VR128BitRegClass);
    MCRegister SrcRegLo =
      RI.getMatchingSuperReg(Reg: RI.getSubReg(Reg: SrcReg, Idx: SystemZ::subreg_l64),
                             SubIdx: SystemZ::subreg_h64, RC: &SystemZ::VR128BitRegClass);

    BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: SystemZ::VMRHG), DestReg)
      .addReg(RegNo: SrcRegHi, flags: getKillRegState(B: KillSrc))
      .addReg(RegNo: SrcRegLo, flags: getKillRegState(B: KillSrc));
    return;
  }
  if (SystemZ::FP128BitRegClass.contains(Reg: DestReg) &&
      SystemZ::VR128BitRegClass.contains(Reg: SrcReg)) {
    // Reverse direction: split the vector register into the two VR128
    // super-registers of the FP128 destination halves.
    MCRegister DestRegHi =
      RI.getMatchingSuperReg(Reg: RI.getSubReg(Reg: DestReg, Idx: SystemZ::subreg_h64),
                             SubIdx: SystemZ::subreg_h64, RC: &SystemZ::VR128BitRegClass);
    MCRegister DestRegLo =
      RI.getMatchingSuperReg(Reg: RI.getSubReg(Reg: DestReg, Idx: SystemZ::subreg_l64),
                             SubIdx: SystemZ::subreg_h64, RC: &SystemZ::VR128BitRegClass);

    // Skip the high copy if source and destination-high already coincide.
    if (DestRegHi != SrcReg.asMCReg())
      copyPhysReg(MBB, MBBI, DL, DestReg: DestRegHi, SrcReg, KillSrc: false);
    // VREPG with index 1 replicates element 1 (the low doubleword).
    BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: SystemZ::VREPG), DestReg: DestRegLo)
      .addReg(RegNo: SrcReg, flags: getKillRegState(B: KillSrc)).addImm(Val: 1);
    return;
  }

  if (SystemZ::FP128BitRegClass.contains(Reg: DestReg) &&
      SystemZ::GR128BitRegClass.contains(Reg: SrcReg)) {
    // GPR pair -> FP pair, one LDGR per 64-bit half.
    MCRegister DestRegHi = RI.getSubReg(Reg: DestReg, Idx: SystemZ::subreg_h64);
    MCRegister DestRegLo = RI.getSubReg(Reg: DestReg, Idx: SystemZ::subreg_l64);
    MCRegister SrcRegHi = RI.getSubReg(Reg: SrcReg, Idx: SystemZ::subreg_h64);
    MCRegister SrcRegLo = RI.getSubReg(Reg: SrcReg, Idx: SystemZ::subreg_l64);

    // The implicit def of the full FP128 register marks the whole pair as
    // defined even though only one half is written here.
    BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: SystemZ::LDGR), DestReg: DestRegHi)
      .addReg(RegNo: SrcRegHi)
      .addReg(RegNo: DestReg, flags: RegState::ImplicitDefine);

    BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: SystemZ::LDGR), DestReg: DestRegLo)
      .addReg(RegNo: SrcRegLo, flags: getKillRegState(B: KillSrc));
    return;
  }

  // Move CC value from a GR32.
  if (DestReg == SystemZ::CC) {
    // TMLH/TMHH test the two IPM condition-code bits of the source,
    // thereby re-materializing the condition code from an IPM result.
    unsigned Opcode =
      SystemZ::GR32BitRegClass.contains(Reg: SrcReg) ? SystemZ::TMLH : SystemZ::TMHH;
    BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode))
      .addReg(RegNo: SrcReg, flags: getKillRegState(B: KillSrc))
      .addImm(Val: 3 << (SystemZ::IPM_CC - 16));
    return;
  }

  if (SystemZ::GR128BitRegClass.contains(Reg: DestReg) &&
      SystemZ::VR128BitRegClass.contains(Reg: SrcReg)) {
    // Extract each 64-bit vector element into a GPR with VLGVG. The first
    // extraction implicitly defines the whole GR128 pair.
    MCRegister DestH64 = RI.getSubReg(Reg: DestReg, Idx: SystemZ::subreg_h64);
    MCRegister DestL64 = RI.getSubReg(Reg: DestReg, Idx: SystemZ::subreg_l64);

    BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: SystemZ::VLGVG), DestReg: DestH64)
        .addReg(RegNo: SrcReg)
        .addReg(RegNo: SystemZ::NoRegister)
        .addImm(Val: 0)
        .addDef(RegNo: DestReg, Flags: RegState::Implicit);
    BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: SystemZ::VLGVG), DestReg: DestL64)
        .addReg(RegNo: SrcReg, flags: getKillRegState(B: KillSrc))
        .addReg(RegNo: SystemZ::NoRegister)
        .addImm(Val: 1);
    return;
  }

  if (SystemZ::VR128BitRegClass.contains(Reg: DestReg) &&
      SystemZ::GR128BitRegClass.contains(Reg: SrcReg)) {
    // VLVGP packs two GPRs into one vector register in a single instruction.
    BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: SystemZ::VLVGP), DestReg)
      .addReg(RegNo: RI.getSubReg(Reg: SrcReg, Idx: SystemZ::subreg_h64))
      .addReg(RegNo: RI.getSubReg(Reg: SrcReg, Idx: SystemZ::subreg_l64));
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR64BitRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP16BitRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
    Opcode = STI.hasVector() ? SystemZ::LDR16 : SystemZ::LER16;
  else if (SystemZ::FP32BitRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
    // For z13 we prefer LDR over LER to avoid partial register dependencies.
    Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
    Opcode = SystemZ::LXR;
  else if (SystemZ::VR32BitRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
    Opcode = SystemZ::VLR32;
  else if (SystemZ::VR64BitRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
    Opcode = SystemZ::VLR64;
  else if (SystemZ::VR128BitRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
    Opcode = SystemZ::VLR;
  else if (SystemZ::AR32BitRegClass.contains(Reg1: DestReg, Reg2: SrcReg))
    Opcode = SystemZ::CPYA;
  else if (SystemZ::GR64BitRegClass.contains(Reg: DestReg) &&
           SystemZ::FP64BitRegClass.contains(Reg: SrcReg))
    Opcode = SystemZ::LGDR;
  else if (SystemZ::FP64BitRegClass.contains(Reg: DestReg) &&
           SystemZ::GR64BitRegClass.contains(Reg: SrcReg))
    Opcode = SystemZ::LDGR;
  else
    llvm_unreachable("Impossible reg-to-reg copy" );

  BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode), DestReg)
    .addReg(RegNo: SrcReg, flags: getKillRegState(B: KillSrc));
}
1022 | |
1023 | void SystemZInstrInfo::storeRegToStackSlot( |
1024 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, |
1025 | bool isKill, int FrameIdx, const TargetRegisterClass *RC, |
1026 | const TargetRegisterInfo *TRI, Register VReg, |
1027 | MachineInstr::MIFlag Flags) const { |
1028 | DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); |
1029 | |
1030 | // Callers may expect a single instruction, so keep 128-bit moves |
1031 | // together for now and lower them after register allocation. |
1032 | unsigned LoadOpcode, StoreOpcode; |
1033 | getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode); |
1034 | addFrameReference(MIB: BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: StoreOpcode)) |
1035 | .addReg(RegNo: SrcReg, flags: getKillRegState(B: isKill)), |
1036 | FI: FrameIdx); |
1037 | } |
1038 | |
1039 | void SystemZInstrInfo::loadRegFromStackSlot( |
1040 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, |
1041 | int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, |
1042 | Register VReg, MachineInstr::MIFlag Flags) const { |
1043 | DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); |
1044 | |
1045 | // Callers may expect a single instruction, so keep 128-bit moves |
1046 | // together for now and lower them after register allocation. |
1047 | unsigned LoadOpcode, StoreOpcode; |
1048 | getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode); |
1049 | addFrameReference(MIB: BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: LoadOpcode), DestReg), |
1050 | FI: FrameIdx); |
1051 | } |
1052 | |
1053 | // Return true if MI is a simple load or store with a 12-bit displacement |
1054 | // and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores. |
1055 | static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) { |
1056 | const MCInstrDesc &MCID = MI->getDesc(); |
1057 | return ((MCID.TSFlags & Flag) && |
1058 | isUInt<12>(x: MI->getOperand(i: 2).getImm()) && |
1059 | MI->getOperand(i: 3).getReg() == 0); |
1060 | } |
1061 | |
namespace {

// Describes an AND-with-immediate instruction: the width of the register
// being operated on and the placement of the immediate field within it.
// A default-constructed LogicOp (RegSize == 0) means "not an AND immediate".
struct LogicOp {
  LogicOp() = default;
  LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
    : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

  // Valid (recognized) entries have a nonzero register size.
  explicit operator bool() const { return RegSize; }

  unsigned RegSize = 0;  // Width of the register operand in bits (32 or 64).
  unsigned ImmLSB = 0;   // Bit position of the immediate's least significant bit.
  unsigned ImmSize = 0;  // Width of the immediate field in bits (16 or 32).
};

} // end anonymous namespace
1077 | |
1078 | static LogicOp interpretAndImmediate(unsigned Opcode) { |
1079 | switch (Opcode) { |
1080 | case SystemZ::NILMux: return LogicOp(32, 0, 16); |
1081 | case SystemZ::NIHMux: return LogicOp(32, 16, 16); |
1082 | case SystemZ::NILL64: return LogicOp(64, 0, 16); |
1083 | case SystemZ::NILH64: return LogicOp(64, 16, 16); |
1084 | case SystemZ::NIHL64: return LogicOp(64, 32, 16); |
1085 | case SystemZ::NIHH64: return LogicOp(64, 48, 16); |
1086 | case SystemZ::NIFMux: return LogicOp(32, 0, 32); |
1087 | case SystemZ::NILF64: return LogicOp(64, 0, 32); |
1088 | case SystemZ::NIHF64: return LogicOp(64, 32, 32); |
1089 | default: return LogicOp(); |
1090 | } |
1091 | } |
1092 | |
1093 | static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) { |
1094 | if (OldMI->registerDefIsDead(Reg: SystemZ::CC, /*TRI=*/nullptr)) { |
1095 | MachineOperand *CCDef = |
1096 | NewMI->findRegisterDefOperand(Reg: SystemZ::CC, /*TRI=*/nullptr); |
1097 | if (CCDef != nullptr) |
1098 | CCDef->setIsDead(true); |
1099 | } |
1100 | } |
1101 | |
1102 | static void transferMIFlag(MachineInstr *OldMI, MachineInstr *NewMI, |
1103 | MachineInstr::MIFlag Flag) { |
1104 | if (OldMI->getFlag(Flag)) |
1105 | NewMI->setFlag(Flag); |
1106 | } |
1107 | |
// Try to replace the two-address instruction MI with an equivalent
// three-address instruction. Currently the only case handled is turning a
// 32/64-bit AND-with-immediate into a rotate-and-insert (RISBG-type)
// instruction, which has separate source and destination operands.
// Returns the new instruction, or nullptr if no conversion applies.
// LV/LIS, when non-null, are updated to reflect the replacement.
MachineInstr *
SystemZInstrInfo::convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
                                        LiveIntervals *LIS) const {
  MachineBasicBlock *MBB = MI.getParent();

  // Try to convert an AND into an RISBG-type instruction.
  // TODO: It might be beneficial to select RISBG and shorten to AND instead.
  if (LogicOp And = interpretAndImmediate(Opcode: MI.getOpcode())) {
    // Place the immediate in its field within the full register width.
    uint64_t Imm = MI.getOperand(i: 2).getImm() << And.ImmLSB;
    // AND IMMEDIATE leaves the other bits of the register unchanged.
    Imm |= allOnes(Count: And.RegSize) & ~(allOnes(Count: And.ImmSize) << And.ImmLSB);
    unsigned Start, End;
    // Only masks expressible as a (wrap-around) contiguous bit range can be
    // implemented by a rotate-and-insert.
    if (isRxSBGMask(Mask: Imm, BitSize: And.RegSize, Start, End)) {
      unsigned NewOpcode;
      if (And.RegSize == 64) {
        NewOpcode = SystemZ::RISBG;
        // Prefer RISBGN if available, since it does not clobber CC.
        if (STI.hasMiscellaneousExtensions())
          NewOpcode = SystemZ::RISBGN;
      } else {
        // 32-bit form: bit positions are taken modulo 32.
        NewOpcode = SystemZ::RISBMux;
        Start &= 31;
        End &= 31;
      }
      MachineOperand &Dest = MI.getOperand(i: 0);
      MachineOperand &Src = MI.getOperand(i: 1);
      // Operands: dest, first-operand (unused, 0), source, I3 (start),
      // I4 (end; +128 presumably sets the zero-remaining-bits flag — see
      // the RISBG encoding), I5 (rotate amount 0).
      MachineInstrBuilder MIB =
        BuildMI(BB&: *MBB, I&: MI, MIMD: MI.getDebugLoc(), MCID: get(Opcode: NewOpcode))
        .add(MO: Dest)
        .addReg(RegNo: 0)
        .addReg(RegNo: Src.getReg(), flags: getKillRegState(B: Src.isKill()),
                SubReg: Src.getSubReg())
        .addImm(Val: Start)
        .addImm(Val: End + 128)
        .addImm(Val: 0);
      if (LV) {
        // Transfer any kill flags from the old instruction's operands.
        unsigned NumOps = MI.getNumOperands();
        for (unsigned I = 1; I < NumOps; ++I) {
          MachineOperand &Op = MI.getOperand(i: I);
          if (Op.isReg() && Op.isKill())
            LV->replaceKillInstruction(Reg: Op.getReg(), OldMI&: MI, NewMI&: *MIB);
        }
      }
      if (LIS)
        LIS->ReplaceMachineInstrInMaps(MI, NewMI&: *MIB);
      // Preserve a dead-CC marking if the AND's CC result was unused.
      transferDeadCC(OldMI: &MI, NewMI: MIB);
      return MIB;
    }
  }
  return nullptr;
}
1159 | |
1160 | bool SystemZInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst, |
1161 | bool Invert) const { |
1162 | unsigned Opc = Inst.getOpcode(); |
1163 | if (Invert) { |
1164 | auto InverseOpcode = getInverseOpcode(Opcode: Opc); |
1165 | if (!InverseOpcode) |
1166 | return false; |
1167 | Opc = *InverseOpcode; |
1168 | } |
1169 | |
1170 | switch (Opc) { |
1171 | default: |
1172 | break; |
1173 | // Adds and multiplications. |
1174 | case SystemZ::WFADB: |
1175 | case SystemZ::WFASB: |
1176 | case SystemZ::WFAXB: |
1177 | case SystemZ::VFADB: |
1178 | case SystemZ::VFASB: |
1179 | case SystemZ::WFMDB: |
1180 | case SystemZ::WFMSB: |
1181 | case SystemZ::WFMXB: |
1182 | case SystemZ::VFMDB: |
1183 | case SystemZ::VFMSB: |
1184 | return (Inst.getFlag(Flag: MachineInstr::MIFlag::FmReassoc) && |
1185 | Inst.getFlag(Flag: MachineInstr::MIFlag::FmNsz)); |
1186 | } |
1187 | |
1188 | return false; |
1189 | } |
1190 | |
1191 | std::optional<unsigned> |
1192 | SystemZInstrInfo::getInverseOpcode(unsigned Opcode) const { |
1193 | // fadd => fsub |
1194 | switch (Opcode) { |
1195 | case SystemZ::WFADB: |
1196 | return SystemZ::WFSDB; |
1197 | case SystemZ::WFASB: |
1198 | return SystemZ::WFSSB; |
1199 | case SystemZ::WFAXB: |
1200 | return SystemZ::WFSXB; |
1201 | case SystemZ::VFADB: |
1202 | return SystemZ::VFSDB; |
1203 | case SystemZ::VFASB: |
1204 | return SystemZ::VFSSB; |
1205 | // fsub => fadd |
1206 | case SystemZ::WFSDB: |
1207 | return SystemZ::WFADB; |
1208 | case SystemZ::WFSSB: |
1209 | return SystemZ::WFASB; |
1210 | case SystemZ::WFSXB: |
1211 | return SystemZ::WFAXB; |
1212 | case SystemZ::VFSDB: |
1213 | return SystemZ::VFADB; |
1214 | case SystemZ::VFSSB: |
1215 | return SystemZ::VFASB; |
1216 | default: |
1217 | return std::nullopt; |
1218 | } |
1219 | } |
1220 | |
1221 | MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl( |
1222 | MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, |
1223 | MachineBasicBlock::iterator InsertPt, int FrameIndex, |
1224 | LiveIntervals *LIS, VirtRegMap *VRM) const { |
1225 | const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); |
1226 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
1227 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
1228 | unsigned Size = MFI.getObjectSize(ObjectIdx: FrameIndex); |
1229 | unsigned Opcode = MI.getOpcode(); |
1230 | |
1231 | // Check CC liveness if new instruction introduces a dead def of CC. |
1232 | SlotIndex MISlot = SlotIndex(); |
1233 | LiveRange *CCLiveRange = nullptr; |
1234 | bool CCLiveAtMI = true; |
1235 | if (LIS) { |
1236 | MISlot = LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot(); |
1237 | auto CCUnits = TRI->regunits(Reg: MCRegister::from(Val: SystemZ::CC)); |
1238 | assert(range_size(CCUnits) == 1 && "CC only has one reg unit." ); |
1239 | CCLiveRange = &LIS->getRegUnit(Unit: *CCUnits.begin()); |
1240 | CCLiveAtMI = CCLiveRange->liveAt(index: MISlot); |
1241 | } |
1242 | |
1243 | if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { |
1244 | if (!CCLiveAtMI && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) && |
1245 | isInt<8>(x: MI.getOperand(i: 2).getImm()) && !MI.getOperand(i: 3).getReg()) { |
1246 | // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST |
1247 | MachineInstr *BuiltMI = BuildMI(BB&: *InsertPt->getParent(), I: InsertPt, |
1248 | MIMD: MI.getDebugLoc(), MCID: get(Opcode: SystemZ::AGSI)) |
1249 | .addFrameIndex(Idx: FrameIndex) |
1250 | .addImm(Val: 0) |
1251 | .addImm(Val: MI.getOperand(i: 2).getImm()); |
1252 | BuiltMI->findRegisterDefOperand(Reg: SystemZ::CC, /*TRI=*/nullptr) |
1253 | ->setIsDead(true); |
1254 | CCLiveRange->createDeadDef(Def: MISlot, VNIAlloc&: LIS->getVNInfoAllocator()); |
1255 | return BuiltMI; |
1256 | } |
1257 | return nullptr; |
1258 | } |
1259 | |
1260 | // All other cases require a single operand. |
1261 | if (Ops.size() != 1) |
1262 | return nullptr; |
1263 | |
1264 | unsigned OpNum = Ops[0]; |
1265 | const TargetRegisterClass *RC = |
1266 | MF.getRegInfo().getRegClass(Reg: MI.getOperand(i: OpNum).getReg()); |
1267 | assert((Size * 8 == TRI->getRegSizeInBits(*RC) || |
1268 | (RC == &SystemZ::FP16BitRegClass && Size == 4 && !STI.hasVector())) && |
1269 | "Invalid size combination" ); |
1270 | (void)RC; |
1271 | |
1272 | if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 && |
1273 | isInt<8>(x: MI.getOperand(i: 2).getImm())) { |
1274 | // A(G)HI %reg, CONST -> A(G)SI %mem, CONST |
1275 | Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI); |
1276 | MachineInstr *BuiltMI = |
1277 | BuildMI(BB&: *InsertPt->getParent(), I: InsertPt, MIMD: MI.getDebugLoc(), MCID: get(Opcode)) |
1278 | .addFrameIndex(Idx: FrameIndex) |
1279 | .addImm(Val: 0) |
1280 | .addImm(Val: MI.getOperand(i: 2).getImm()); |
1281 | transferDeadCC(OldMI: &MI, NewMI: BuiltMI); |
1282 | transferMIFlag(OldMI: &MI, NewMI: BuiltMI, Flag: MachineInstr::NoSWrap); |
1283 | return BuiltMI; |
1284 | } |
1285 | |
1286 | if ((Opcode == SystemZ::ALFI && OpNum == 0 && |
1287 | isInt<8>(x: (int32_t)MI.getOperand(i: 2).getImm())) || |
1288 | (Opcode == SystemZ::ALGFI && OpNum == 0 && |
1289 | isInt<8>(x: (int64_t)MI.getOperand(i: 2).getImm()))) { |
1290 | // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST |
1291 | Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI); |
1292 | MachineInstr *BuiltMI = |
1293 | BuildMI(BB&: *InsertPt->getParent(), I: InsertPt, MIMD: MI.getDebugLoc(), MCID: get(Opcode)) |
1294 | .addFrameIndex(Idx: FrameIndex) |
1295 | .addImm(Val: 0) |
1296 | .addImm(Val: (int8_t)MI.getOperand(i: 2).getImm()); |
1297 | transferDeadCC(OldMI: &MI, NewMI: BuiltMI); |
1298 | return BuiltMI; |
1299 | } |
1300 | |
1301 | if ((Opcode == SystemZ::SLFI && OpNum == 0 && |
1302 | isInt<8>(x: (int32_t)-MI.getOperand(i: 2).getImm())) || |
1303 | (Opcode == SystemZ::SLGFI && OpNum == 0 && |
1304 | isInt<8>(x: (int64_t)-MI.getOperand(i: 2).getImm()))) { |
1305 | // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST |
1306 | Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI); |
1307 | MachineInstr *BuiltMI = |
1308 | BuildMI(BB&: *InsertPt->getParent(), I: InsertPt, MIMD: MI.getDebugLoc(), MCID: get(Opcode)) |
1309 | .addFrameIndex(Idx: FrameIndex) |
1310 | .addImm(Val: 0) |
1311 | .addImm(Val: (int8_t)-MI.getOperand(i: 2).getImm()); |
1312 | transferDeadCC(OldMI: &MI, NewMI: BuiltMI); |
1313 | return BuiltMI; |
1314 | } |
1315 | |
1316 | unsigned MemImmOpc = 0; |
1317 | switch (Opcode) { |
1318 | case SystemZ::LHIMux: |
1319 | case SystemZ::LHI: MemImmOpc = SystemZ::MVHI; break; |
1320 | case SystemZ::LGHI: MemImmOpc = SystemZ::MVGHI; break; |
1321 | case SystemZ::CHIMux: |
1322 | case SystemZ::CHI: MemImmOpc = SystemZ::CHSI; break; |
1323 | case SystemZ::CGHI: MemImmOpc = SystemZ::CGHSI; break; |
1324 | case SystemZ::CLFIMux: |
1325 | case SystemZ::CLFI: |
1326 | if (isUInt<16>(x: MI.getOperand(i: 1).getImm())) |
1327 | MemImmOpc = SystemZ::CLFHSI; |
1328 | break; |
1329 | case SystemZ::CLGFI: |
1330 | if (isUInt<16>(x: MI.getOperand(i: 1).getImm())) |
1331 | MemImmOpc = SystemZ::CLGHSI; |
1332 | break; |
1333 | default: break; |
1334 | } |
1335 | if (MemImmOpc) |
1336 | return BuildMI(BB&: *InsertPt->getParent(), I: InsertPt, MIMD: MI.getDebugLoc(), |
1337 | MCID: get(Opcode: MemImmOpc)) |
1338 | .addFrameIndex(Idx: FrameIndex) |
1339 | .addImm(Val: 0) |
1340 | .addImm(Val: MI.getOperand(i: 1).getImm()); |
1341 | |
1342 | if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) { |
1343 | bool Op0IsGPR = (Opcode == SystemZ::LGDR); |
1344 | bool Op1IsGPR = (Opcode == SystemZ::LDGR); |
1345 | // If we're spilling the destination of an LDGR or LGDR, store the |
1346 | // source register instead. |
1347 | if (OpNum == 0) { |
1348 | unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD; |
1349 | return BuildMI(BB&: *InsertPt->getParent(), I: InsertPt, MIMD: MI.getDebugLoc(), |
1350 | MCID: get(Opcode: StoreOpcode)) |
1351 | .add(MO: MI.getOperand(i: 1)) |
1352 | .addFrameIndex(Idx: FrameIndex) |
1353 | .addImm(Val: 0) |
1354 | .addReg(RegNo: 0); |
1355 | } |
1356 | // If we're spilling the source of an LDGR or LGDR, load the |
1357 | // destination register instead. |
1358 | if (OpNum == 1) { |
1359 | unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD; |
1360 | return BuildMI(BB&: *InsertPt->getParent(), I: InsertPt, MIMD: MI.getDebugLoc(), |
1361 | MCID: get(Opcode: LoadOpcode)) |
1362 | .add(MO: MI.getOperand(i: 0)) |
1363 | .addFrameIndex(Idx: FrameIndex) |
1364 | .addImm(Val: 0) |
1365 | .addReg(RegNo: 0); |
1366 | } |
1367 | } |
1368 | |
1369 | // Look for cases where the source of a simple store or the destination |
1370 | // of a simple load is being spilled. Try to use MVC instead. |
1371 | // |
1372 | // Although MVC is in practice a fast choice in these cases, it is still |
1373 | // logically a bytewise copy. This means that we cannot use it if the |
1374 | // load or store is volatile. We also wouldn't be able to use MVC if |
1375 | // the two memories partially overlap, but that case cannot occur here, |
1376 | // because we know that one of the memories is a full frame index. |
1377 | // |
1378 | // For performance reasons, we also want to avoid using MVC if the addresses |
1379 | // might be equal. We don't worry about that case here, because spill slot |
1380 | // coloring happens later, and because we have special code to remove |
1381 | // MVCs that turn out to be redundant. |
1382 | if (OpNum == 0 && MI.hasOneMemOperand()) { |
1383 | MachineMemOperand *MMO = *MI.memoperands_begin(); |
1384 | if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) { |
1385 | // Handle conversion of loads. |
1386 | if (isSimpleBD12Move(MI: &MI, Flag: SystemZII::SimpleBDXLoad)) { |
1387 | return BuildMI(BB&: *InsertPt->getParent(), I: InsertPt, MIMD: MI.getDebugLoc(), |
1388 | MCID: get(Opcode: SystemZ::MVC)) |
1389 | .addFrameIndex(Idx: FrameIndex) |
1390 | .addImm(Val: 0) |
1391 | .addImm(Val: Size) |
1392 | .add(MO: MI.getOperand(i: 1)) |
1393 | .addImm(Val: MI.getOperand(i: 2).getImm()) |
1394 | .addMemOperand(MMO); |
1395 | } |
1396 | // Handle conversion of stores. |
1397 | if (isSimpleBD12Move(MI: &MI, Flag: SystemZII::SimpleBDXStore)) { |
1398 | return BuildMI(BB&: *InsertPt->getParent(), I: InsertPt, MIMD: MI.getDebugLoc(), |
1399 | MCID: get(Opcode: SystemZ::MVC)) |
1400 | .add(MO: MI.getOperand(i: 1)) |
1401 | .addImm(Val: MI.getOperand(i: 2).getImm()) |
1402 | .addImm(Val: Size) |
1403 | .addFrameIndex(Idx: FrameIndex) |
1404 | .addImm(Val: 0) |
1405 | .addMemOperand(MMO); |
1406 | } |
1407 | } |
1408 | } |
1409 | |
1410 | // If the spilled operand is the final one or the instruction is |
1411 | // commutable, try to change <INSN>R into <INSN>. Don't introduce a def of |
1412 | // CC if it is live and MI does not define it. |
1413 | unsigned NumOps = MI.getNumExplicitOperands(); |
1414 | int MemOpcode = SystemZ::getMemOpcode(Opcode); |
1415 | if (MemOpcode == -1 || |
1416 | (CCLiveAtMI && !MI.definesRegister(Reg: SystemZ::CC, /*TRI=*/nullptr) && |
1417 | get(Opcode: MemOpcode).hasImplicitDefOfPhysReg(Reg: SystemZ::CC))) |
1418 | return nullptr; |
1419 | |
1420 | // Check if all other vregs have a usable allocation in the case of vector |
1421 | // to FP conversion. |
1422 | const MCInstrDesc &MCID = MI.getDesc(); |
1423 | for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) { |
1424 | const MCOperandInfo &MCOI = MCID.operands()[I]; |
1425 | if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum) |
1426 | continue; |
1427 | const TargetRegisterClass *RC = TRI->getRegClass(i: MCOI.RegClass); |
1428 | if (RC == &SystemZ::VR32BitRegClass || RC == &SystemZ::VR64BitRegClass) { |
1429 | Register Reg = MI.getOperand(i: I).getReg(); |
1430 | Register PhysReg = Reg.isVirtual() |
1431 | ? (VRM ? Register(VRM->getPhys(virtReg: Reg)) : Register()) |
1432 | : Reg; |
1433 | if (!PhysReg || |
1434 | !(SystemZ::FP32BitRegClass.contains(Reg: PhysReg) || |
1435 | SystemZ::FP64BitRegClass.contains(Reg: PhysReg) || |
1436 | SystemZ::VF128BitRegClass.contains(Reg: PhysReg))) |
1437 | return nullptr; |
1438 | } |
1439 | } |
1440 | // Fused multiply and add/sub need to have the same dst and accumulator reg. |
1441 | bool FusedFPOp = (Opcode == SystemZ::WFMADB || Opcode == SystemZ::WFMASB || |
1442 | Opcode == SystemZ::WFMSDB || Opcode == SystemZ::WFMSSB); |
1443 | if (FusedFPOp) { |
1444 | Register DstReg = VRM->getPhys(virtReg: MI.getOperand(i: 0).getReg()); |
1445 | Register AccReg = VRM->getPhys(virtReg: MI.getOperand(i: 3).getReg()); |
1446 | if (OpNum == 0 || OpNum == 3 || DstReg != AccReg) |
1447 | return nullptr; |
1448 | } |
1449 | |
1450 | // Try to swap compare operands if possible. |
1451 | bool NeedsCommute = false; |
1452 | if ((MI.getOpcode() == SystemZ::CR || MI.getOpcode() == SystemZ::CGR || |
1453 | MI.getOpcode() == SystemZ::CLR || MI.getOpcode() == SystemZ::CLGR || |
1454 | MI.getOpcode() == SystemZ::WFCDB || MI.getOpcode() == SystemZ::WFCSB || |
1455 | MI.getOpcode() == SystemZ::WFKDB || MI.getOpcode() == SystemZ::WFKSB) && |
1456 | OpNum == 0 && prepareCompareSwapOperands(MBBI: MI)) |
1457 | NeedsCommute = true; |
1458 | |
1459 | bool CCOperands = false; |
1460 | if (MI.getOpcode() == SystemZ::LOCRMux || MI.getOpcode() == SystemZ::LOCGR || |
1461 | MI.getOpcode() == SystemZ::SELRMux || MI.getOpcode() == SystemZ::SELGR) { |
1462 | assert(MI.getNumOperands() == 6 && NumOps == 5 && |
1463 | "LOCR/SELR instruction operands corrupt?" ); |
1464 | NumOps -= 2; |
1465 | CCOperands = true; |
1466 | } |
1467 | |
1468 | // See if this is a 3-address instruction that is convertible to 2-address |
1469 | // and suitable for folding below. Only try this with virtual registers |
1470 | // and a provided VRM (during regalloc). |
1471 | if (NumOps == 3 && SystemZ::getTargetMemOpcode(Opcode: MemOpcode) != -1) { |
1472 | if (VRM == nullptr) |
1473 | return nullptr; |
1474 | else { |
1475 | Register DstReg = MI.getOperand(i: 0).getReg(); |
1476 | Register DstPhys = |
1477 | (DstReg.isVirtual() ? Register(VRM->getPhys(virtReg: DstReg)) : DstReg); |
1478 | Register SrcReg = (OpNum == 2 ? MI.getOperand(i: 1).getReg() |
1479 | : ((OpNum == 1 && MI.isCommutable()) |
1480 | ? MI.getOperand(i: 2).getReg() |
1481 | : Register())); |
1482 | if (DstPhys && !SystemZ::GRH32BitRegClass.contains(Reg: DstPhys) && SrcReg && |
1483 | SrcReg.isVirtual() && DstPhys == VRM->getPhys(virtReg: SrcReg)) |
1484 | NeedsCommute = (OpNum == 1); |
1485 | else |
1486 | return nullptr; |
1487 | } |
1488 | } |
1489 | |
1490 | if ((OpNum == NumOps - 1) || NeedsCommute || FusedFPOp) { |
1491 | const MCInstrDesc &MemDesc = get(Opcode: MemOpcode); |
1492 | uint64_t AccessBytes = SystemZII::getAccessSize(Flags: MemDesc.TSFlags); |
1493 | assert(AccessBytes != 0 && "Size of access should be known" ); |
1494 | assert(AccessBytes <= Size && "Access outside the frame index" ); |
1495 | uint64_t Offset = Size - AccessBytes; |
1496 | MachineInstrBuilder MIB = BuildMI(BB&: *InsertPt->getParent(), I: InsertPt, |
1497 | MIMD: MI.getDebugLoc(), MCID: get(Opcode: MemOpcode)); |
1498 | if (MI.isCompare()) { |
1499 | assert(NumOps == 2 && "Expected 2 register operands for a compare." ); |
1500 | MIB.add(MO: MI.getOperand(i: NeedsCommute ? 1 : 0)); |
1501 | } |
1502 | else if (FusedFPOp) { |
1503 | MIB.add(MO: MI.getOperand(i: 0)); |
1504 | MIB.add(MO: MI.getOperand(i: 3)); |
1505 | MIB.add(MO: MI.getOperand(i: OpNum == 1 ? 2 : 1)); |
1506 | } |
1507 | else { |
1508 | MIB.add(MO: MI.getOperand(i: 0)); |
1509 | if (NeedsCommute) |
1510 | MIB.add(MO: MI.getOperand(i: 2)); |
1511 | else |
1512 | for (unsigned I = 1; I < OpNum; ++I) |
1513 | MIB.add(MO: MI.getOperand(i: I)); |
1514 | } |
1515 | MIB.addFrameIndex(Idx: FrameIndex).addImm(Val: Offset); |
1516 | if (MemDesc.TSFlags & SystemZII::HasIndex) |
1517 | MIB.addReg(RegNo: 0); |
1518 | if (CCOperands) { |
1519 | unsigned CCValid = MI.getOperand(i: NumOps).getImm(); |
1520 | unsigned CCMask = MI.getOperand(i: NumOps + 1).getImm(); |
1521 | MIB.addImm(Val: CCValid); |
1522 | MIB.addImm(Val: NeedsCommute ? CCMask ^ CCValid : CCMask); |
1523 | } |
1524 | if (MIB->definesRegister(Reg: SystemZ::CC, /*TRI=*/nullptr) && |
1525 | (!MI.definesRegister(Reg: SystemZ::CC, /*TRI=*/nullptr) || |
1526 | MI.registerDefIsDead(Reg: SystemZ::CC, /*TRI=*/nullptr))) { |
1527 | MIB->addRegisterDead(Reg: SystemZ::CC, RegInfo: TRI); |
1528 | if (CCLiveRange) |
1529 | CCLiveRange->createDeadDef(Def: MISlot, VNIAlloc&: LIS->getVNInfoAllocator()); |
1530 | } |
1531 | // Constrain the register classes if converted from a vector opcode. The |
1532 | // allocated regs are in an FP reg-class per previous check above. |
1533 | for (const MachineOperand &MO : MIB->operands()) |
1534 | if (MO.isReg() && MO.getReg().isVirtual()) { |
1535 | Register Reg = MO.getReg(); |
1536 | if (MRI.getRegClass(Reg) == &SystemZ::VR32BitRegClass) |
1537 | MRI.setRegClass(Reg, RC: &SystemZ::FP32BitRegClass); |
1538 | else if (MRI.getRegClass(Reg) == &SystemZ::VR64BitRegClass) |
1539 | MRI.setRegClass(Reg, RC: &SystemZ::FP64BitRegClass); |
1540 | else if (MRI.getRegClass(Reg) == &SystemZ::VR128BitRegClass) |
1541 | MRI.setRegClass(Reg, RC: &SystemZ::VF128BitRegClass); |
1542 | } |
1543 | |
1544 | transferDeadCC(OldMI: &MI, NewMI: MIB); |
1545 | transferMIFlag(OldMI: &MI, NewMI: MIB, Flag: MachineInstr::NoSWrap); |
1546 | transferMIFlag(OldMI: &MI, NewMI: MIB, Flag: MachineInstr::NoFPExcept); |
1547 | return MIB; |
1548 | } |
1549 | |
1550 | return nullptr; |
1551 | } |
1552 | |
// Try to fold the load LoadMI into the reg/reg FP operation MI, producing a
// single reg/mem instruction. Only a small set of vector-style scalar FP
// add/sub/mul opcodes is handled. Returns the new instruction, or nullptr if
// folding is not possible (wrong opcodes, CC conflict, or operand mismatch).
MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
    LiveIntervals *LIS) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  MachineBasicBlock *MBB = MI.getParent();

  // For reassociable FP operations, any loads have been purposefully left
  // unfolded so that MachineCombiner can do its work on reg/reg
  // opcodes. After that, as many loads as possible are now folded.
  // TODO: This may be beneficial with other opcodes as well as machine-sink
  // can move loads close to their user in a different MBB, which the isel
  // matcher did not see.
  // Map the 64-bit vector-style opcodes to their reg/mem FP counterparts;
  // the matching load is the 64-bit element load VL64.
  unsigned LoadOpc = 0;
  unsigned RegMemOpcode = 0;
  const TargetRegisterClass *FPRC = nullptr;
  RegMemOpcode = MI.getOpcode() == SystemZ::WFADB   ? SystemZ::ADB
                 : MI.getOpcode() == SystemZ::WFSDB ? SystemZ::SDB
                 : MI.getOpcode() == SystemZ::WFMDB ? SystemZ::MDB
                                                    : 0;
  if (RegMemOpcode) {
    LoadOpc = SystemZ::VL64;
    FPRC = &SystemZ::FP64BitRegClass;
  } else {
    // Same mapping for the 32-bit variants, fed by VL32.
    RegMemOpcode = MI.getOpcode() == SystemZ::WFASB   ? SystemZ::AEB
                   : MI.getOpcode() == SystemZ::WFSSB ? SystemZ::SEB
                   : MI.getOpcode() == SystemZ::WFMSB ? SystemZ::MEEB
                                                      : 0;
    if (RegMemOpcode) {
      LoadOpc = SystemZ::VL32;
      FPRC = &SystemZ::FP32BitRegClass;
    }
  }
  if (!RegMemOpcode || LoadMI.getOpcode() != LoadOpc)
    return nullptr;

  // If RegMemOpcode clobbers CC, first make sure CC is not live at this point.
  if (get(Opcode: RegMemOpcode).hasImplicitDefOfPhysReg(Reg: SystemZ::CC)) {
    assert(LoadMI.getParent() == MI.getParent() && "Assuming a local fold." );
    assert(LoadMI != InsertPt && "Assuming InsertPt not to be first in MBB." );
    // Walk backwards from the insertion point looking for the closest CC
    // definition; if it is not dead, the fold would clobber a live CC.
    // Reaching the block start without a def means CC liveness is decided
    // by the live-in set.
    for (MachineBasicBlock::iterator MII = std::prev(x: InsertPt);;
         --MII) {
      if (MII->definesRegister(Reg: SystemZ::CC, /*TRI=*/nullptr)) {
        if (!MII->registerDefIsDead(Reg: SystemZ::CC, /*TRI=*/nullptr))
          return nullptr;
        break;
      }
      if (MII == MBB->begin()) {
        if (MBB->isLiveIn(Reg: SystemZ::CC))
          return nullptr;
        break;
      }
    }
  }

  // Only a single-operand fold of the load's result into MI is supported.
  Register FoldAsLoadDefReg = LoadMI.getOperand(i: 0).getReg();
  if (Ops.size() != 1 || FoldAsLoadDefReg != MI.getOperand(i: Ops[0]).getReg())
    return nullptr;
  Register DstReg = MI.getOperand(i: 0).getReg();
  MachineOperand LHS = MI.getOperand(i: 1);
  MachineOperand RHS = MI.getOperand(i: 2);
  // RegMO is the operand that stays in a register after the fold.
  MachineOperand &RegMO = RHS.getReg() == FoldAsLoadDefReg ? LHS : RHS;
  // Subtraction is not commutable, so only the second source operand may
  // become the memory operand.
  if ((RegMemOpcode == SystemZ::SDB || RegMemOpcode == SystemZ::SEB) &&
      FoldAsLoadDefReg != RHS.getReg())
    return nullptr;

  // Rebuild MI as reg/mem, taking the address (base/disp/index) from LoadMI.
  MachineOperand &Base = LoadMI.getOperand(i: 1);
  MachineOperand &Disp = LoadMI.getOperand(i: 2);
  MachineOperand &Indx = LoadMI.getOperand(i: 3);
  MachineInstrBuilder MIB =
      BuildMI(BB&: *MI.getParent(), I: InsertPt, MIMD: MI.getDebugLoc(), MCID: get(Opcode: RegMemOpcode), DestReg: DstReg)
          .add(MO: RegMO)
          .add(MO: Base)
          .add(MO: Disp)
          .add(MO: Indx);
  // CC was verified dead above (or the opcode does not define it).
  MIB->addRegisterDead(Reg: SystemZ::CC, RegInfo: &RI);
  // The reg/mem opcodes require FP (not vector) register classes.
  MRI->setRegClass(Reg: DstReg, RC: FPRC);
  MRI->setRegClass(Reg: RegMO.getReg(), RC: FPRC);
  transferMIFlag(OldMI: &MI, NewMI: MIB, Flag: MachineInstr::NoFPExcept);

  return MIB;
}
1635 | |
// Expand SystemZ pseudo instructions after register allocation. Returns true
// when MI was a pseudo handled here (it has been rewritten in place or
// replaced by real instructions), false otherwise.
bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  // 128-bit moves: split into a pair of 64-bit (or FP) loads/stores.
  case SystemZ::L128:
    splitMove(MI, NewOpcode: SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, NewOpcode: SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, NewOpcode: SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, NewOpcode: SystemZ::STD);
    return true;

  // "Mux" memory pseudos: pick the low- or high-half GR32 opcode depending
  // on which register half was allocated.
  case SystemZ::LBMux:
    expandRXYPseudo(MI, LowOpcode: SystemZ::LB, HighOpcode: SystemZ::LBH);
    return true;

  case SystemZ::LHMux:
    expandRXYPseudo(MI, LowOpcode: SystemZ::LH, HighOpcode: SystemZ::LHH);
    return true;

  // Zero-extending register moves of a sub-register-sized value.
  case SystemZ::LLCRMux:
    expandZExtPseudo(MI, LowOpcode: SystemZ::LLCR, Size: 8);
    return true;

  case SystemZ::LLHRMux:
    expandZExtPseudo(MI, LowOpcode: SystemZ::LLHR, Size: 16);
    return true;

  case SystemZ::LLCMux:
    expandRXYPseudo(MI, LowOpcode: SystemZ::LLC, HighOpcode: SystemZ::LLCH);
    return true;

  case SystemZ::LLHMux:
    expandRXYPseudo(MI, LowOpcode: SystemZ::LLH, HighOpcode: SystemZ::LLHH);
    return true;

  case SystemZ::LMux:
    expandRXYPseudo(MI, LowOpcode: SystemZ::L, HighOpcode: SystemZ::LFH);
    return true;

  // Conditional load/store pseudos.
  case SystemZ::LOCMux:
    expandLOCPseudo(MI, LowOpcode: SystemZ::LOC, HighOpcode: SystemZ::LOCFH);
    return true;

  case SystemZ::LOCHIMux:
    expandLOCPseudo(MI, LowOpcode: SystemZ::LOCHI, HighOpcode: SystemZ::LOCHHI);
    return true;

  case SystemZ::STCMux:
    expandRXYPseudo(MI, LowOpcode: SystemZ::STC, HighOpcode: SystemZ::STCH);
    return true;

  case SystemZ::STHMux:
    expandRXYPseudo(MI, LowOpcode: SystemZ::STH, HighOpcode: SystemZ::STHH);
    return true;

  case SystemZ::STMux:
    expandRXYPseudo(MI, LowOpcode: SystemZ::ST, HighOpcode: SystemZ::STFH);
    return true;

  case SystemZ::STOCMux:
    expandLOCPseudo(MI, LowOpcode: SystemZ::STOC, HighOpcode: SystemZ::STOCFH);
    return true;

  // Register-immediate "Mux" pseudos: choose the opcode that targets the
  // allocated low or high half. For LHIMux the high variant needs the
  // immediate converted (ConvertHigh).
  case SystemZ::LHIMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::LHI, HighOpcode: SystemZ::IIHF, ConvertHigh: true);
    return true;

  case SystemZ::IIFMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::IILF, HighOpcode: SystemZ::IIHF, ConvertHigh: false);
    return true;

  case SystemZ::IILMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::IILL, HighOpcode: SystemZ::IIHL, ConvertHigh: false);
    return true;

  case SystemZ::IIHMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::IILH, HighOpcode: SystemZ::IIHH, ConvertHigh: false);
    return true;

  case SystemZ::NIFMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::NILF, HighOpcode: SystemZ::NIHF, ConvertHigh: false);
    return true;

  case SystemZ::NILMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::NILL, HighOpcode: SystemZ::NIHL, ConvertHigh: false);
    return true;

  case SystemZ::NIHMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::NILH, HighOpcode: SystemZ::NIHH, ConvertHigh: false);
    return true;

  case SystemZ::OIFMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::OILF, HighOpcode: SystemZ::OIHF, ConvertHigh: false);
    return true;

  case SystemZ::OILMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::OILL, HighOpcode: SystemZ::OIHL, ConvertHigh: false);
    return true;

  case SystemZ::OIHMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::OILH, HighOpcode: SystemZ::OIHH, ConvertHigh: false);
    return true;

  case SystemZ::XIFMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::XILF, HighOpcode: SystemZ::XIHF, ConvertHigh: false);
    return true;

  case SystemZ::TMLMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::TMLL, HighOpcode: SystemZ::TMHL, ConvertHigh: false);
    return true;

  case SystemZ::TMHMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::TMLH, HighOpcode: SystemZ::TMHH, ConvertHigh: false);
    return true;

  case SystemZ::AHIMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::AHI, HighOpcode: SystemZ::AIH, ConvertHigh: false);
    return true;

  case SystemZ::AHIMuxK:
    expandRIEPseudo(MI, LowOpcode: SystemZ::AHI, LowOpcodeK: SystemZ::AHIK, HighOpcode: SystemZ::AIH);
    return true;

  case SystemZ::AFIMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::AFI, HighOpcode: SystemZ::AIH, ConvertHigh: false);
    return true;

  case SystemZ::CHIMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::CHI, HighOpcode: SystemZ::CIH, ConvertHigh: false);
    return true;

  case SystemZ::CFIMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::CFI, HighOpcode: SystemZ::CIH, ConvertHigh: false);
    return true;

  case SystemZ::CLFIMux:
    expandRIPseudo(MI, LowOpcode: SystemZ::CLFI, HighOpcode: SystemZ::CLIH, ConvertHigh: false);
    return true;

  case SystemZ::CMux:
    expandRXYPseudo(MI, LowOpcode: SystemZ::C, HighOpcode: SystemZ::CHF);
    return true;

  case SystemZ::CLMux:
    expandRXYPseudo(MI, LowOpcode: SystemZ::CL, HighOpcode: SystemZ::CLHF);
    return true;

  // Rotate-and-insert across register halves: select the variant matching
  // the source/destination halves; when they differ, the rotate amount is
  // adjusted by 32 to compensate for the half swap.
  case SystemZ::RISBMux: {
    bool DestIsHigh = SystemZ::isHighReg(Reg: MI.getOperand(i: 0).getReg());
    bool SrcIsHigh = SystemZ::isHighReg(Reg: MI.getOperand(i: 2).getReg());
    if (SrcIsHigh == DestIsHigh)
      MI.setDesc(get(Opcode: DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
    else {
      MI.setDesc(get(Opcode: DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
      MI.getOperand(i: 5).setImm(MI.getOperand(i: 5).getImm() ^ 32);
    }
    return true;
  }

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  case TargetOpcode::LOAD_STACK_GUARD:
    expandLoadStackGuard(MI: &MI);
    return true;

  default:
    return false;
  }
}
1814 | |
1815 | unsigned SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { |
1816 | if (MI.isInlineAsm()) { |
1817 | const MachineFunction *MF = MI.getParent()->getParent(); |
1818 | const char *AsmStr = MI.getOperand(i: 0).getSymbolName(); |
1819 | return getInlineAsmLength(Str: AsmStr, MAI: *MF->getTarget().getMCAsmInfo()); |
1820 | } |
1821 | else if (MI.getOpcode() == SystemZ::PATCHPOINT) |
1822 | return PatchPointOpers(&MI).getNumPatchBytes(); |
1823 | else if (MI.getOpcode() == SystemZ::STACKMAP) |
1824 | return MI.getOperand(i: 1).getImm(); |
1825 | else if (MI.getOpcode() == SystemZ::FENTRY_CALL) |
1826 | return 6; |
1827 | if (MI.getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER) |
1828 | return 18; |
1829 | if (MI.getOpcode() == TargetOpcode::PATCHABLE_RET) |
1830 | return 18 + (MI.getOperand(i: 0).getImm() == SystemZ::CondReturn ? 4 : 0); |
1831 | |
1832 | return MI.getDesc().getSize(); |
1833 | } |
1834 | |
// Decompose a branch instruction into a SystemZII::Branch descriptor:
// the branch kind, the CC mask that is valid for it, the CC mask under
// which it is taken, and a pointer to the target operand.
SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  // Unconditional branches: taken under any CC value.
  case SystemZ::BR:
  case SystemZ::BI:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             SystemZ::CCMASK_ANY, &MI.getOperand(i: 0));

  // Conditional branches: operands 0/1 hold the valid and taken CC masks.
  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(i: 0).getImm(),
                             MI.getOperand(i: 1).getImm(), &MI.getOperand(i: 2));

  // Branch-on-count: decrement and branch while the counter is nonzero.
  case SystemZ::BRCT:
  case SystemZ::BRCTH:
    return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI.getOperand(i: 2));

  case SystemZ::BRCTG:
    return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI.getOperand(i: 2));

  // Fused compare-and-branch forms: operand 2 is the taken CC mask and
  // operand 3 the target.
  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
                             MI.getOperand(i: 2).getImm(), &MI.getOperand(i: 3));

  case SystemZ::CLIJ:
  case SystemZ::CLRJ:
    return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
                             MI.getOperand(i: 2).getImm(), &MI.getOperand(i: 3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
                             MI.getOperand(i: 2).getImm(), &MI.getOperand(i: 3));

  case SystemZ::CLGIJ:
  case SystemZ::CLGRJ:
    return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
                             MI.getOperand(i: 2).getImm(), &MI.getOperand(i: 3));

  case SystemZ::INLINEASM_BR:
    // Don't try to analyze asm goto, so pass nullptr as branch target argument.
    return SystemZII::Branch(SystemZII::AsmGoto, 0, 0, nullptr);

  default:
    llvm_unreachable("Unrecognized branch opcode" );
  }
}
1887 | |
1888 | void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC, |
1889 | unsigned &LoadOpcode, |
1890 | unsigned &StoreOpcode) const { |
1891 | if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) { |
1892 | LoadOpcode = SystemZ::L; |
1893 | StoreOpcode = SystemZ::ST; |
1894 | } else if (RC == &SystemZ::GRH32BitRegClass) { |
1895 | LoadOpcode = SystemZ::LFH; |
1896 | StoreOpcode = SystemZ::STFH; |
1897 | } else if (RC == &SystemZ::GRX32BitRegClass) { |
1898 | LoadOpcode = SystemZ::LMux; |
1899 | StoreOpcode = SystemZ::STMux; |
1900 | } else if (RC == &SystemZ::GR64BitRegClass || |
1901 | RC == &SystemZ::ADDR64BitRegClass) { |
1902 | LoadOpcode = SystemZ::LG; |
1903 | StoreOpcode = SystemZ::STG; |
1904 | } else if (RC == &SystemZ::GR128BitRegClass || |
1905 | RC == &SystemZ::ADDR128BitRegClass) { |
1906 | LoadOpcode = SystemZ::L128; |
1907 | StoreOpcode = SystemZ::ST128; |
1908 | } else if (RC == &SystemZ::FP16BitRegClass && !STI.hasVector()) { |
1909 | LoadOpcode = SystemZ::LE16; |
1910 | StoreOpcode = SystemZ::STE16; |
1911 | } else if (RC == &SystemZ::FP32BitRegClass) { |
1912 | LoadOpcode = SystemZ::LE; |
1913 | StoreOpcode = SystemZ::STE; |
1914 | } else if (RC == &SystemZ::FP64BitRegClass) { |
1915 | LoadOpcode = SystemZ::LD; |
1916 | StoreOpcode = SystemZ::STD; |
1917 | } else if (RC == &SystemZ::FP128BitRegClass) { |
1918 | LoadOpcode = SystemZ::LX; |
1919 | StoreOpcode = SystemZ::STX; |
1920 | } else if (RC == &SystemZ::FP16BitRegClass || |
1921 | RC == &SystemZ::VR16BitRegClass) { |
1922 | LoadOpcode = SystemZ::VL16; |
1923 | StoreOpcode = SystemZ::VST16; |
1924 | } else if (RC == &SystemZ::VR32BitRegClass) { |
1925 | LoadOpcode = SystemZ::VL32; |
1926 | StoreOpcode = SystemZ::VST32; |
1927 | } else if (RC == &SystemZ::VR64BitRegClass) { |
1928 | LoadOpcode = SystemZ::VL64; |
1929 | StoreOpcode = SystemZ::VST64; |
1930 | } else if (RC == &SystemZ::VF128BitRegClass || |
1931 | RC == &SystemZ::VR128BitRegClass) { |
1932 | LoadOpcode = SystemZ::VL; |
1933 | StoreOpcode = SystemZ::VST; |
1934 | } else |
1935 | llvm_unreachable("Unsupported regclass to load or store" ); |
1936 | } |
1937 | |
// Return an opcode equivalent to Opcode that can encode the displacement
// Offset, or 0 if no such opcode exists. For 128-bit accesses both halves
// (Offset and Offset + 8) must be encodable.
unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset,
                                              const MachineInstr *MI) const {
  const MCInstrDesc &MCID = get(Opcode);
  // Offset2 is the displacement of the second word of a 128-bit access.
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
  if (isUInt<12>(x: Offset) && isUInt<12>(x: Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(x: Offset) && isInt<20>(x: Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;

    // If a VR32/VR64 reg ended up in an FP register, use the FP opcode.
    if (MI && MI->getOperand(i: 0).isReg()) {
      Register Reg = MI->getOperand(i: 0).getReg();
      if (Reg.isPhysical() && SystemZMC::getFirstReg(Reg) < 16) {
        switch (Opcode) {
        case SystemZ::VL32:
          return SystemZ::LEY;
        case SystemZ::VST32:
          return SystemZ::STEY;
        case SystemZ::VL64:
          return SystemZ::LDY;
        case SystemZ::VST64:
          return SystemZ::STDY;
        default: break;
        }
      }
    }
  }
  // Offset does not fit any encodable displacement form.
  return 0;
}
1983 | |
1984 | bool SystemZInstrInfo::hasDisplacementPairInsn(unsigned Opcode) const { |
1985 | const MCInstrDesc &MCID = get(Opcode); |
1986 | if (MCID.TSFlags & SystemZII::Has20BitOffset) |
1987 | return SystemZ::getDisp12Opcode(Opcode) >= 0; |
1988 | return SystemZ::getDisp20Opcode(Opcode) >= 0; |
1989 | } |
1990 | |
1991 | unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const { |
1992 | switch (Opcode) { |
1993 | case SystemZ::L: return SystemZ::LT; |
1994 | case SystemZ::LY: return SystemZ::LT; |
1995 | case SystemZ::LG: return SystemZ::LTG; |
1996 | case SystemZ::LGF: return SystemZ::LTGF; |
1997 | case SystemZ::LR: return SystemZ::LTR; |
1998 | case SystemZ::LGFR: return SystemZ::LTGFR; |
1999 | case SystemZ::LGR: return SystemZ::LTGR; |
2000 | case SystemZ::LCDFR: return SystemZ::LCDBR; |
2001 | case SystemZ::LPDFR: return SystemZ::LPDBR; |
2002 | case SystemZ::LNDFR: return SystemZ::LNDBR; |
2003 | case SystemZ::LCDFR_32: return SystemZ::LCEBR; |
2004 | case SystemZ::LPDFR_32: return SystemZ::LPEBR; |
2005 | case SystemZ::LNDFR_32: return SystemZ::LNEBR; |
2006 | // On zEC12 we prefer to use RISBGN. But if there is a chance to |
2007 | // actually use the condition code, we may turn it back into RISGB. |
2008 | // Note that RISBG is not really a "load-and-test" instruction, |
2009 | // but sets the same condition code values, so is OK to use here. |
2010 | case SystemZ::RISBGN: return SystemZ::RISBG; |
2011 | default: return 0; |
2012 | } |
2013 | } |
2014 | |
// Test whether Mask (restricted to the low BitSize bits) is a valid
// selection mask for the RxSBG family of rotate-and-select instructions.
// On success, Start and End receive the bit positions numbered from the
// MSB of a 64-bit value (bit 0) down to the LSB (bit 63), as the
// instructions expect. End < Start encodes a wrap-around mask.
bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
                                   unsigned &Start, unsigned &End) const {
  // Reject trivial all-zero masks.
  Mask &= allOnes(Count: BitSize);
  if (Mask == 0)
    return false;

  // Handle the 1+0+ or 0+1+0* cases.  Start then specifies the index of
  // the msb and End specifies the index of the lsb.
  unsigned LSB, Length;
  if (isShiftedMask_64(Value: Mask, MaskIdx&: LSB, MaskLen&: Length)) {
    // Convert from LSB-0 numbering to the MSB-0 numbering used by RxSBG.
    Start = 63 - (LSB + Length - 1);
    End = 63 - LSB;
    return true;
  }

  // Handle the wrap-around 1+0+1+ cases.  Start then specifies the msb
  // of the low 1s and End specifies the lsb of the high 1s.
  // The complement of such a mask within BitSize is a contiguous run of 0s
  // that touches neither end, so check the inverted mask instead.
  if (isShiftedMask_64(Value: Mask ^ allOnes(Count: BitSize), MaskIdx&: LSB, MaskLen&: Length)) {
    assert(LSB > 0 && "Bottom bit must be set" );
    assert(LSB + Length < BitSize && "Top bit must be set" );
    Start = 63 - (LSB - 1);
    End = 63 - (LSB + Length);
    return true;
  }

  return false;
}
2043 | |
// Return the opcode of the fused pseudo (compare-and-branch / -return /
// -sibcall / -trap, selected by Type) corresponding to the compare opcode
// Opcode, or 0 if no suitable fused form exists. When MI is given, its
// operands are checked against the tighter encoding constraints of the
// fused forms.
unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
                                           SystemZII::FusedCompareType Type,
                                           const MachineInstr *MI) const {
  switch (Opcode) {
  // The fused immediate forms only encode an 8-bit immediate, signed for
  // CHI/CGHI and unsigned for CLFI/CLGFI.
  case SystemZ::CHI:
  case SystemZ::CGHI:
    if (!(MI && isInt<8>(x: MI->getOperand(i: 1).getImm())))
      return 0;
    break;
  case SystemZ::CLFI:
  case SystemZ::CLGFI:
    if (!(MI && isUInt<8>(x: MI->getOperand(i: 1).getImm())))
      return 0;
    break;
  // Memory compares can only fuse (into CLT/CLGT) with the
  // miscellaneous-extensions facility, and only when operand 3 — presumably
  // the index register of the address — is zero.
  case SystemZ::CL:
  case SystemZ::CLG:
    if (!STI.hasMiscellaneousExtensions())
      return 0;
    if (!(MI && MI->getOperand(i: 3).getReg() == 0))
      return 0;
    break;
  }
  switch (Type) {
  case SystemZII::CompareAndBranch:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRJ;
    case SystemZ::CGR:
      return SystemZ::CGRJ;
    case SystemZ::CHI:
      return SystemZ::CIJ;
    case SystemZ::CGHI:
      return SystemZ::CGIJ;
    case SystemZ::CLR:
      return SystemZ::CLRJ;
    case SystemZ::CLGR:
      return SystemZ::CLGRJ;
    case SystemZ::CLFI:
      return SystemZ::CLIJ;
    case SystemZ::CLGFI:
      return SystemZ::CLGIJ;
    default:
      return 0;
    }
  case SystemZII::CompareAndReturn:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRBReturn;
    case SystemZ::CGR:
      return SystemZ::CGRBReturn;
    case SystemZ::CHI:
      return SystemZ::CIBReturn;
    case SystemZ::CGHI:
      return SystemZ::CGIBReturn;
    case SystemZ::CLR:
      return SystemZ::CLRBReturn;
    case SystemZ::CLGR:
      return SystemZ::CLGRBReturn;
    case SystemZ::CLFI:
      return SystemZ::CLIBReturn;
    case SystemZ::CLGFI:
      return SystemZ::CLGIBReturn;
    default:
      return 0;
    }
  case SystemZII::CompareAndSibcall:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRBCall;
    case SystemZ::CGR:
      return SystemZ::CGRBCall;
    case SystemZ::CHI:
      return SystemZ::CIBCall;
    case SystemZ::CGHI:
      return SystemZ::CGIBCall;
    case SystemZ::CLR:
      return SystemZ::CLRBCall;
    case SystemZ::CLGR:
      return SystemZ::CLGRBCall;
    case SystemZ::CLFI:
      return SystemZ::CLIBCall;
    case SystemZ::CLGFI:
      return SystemZ::CLGIBCall;
    default:
      return 0;
    }
  case SystemZII::CompareAndTrap:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRT;
    case SystemZ::CGR:
      return SystemZ::CGRT;
    case SystemZ::CHI:
      return SystemZ::CIT;
    case SystemZ::CGHI:
      return SystemZ::CGIT;
    case SystemZ::CLR:
      return SystemZ::CLRT;
    case SystemZ::CLGR:
      return SystemZ::CLGRT;
    case SystemZ::CLFI:
      return SystemZ::CLFIT;
    case SystemZ::CLGFI:
      return SystemZ::CLGIT;
    // Only the trap form exists for memory compares.
    case SystemZ::CL:
      return SystemZ::CLT;
    case SystemZ::CLG:
      return SystemZ::CLGT;
    default:
      return 0;
    }
  }
  return 0;
}
2158 | |
// Check whether the operands of the reg/reg compare MBBI can be swapped,
// and if so, rewrite every instruction that consumes the resulting CC so
// that the swap is semantically invisible. Returns false (leaving nothing
// modified) when some CC consumer cannot be updated or CC may escape the
// block.
bool SystemZInstrInfo::
prepareCompareSwapOperands(MachineBasicBlock::iterator const MBBI) const {
  assert(MBBI->isCompare() && MBBI->getOperand(0).isReg() &&
         MBBI->getOperand(1).isReg() && !MBBI->mayLoad() &&
         "Not a compare reg/reg." );

  MachineBasicBlock *MBB = MBBI->getParent();
  bool CCLive = true;
  SmallVector<MachineInstr *, 4> CCUsers;
  // Scan forward from the compare, collecting CC readers until CC is
  // redefined. Only instructions whose CC mask position is known (flagged
  // CCMaskFirst or CCMaskLast) can be updated; anything else blocks the swap.
  for (MachineInstr &MI : llvm::make_range(x: std::next(x: MBBI), y: MBB->end())) {
    if (MI.readsRegister(Reg: SystemZ::CC, /*TRI=*/nullptr)) {
      unsigned Flags = MI.getDesc().TSFlags;
      if ((Flags & SystemZII::CCMaskFirst) || (Flags & SystemZII::CCMaskLast))
        CCUsers.push_back(Elt: &MI);
      else
        return false;
    }
    if (MI.definesRegister(Reg: SystemZ::CC, /*TRI=*/nullptr)) {
      CCLive = false;
      break;
    }
  }
  // If no later instruction killed CC, it must not be live out of the
  // block, or successors would observe the swapped condition.
  if (CCLive) {
    LiveRegUnits LiveRegs(*MBB->getParent()->getSubtarget().getRegisterInfo());
    LiveRegs.addLiveOuts(MBB: *MBB);
    if (!LiveRegs.available(Reg: SystemZ::CC))
      return false;
  }

  // Update all CC users.
  for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) {
    unsigned Flags = CCUsers[Idx]->getDesc().TSFlags;
    // The CC mask is either the first two explicit operands (CCMaskFirst)
    // or the last two (CCMaskLast); the mask itself follows the valid mask.
    unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ?
                           0 : CCUsers[Idx]->getNumExplicitOperands() - 2);
    MachineOperand &CCMaskMO = CCUsers[Idx]->getOperand(i: FirstOpNum + 1);
    unsigned NewCCMask = SystemZ::reverseCCMask(CCMask: CCMaskMO.getImm());
    CCMaskMO.setImm(NewCCMask);
  }

  return true;
}
2200 | |
2201 | unsigned SystemZ::reverseCCMask(unsigned CCMask) { |
2202 | return ((CCMask & SystemZ::CCMASK_CMP_EQ) | |
2203 | ((CCMask & SystemZ::CCMASK_CMP_GT) ? SystemZ::CCMASK_CMP_LT : 0) | |
2204 | ((CCMask & SystemZ::CCMASK_CMP_LT) ? SystemZ::CCMASK_CMP_GT : 0) | |
2205 | (CCMask & SystemZ::CCMASK_CMP_UO)); |
2206 | } |
2207 | |
2208 | MachineBasicBlock *SystemZ::emitBlockAfter(MachineBasicBlock *MBB) { |
2209 | MachineFunction &MF = *MBB->getParent(); |
2210 | MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(BB: MBB->getBasicBlock()); |
2211 | MF.insert(MBBI: std::next(x: MachineFunction::iterator(MBB)), MBB: NewMBB); |
2212 | return NewMBB; |
2213 | } |
2214 | |
2215 | MachineBasicBlock *SystemZ::splitBlockAfter(MachineBasicBlock::iterator MI, |
2216 | MachineBasicBlock *MBB) { |
2217 | MachineBasicBlock *NewMBB = emitBlockAfter(MBB); |
2218 | NewMBB->splice(Where: NewMBB->begin(), Other: MBB, |
2219 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: MBB->end()); |
2220 | NewMBB->transferSuccessorsAndUpdatePHIs(FromMBB: MBB); |
2221 | return NewMBB; |
2222 | } |
2223 | |
2224 | MachineBasicBlock *SystemZ::splitBlockBefore(MachineBasicBlock::iterator MI, |
2225 | MachineBasicBlock *MBB) { |
2226 | MachineBasicBlock *NewMBB = emitBlockAfter(MBB); |
2227 | NewMBB->splice(Where: NewMBB->begin(), Other: MBB, From: MI, To: MBB->end()); |
2228 | NewMBB->transferSuccessorsAndUpdatePHIs(FromMBB: MBB); |
2229 | return NewMBB; |
2230 | } |
2231 | |
2232 | unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const { |
2233 | if (!STI.hasLoadAndTrap()) |
2234 | return 0; |
2235 | switch (Opcode) { |
2236 | case SystemZ::L: |
2237 | case SystemZ::LY: |
2238 | return SystemZ::LAT; |
2239 | case SystemZ::LG: |
2240 | return SystemZ::LGAT; |
2241 | case SystemZ::LFH: |
2242 | return SystemZ::LFHAT; |
2243 | case SystemZ::LLGF: |
2244 | return SystemZ::LLGFAT; |
2245 | case SystemZ::LLGT: |
2246 | return SystemZ::LLGTAT; |
2247 | } |
2248 | return 0; |
2249 | } |
2250 | |
2251 | void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB, |
2252 | MachineBasicBlock::iterator MBBI, |
2253 | unsigned Reg, uint64_t Value) const { |
2254 | DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); |
2255 | unsigned Opcode = 0; |
2256 | if (isInt<16>(x: Value)) |
2257 | Opcode = SystemZ::LGHI; |
2258 | else if (SystemZ::isImmLL(Val: Value)) |
2259 | Opcode = SystemZ::LLILL; |
2260 | else if (SystemZ::isImmLH(Val: Value)) { |
2261 | Opcode = SystemZ::LLILH; |
2262 | Value >>= 16; |
2263 | } |
2264 | else if (isInt<32>(x: Value)) |
2265 | Opcode = SystemZ::LGFI; |
2266 | if (Opcode) { |
2267 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode), DestReg: Reg).addImm(Val: Value); |
2268 | return; |
2269 | } |
2270 | |
2271 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
2272 | assert (MRI.isSSA() && "Huge values only handled before reg-alloc ." ); |
2273 | Register Reg0 = MRI.createVirtualRegister(RegClass: &SystemZ::GR64BitRegClass); |
2274 | Register Reg1 = MRI.createVirtualRegister(RegClass: &SystemZ::GR64BitRegClass); |
2275 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: SystemZ::IMPLICIT_DEF), DestReg: Reg0); |
2276 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: SystemZ::IIHF64), DestReg: Reg1) |
2277 | .addReg(RegNo: Reg0).addImm(Val: Value >> 32); |
2278 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: get(Opcode: SystemZ::IILF64), DestReg: Reg) |
2279 | .addReg(RegNo: Reg1).addImm(Val: Value & ((uint64_t(1) << 32) - 1)); |
2280 | } |
2281 | |
2282 | bool SystemZInstrInfo::verifyInstruction(const MachineInstr &MI, |
2283 | StringRef &ErrInfo) const { |
2284 | const MCInstrDesc &MCID = MI.getDesc(); |
2285 | for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { |
2286 | if (I >= MCID.getNumOperands()) |
2287 | break; |
2288 | const MachineOperand &Op = MI.getOperand(i: I); |
2289 | const MCOperandInfo &MCOI = MCID.operands()[I]; |
2290 | // Addressing modes have register and immediate operands. Op should be a |
2291 | // register (or frame index) operand if MCOI.RegClass contains a valid |
2292 | // register class, or an immediate otherwise. |
2293 | if (MCOI.OperandType == MCOI::OPERAND_MEMORY && |
2294 | ((MCOI.RegClass != -1 && !Op.isReg() && !Op.isFI()) || |
2295 | (MCOI.RegClass == -1 && !Op.isImm()))) { |
2296 | ErrInfo = "Addressing mode operands corrupt!" ; |
2297 | return false; |
2298 | } |
2299 | } |
2300 | |
2301 | return true; |
2302 | } |
2303 | |
2304 | bool SystemZInstrInfo:: |
2305 | areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, |
2306 | const MachineInstr &MIb) const { |
2307 | |
2308 | if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) |
2309 | return false; |
2310 | |
2311 | // If mem-operands show that the same address Value is used by both |
2312 | // instructions, check for non-overlapping offsets and widths. Not |
2313 | // sure if a register based analysis would be an improvement... |
2314 | |
2315 | MachineMemOperand *MMOa = *MIa.memoperands_begin(); |
2316 | MachineMemOperand *MMOb = *MIb.memoperands_begin(); |
2317 | const Value *VALa = MMOa->getValue(); |
2318 | const Value *VALb = MMOb->getValue(); |
2319 | bool SameVal = (VALa && VALb && (VALa == VALb)); |
2320 | if (!SameVal) { |
2321 | const PseudoSourceValue *PSVa = MMOa->getPseudoValue(); |
2322 | const PseudoSourceValue *PSVb = MMOb->getPseudoValue(); |
2323 | if (PSVa && PSVb && (PSVa == PSVb)) |
2324 | SameVal = true; |
2325 | } |
2326 | if (SameVal) { |
2327 | int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset(); |
2328 | LocationSize WidthA = MMOa->getSize(), WidthB = MMOb->getSize(); |
2329 | int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; |
2330 | int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; |
2331 | LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; |
2332 | if (LowWidth.hasValue() && |
2333 | LowOffset + (int)LowWidth.getValue() <= HighOffset) |
2334 | return true; |
2335 | } |
2336 | |
2337 | return false; |
2338 | } |
2339 | |
2340 | bool SystemZInstrInfo::getConstValDefinedInReg(const MachineInstr &MI, |
2341 | const Register Reg, |
2342 | int64_t &ImmVal) const { |
2343 | |
2344 | if (MI.getOpcode() == SystemZ::VGBM && Reg == MI.getOperand(i: 0).getReg()) { |
2345 | ImmVal = MI.getOperand(i: 1).getImm(); |
2346 | // TODO: Handle non-0 values |
2347 | return ImmVal == 0; |
2348 | } |
2349 | |
2350 | return false; |
2351 | } |
2352 | |
2353 | std::optional<DestSourcePair> |
2354 | SystemZInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const { |
2355 | // if MI is a simple single-register copy operation, return operand pair |
2356 | if (MI.isMoveReg()) |
2357 | return DestSourcePair(MI.getOperand(i: 0), MI.getOperand(i: 1)); |
2358 | |
2359 | return std::nullopt; |
2360 | } |
2361 | |