//===- MipsSEFrameLowering.cpp - Mips32/64 Frame Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of the TargetFrameLowering
// class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsABIInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

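/// Return the (MFHI, MFLO) opcode pair used to read the halves of the given
/// accumulator register, or (0, 0) if \p Src is not an accumulator register.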
static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

namespace {

/// Helper class to expand pseudos.
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  using Iter = MachineBasicBlock::iterator;

  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};

} // end anonymous namespace

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(MF.getSubtarget<MipsSubtarget>()),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (auto &MBB : MF) {
    for (Iter I = MBB.begin(), End = MBB.end(); I != End;)
      Expanded |= expandInstr(MBB, I++);
  }

  return Expanded;
}

bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch(I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  Register VR = MRI.createVirtualRegister(RC);
  Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  Register VR = MRI.createVirtualRegister(RC);
  Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  Register Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  Register Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

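/// Expand a COPY whose source is an accumulator register into MFHI/MFLO
/// transfers followed by copies into the destination's sub-registers.
/// Copies from non-accumulator registers are left untouched.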
bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  Register Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  const TargetRegisterClass *DstRC = RegInfo.getMinimalPhysRegClass(Dst);
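  // Each half of the accumulator goes through a GPR that is half the
  // accumulator's width: getRegSizeInBits() / 16 = (size in bytes) / 2.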
  unsigned VRegSize = RegInfo.getRegSizeInBits(*DstRC) / 16;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  Register DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  Register DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the cases when the ABI is fpxx and mthc1 is
/// not available, and when the ABI is FP64A. It is implemented here because
/// frame indexes are eliminated before MipsSEInstrInfo::expandBuildPairF64 is
/// called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For fpxx and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether they use an odd or even
  // register.
  //
  // For the cases that should be covered here, MipsSEISelDAGToDAG adds $sp as
  // an implicit operand, so other passes (like ShrinkWrapping) are aware that
  // the stack is used.
  if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
      && I->getOperand(3).getReg() == Mips::SP) {
    Register DstReg = I->getOperand(0).getReg();
    Register LoReg = I->getOperand(1).getReg();
    Register HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC2);
    if (!Subtarget.isLittle())
      std::swap(LoReg, HiReg);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
                        &RegInfo, 4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
    return true;
  }

  return false;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the cases when the ABI is fpxx and mfhc1
/// is not available, and when the ABI is FP64A. It is implemented here because
/// frame indexes are eliminated before MipsSEInstrInfo::
/// expandExtractElementF64 is called.
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  const MachineOperand &Op1 = I->getOperand(1);
  const MachineOperand &Op2 = I->getOperand(2);

  if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) {
    Register DstReg = I->getOperand(0).getReg();
    BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg);
    return true;
  }

  // For fpxx and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether they use an odd or even
  // register.
  //
  // For the cases that should be covered here, MipsSEISelDAGToDAG adds $sp as
  // an implicit operand, so other passes (like ShrinkWrapping) are aware that
  // the stack is used.
  if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
      && I->getOperand(3).getReg() == Mips::SP) {
    Register DstReg = I->getOperand(0).getReg();
    Register SrcReg = Op1.getReg();
    unsigned N = Op2.getImm();
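    // The double is spilled as a whole, so on little-endian targets half N
    // lives at byte offset 4*N; on big-endian targets the 32-bit halves are
    // swapped, so it lives at 4*(1-N).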
    int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC);
    TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.getStackAlignment()) {}

void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl;
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();
  unsigned ADDiu = ABI.GetPtrAddiuOp();
  unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;

  const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;

  // First, compute final stack size.
  uint64_t StackSize = MFI.getStackSize();

  // No need to allocate space on the stack.
  if (StackSize == 0 && !MFI.adjustsStack()) return;

  const MCRegisterInfo *MRI = MF.getContext().getRegisterInfo();

  // Adjust stack.
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex =
      MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  if (MF.getFunction().hasFnAttribute("interrupt"))
    emitInterruptPrologueStub(MF, MBB);

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  if (!CSI.empty()) {
    // Find the instruction past the last instruction that saves a
    // callee-saved register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over list of callee-saved registers and emit .cfi_offset
    // directives.
    for (const CalleeSavedInfo &I : CSI) {
      int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
      Register Reg = I.getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
        MBB.addLiveIn(ABI.GetEhDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo,
                              Register());
    }

    // Emit .cfi_offset directives for eh data registers.
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI.getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true);
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If the frame pointer is enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
    BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    if (RegInfo.hasStackRealignment(MF)) {
      // addiu $Reg, $zero, -MaxAlignment
      // andi $sp, $sp, $Reg
      Register VR = MF.getRegInfo().createVirtualRegister(RC);
      assert((Log2(MFI.getMaxAlign()) < 16) &&
             "Function's alignment size requirement is not supported.");
      int64_t MaxAlign = -(int64_t)MFI.getMaxAlign().value();

      BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO).addImm(MaxAlign);
      BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);

      if (hasBP(MF)) {
        // move $s7, $sp
        unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
        BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP)
            .addReg(SP)
            .addReg(ZERO);
      }
    }
  }
}

void MipsSEFrameLowering::emitInterruptPrologueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Report an error if the target doesn't support MIPS32r2 or later.
  // The epilogue relies on the use of "ehb" to clear execution hazards.
  // Pre-R2 MIPS relies on an implementation-defined number of "ssnop"s to
  // clear the execution hazard. Support for ssnop hazard clearing is not
  // provided, so reject that configuration.
  if (!STI.hasMips32r2())
    report_fatal_error(
        "\"interrupt\" attribute is not supported on pre-MIPS32R2 or "
        "MIPS16 targets.");

  // The GP register contains the "user" value, so we cannot perform
  // any gp-relative loads until we restore the "kernel" or "system" gp
  // value. Until support is written we shall only accept the static
  // relocation model.
  if ((STI.getRelocationModel() != Reloc::Static))
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "static relocation model on MIPS at the present time.");

  if (!STI.isABI_O32() || STI.hasMips64())
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "O32 ABI on MIPS32R2+ at the present time.");

  // Perform ISR handling like GCC.
  StringRef IntKind =
      MF.getFunction().getFnAttribute("interrupt").getValueAsString();
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // EIC interrupt handling needs to read the Cause register to disable
  // interrupts.
  if (IntKind == "eic") {
    // Coprocessor registers are always live per se.
    MBB.addLiveIn(Mips::COP013);
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K0)
        .addReg(Mips::COP013)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);

    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EXT), Mips::K0)
        .addReg(Mips::K0)
        .addImm(10)
        .addImm(6)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Fetch and spill EPC.
  MBB.addLiveIn(Mips::COP014);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP014)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(0), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Fetch and spill Status.
  MBB.addLiveIn(Mips::COP012);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP012)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(1), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Build the configuration for disabling lower priority interrupts. Non-EIC
  // interrupts need to be masked off with zero, EIC from the Cause register.
  unsigned InsPosition = 8;
  unsigned InsSize = 0;
  unsigned SrcReg = Mips::ZERO;

  // If the interrupt we're tied to is the EIC, switch the source for masking
  // off interrupts to the Cause register.
  if (IntKind == "eic") {
    SrcReg = Mips::K0;
    InsPosition = 10;
    InsSize = 6;
  } else
    InsSize = StringSwitch<unsigned>(IntKind)
                  .Case("sw0", 1)
                  .Case("sw1", 2)
                  .Case("hw0", 3)
                  .Case("hw1", 4)
                  .Case("hw2", 5)
                  .Case("hw3", 6)
                  .Case("hw4", 7)
                  .Case("hw5", 8)
                  .Default(0);
  assert(InsSize != 0 && "Unknown interrupt type!");

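  // Insert the mask into the Status value held in $k1: for EIC, the priority
  // level extracted from Cause is written into Status[15:10]; otherwise the
  // IM bits starting at Status[8] are cleared to mask lower-priority
  // interrupts.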
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(SrcReg)
      .addImm(InsPosition)
      .addImm(InsSize)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Mask off KSU, ERL, EXL.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(Mips::ZERO)
      .addImm(1)
      .addImm(4)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Disable the FPU as we are not spilling those register sets.
  if (!STI.useSoftFloat())
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
        .addReg(Mips::ZERO)
        .addImm(29)
        .addImm(1)
        .addReg(Mips::K1)
        .setMIFlag(MachineInstr::FrameSetup);

  // Set the new status.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);
}

void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();

  // If the frame pointer is enabled, restore the stack pointer from it.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, DL, TII.get(MOVE), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC =
        ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
                               MipsFI->getEhDataRegFI(J), RC, &RegInfo,
                               Register());
    }
  }

  if (MF.getFunction().hasFnAttribute("interrupt"))
    emitInterruptEpilogueStub(MF, MBB);

  // Get the number of bytes from FrameInfo.
  uint64_t StackSize = MFI.getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

void MipsSEFrameLowering::emitInterruptEpilogueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Perform ISR handling like GCC.
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // Disable interrupts.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::DI), Mips::ZERO);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));

  // Restore EPC.
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(0), PtrRC,
                                           STI.getRegisterInfo(), Register());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
      .addReg(Mips::K1)
      .addImm(0);

  // Restore Status.
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(1), PtrRC,
                                           STI.getRegisterInfo(), Register());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0);
}

StackOffset
MipsSEFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsABIInfo ABI = STI.getABI();

  if (MFI.isFixedObjectIndex(FI))
    FrameReg = hasFP(MF) ? ABI.GetFramePtr() : ABI.GetStackPtr();
  else
    FrameReg = hasBP(MF) ? ABI.GetBasePtr() : ABI.GetStackPtr();

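  // Object offsets are recorded relative to the incoming stack pointer, so
  // add the total frame size (and any offset adjustment) to rebase them onto
  // the post-prologue stack/frame/base pointer chosen above.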
  return StackOffset::getFixed(MFI.getObjectOffset(FI) + MFI.getStackSize() -
                               getOffsetOfLocalArea() +
                               MFI.getOffsetAdjustment());
}

bool MipsSEFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *STI.getInstrInfo();

  for (const CalleeSavedInfo &I : CSI) {
    // Add the callee-saved register as live-in. Do not add if the register is
    // RA and the return address is taken, because it has already been added
    // in MipsTargetLowering::lowerRETURNADDR.
    // It's killed at the spill, unless the register is RA and the return
    // address is taken.
    Register Reg = I.getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo().isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      MBB.addLiveIn(Reg);

    // ISRs require HI/LO to be copied into kernel registers before being
    // spilled to the stack frame.
    bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
                   Reg == Mips::HI0 || Reg == Mips::HI0_64);
    const Function &Func = MBB.getParent()->getFunction();
    if (IsLOHI && Func.hasFnAttribute("interrupt")) {
      DebugLoc DL = MI->getDebugLoc();

      unsigned Op = 0;
      if (!STI.getABI().ArePtrs64bit()) {
        Op = (Reg == Mips::HI0) ? Mips::MFHI : Mips::MFLO;
        Reg = Mips::K0;
      } else {
        Op = (Reg == Mips::HI0) ? Mips::MFHI64 : Mips::MFLO64;
        Reg = Mips::K0_64;
      }
      BuildMI(MBB, MI, DL, TII.get(Op), Mips::K0)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, IsKill, I.getFrameIdx(), RC, TRI,
                            Register());
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // Reserve the call frame if the maximum call frame size fits into the
  // 16-bit immediate field and there are no variable-sized objects on the
  // stack. Make sure the second register scavenger spill slot can be accessed
  // with one instruction.
  return isInt<16>(MFI.getMaxCallFrameSize() + getStackAlignment()) &&
         !MFI.hasVarSizedObjects();
}

/// Mark \p Reg and all registers aliasing it in the bitset.
static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
                         unsigned Reg) {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
    SavedRegs.set(*AI);
}

void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MipsABIInfo ABI = STI.getABI();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  unsigned FP = ABI.GetFramePtr();
  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;

  // Mark $ra and $fp as used if the function has a dedicated frame pointer.
  if (hasFP(MF)) {
    setAliasRegs(MF, SavedRegs, RA);
    setAliasRegs(MF, SavedRegs, FP);
  }
  // Mark $s7 as used if the function has a dedicated base pointer.
  if (hasBP(MF))
    setAliasRegs(MF, SavedRegs, BP);

  // Create spill slots for eh data registers if the function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI(MF);

  // Create spill slots for Coprocessor 0 registers if the function is an ISR.
  if (MipsFI->isISR())
    MipsFI->createISRRegFI(MF);

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If the
    // target has 64-bit general-purpose registers, it should be 64-bit;
    // otherwise it should be 32-bit.
    const TargetRegisterClass &RC = STI.isGP64bit() ?
        Mips::GPR64RegClass : Mips::GPR32RegClass;
    int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                                 TRI->getSpillAlign(RC),
                                                 false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set the scavenging frame index if necessary.
  uint64_t MaxSPOffset = estimateStackSize(MF);

  // MSA has a minimum offset of 10 bits signed. If there is a variable-sized
  // object on the stack, the estimation cannot account for it.
  if (isIntN(STI.hasMSA() ? 10 : 16, MaxSPOffset) &&
      !MF.getFrameInfo().hasVarSizedObjects())
    return;

  const TargetRegisterClass &RC =
      ABI.ArePtrs64bit() ? Mips::GPR64RegClass : Mips::GPR32RegClass;
  int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                               TRI->getSpillAlign(RC), false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}