//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "SIShrinkInstructions.h"
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

enum ChangeKind { None, UpdateHint, UpdateInst };

class SIShrinkInstructions {
  MachineFunction *MF;
  MachineRegisterInfo *MRI;
  const GCNSubtarget *ST;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  bool IsPostRA;

  bool foldImmediates(MachineInstr &MI, bool TryToCommute = true) const;
  bool shouldShrinkTrue16(MachineInstr &MI) const;
  bool isKImmOperand(const MachineOperand &Src) const;
  bool isKUImmOperand(const MachineOperand &Src) const;
  bool isKImmOrKUImmOperand(const MachineOperand &Src, bool &IsUnsigned) const;
  void copyExtraImplicitOps(MachineInstr &NewMI, MachineInstr &MI) const;
  bool shrinkScalarCompare(MachineInstr &MI) const;
  bool shrinkMIMG(MachineInstr &MI) const;
  bool shrinkMadFma(MachineInstr &MI) const;
  ChangeKind shrinkScalarLogicOp(MachineInstr &MI) const;
  bool tryReplaceDeadSDST(MachineInstr &MI) const;
  bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
                     Register Reg, unsigned SubReg) const;
  bool instReadsReg(const MachineInstr *MI, unsigned Reg,
                    unsigned SubReg) const;
  bool instModifiesReg(const MachineInstr *MI, unsigned Reg,
                       unsigned SubReg) const;
  TargetInstrInfo::RegSubRegPair getSubRegForIndex(Register Reg, unsigned Sub,
                                                   unsigned I) const;
  void dropInstructionKeepingImpDefs(MachineInstr &MI) const;
  MachineInstr *matchSwap(MachineInstr &MovT) const;

public:
  SIShrinkInstructions() = default;
  bool run(MachineFunction &MF);
};

class SIShrinkInstructionsLegacy : public MachineFunctionPass {

public:
  static char ID;

  SIShrinkInstructionsLegacy() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructionsLegacy, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructionsLegacy::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsLegacyPass() {
  return new SIShrinkInstructionsLegacy();
}

/// This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction.
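///
/// For example (illustrative), given
///   %1 = V_MOV_B32_e32 0x41200000
///   %2 = V_ADD_F32_e32 %1, %0
/// the literal is folded to give %2 = V_ADD_F32_e32 0x41200000, %0, and the
/// V_MOV is erased if it has no remaining uses.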
bool SIShrinkInstructions::foldImmediates(MachineInstr &MI,
                                          bool TryToCommute) const {
  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    Register Reg = Src0.getReg();
    if (Reg.isVirtual()) {
      MachineInstr *Def = MRI->getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (TII->isOperandLegal(MI, Src0Idx, &MovSrc)) {
          if (MovSrc.isImm()) {
            Src0.ChangeToImmediate(MovSrc.getImm());
            ConstantFolded = true;
          } else if (MovSrc.isFI()) {
            Src0.ChangeToFrameIndex(MovSrc.getIndex());
            ConstantFolded = true;
          } else if (MovSrc.isGlobal()) {
            Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
                            MovSrc.getTargetFlags());
            ConstantFolded = true;
          }
        }

        if (ConstantFolded) {
          if (MRI->use_nodbg_empty(Reg))
            Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}

/// Do not shrink the instruction if its registers are not expressible in the
/// shrunk encoding.
bool SIShrinkInstructions::shouldShrinkTrue16(MachineInstr &MI) const {
  for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      assert(!Reg.isVirtual() && "Prior checks should ensure we only shrink "
                                 "True16 Instructions post-RA");
      if (AMDGPU::VGPR_32RegClass.contains(Reg) &&
          !AMDGPU::VGPR_32_Lo128RegClass.contains(Reg))
        return false;

      if (AMDGPU::VGPR_16RegClass.contains(Reg) &&
          !AMDGPU::VGPR_16_Lo128RegClass.contains(Reg))
        return false;
    }
  }
  return true;
}

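// The "KImm" predicates below identify 32-bit literals that would fit in a
// 16-bit immediate field (signed for isKImmOperand, unsigned for
// isKUImmOperand) and are not already encodable as an inline constant, i.e.
// literals that only become cheaper when moved into a *K-form instruction.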
bool SIShrinkInstructions::isKImmOperand(const MachineOperand &Src) const {
  return isInt<16>(SignExtend64(Src.getImm(), 32)) &&
         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
}

bool SIShrinkInstructions::isKUImmOperand(const MachineOperand &Src) const {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
}

bool SIShrinkInstructions::isKImmOrKUImmOperand(const MachineOperand &Src,
                                                bool &IsUnsigned) const {
  if (isInt<16>(SignExtend64(Src.getImm(), 32))) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns the opcode of an instruction that a move immediate of the constant
/// \p Src can be replaced with if the constant is rewritten to \p ModifiedImm,
/// i.e.:
///
/// If the bitreverse of the constant is an inline immediate, reverse the
/// immediate and return the bitreverse opcode.
///
/// If the bitwise negation of the constant is an inline immediate, invert the
/// immediate and return the bitwise not opcode.
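///
/// For example (illustrative): a v_mov_b32 of 0x7fffffff can become a
/// v_bfrev_b32 of -2 (bitreverse(0x7fffffff) == 0xfffffffe), and a v_mov_b32
/// of 0xffffffc6 can become a v_not_b32 of 57 (~0xffffffc6 == 0x39); both
/// modified values are inline immediates.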
static unsigned canModifyToInlineImmOp32(const SIInstrInfo *TII,
                                         const MachineOperand &Src,
                                         int32_t &ModifiedImm, bool Scalar) {
  if (TII->isInlineConstant(Src))
    return 0;
  int32_t SrcImm = static_cast<int32_t>(Src.getImm());

  if (!Scalar) {
    // We could handle the scalar case here, but we would need to check that
    // SCC is not live as S_NOT_B32 clobbers it. It's probably not worth it, as
    // the reasonable values are already covered by s_movk_i32.
    ModifiedImm = ~SrcImm;
    if (TII->isInlineConstant(APInt(32, ModifiedImm, true)))
      return AMDGPU::V_NOT_B32_e32;
  }

  ModifiedImm = reverseBits<int32_t>(SrcImm);
  if (TII->isInlineConstant(APInt(32, ModifiedImm, true)))
    return Scalar ? AMDGPU::S_BREV_B32 : AMDGPU::V_BFREV_B32_e32;

  return 0;
}

/// Copy implicit register operands from specified instruction to this
/// instruction that are not part of the instruction definition.
void SIShrinkInstructions::copyExtraImplicitOps(MachineInstr &NewMI,
                                                MachineInstr &MI) const {
  MachineFunction &MF = *MI.getMF();
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

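// Try to rewrite S_CMP_* with a 32-bit literal as the 16-bit-immediate
// S_CMPK_* form, e.g. (illustrative) s_cmp_eq_u32 s0, 0xffff becomes
// s_cmpk_eq_u32 s0, 0xffff, dropping the 32-bit literal dword.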
bool SIShrinkInstructions::shrinkScalarCompare(MachineInstr &MI) const {
  if (!ST->hasSCmpK())
    return false;

  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction
  // to get constants on the RHS.
  bool Changed = false;
  if (!MI.getOperand(0).isReg()) {
    if (TII->commuteInstruction(MI, false, 0, 1))
      Changed = true;
  }

  // cmpk requires src0 to be a register
  const MachineOperand &Src0 = MI.getOperand(0);
  if (!Src0.isReg())
    return Changed;

  MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return Changed;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return Changed;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ? AMDGPU::S_CMPK_EQ_I32
                                                     : AMDGPU::S_CMPK_LG_I32;
        Src1.setImm(SignExtend32(Src1.getImm(), 32));
      }

      MI.setDesc(TII->get(SOPKOpc));
      Changed = true;
    }

    return Changed;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((SIInstrInfo::sopkIsZext(SOPKOpc) && isKUImmOperand(Src1)) ||
      (!SIInstrInfo::sopkIsZext(SOPKOpc) && isKImmOperand(Src1))) {
    if (!SIInstrInfo::sopkIsZext(SOPKOpc))
      Src1.setImm(SignExtend64(Src1.getImm(), 32));
    MI.setDesc(NewDesc);
    Changed = true;
  }
  return Changed;
}

// Shrink NSA encoded instructions with contiguous VGPRs to non-NSA encoding.
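// The NSA (non-sequential address) form names each address VGPR individually;
// when the register allocator happens to have placed them contiguously, e.g.
// (illustrative) image_sample v0, [v4, v5, v6], ..., the instruction can be
// re-encoded as image_sample v0, v[4:6], ..., which is smaller.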
bool SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) const {
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  if (!Info)
    return false;

  uint8_t NewEncoding;
  switch (Info->MIMGEncoding) {
  case AMDGPU::MIMGEncGfx10NSA:
    NewEncoding = AMDGPU::MIMGEncGfx10Default;
    break;
  case AMDGPU::MIMGEncGfx11NSA:
    NewEncoding = AMDGPU::MIMGEncGfx11Default;
    break;
  default:
    return false;
  }

  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  unsigned NewAddrDwords = Info->VAddrDwords;
  const TargetRegisterClass *RC;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords == 5) {
    RC = &AMDGPU::VReg_160RegClass;
  } else if (Info->VAddrDwords == 6) {
    RC = &AMDGPU::VReg_192RegClass;
  } else if (Info->VAddrDwords == 7) {
    RC = &AMDGPU::VReg_224RegClass;
  } else if (Info->VAddrDwords == 8) {
    RC = &AMDGPU::VReg_256RegClass;
  } else if (Info->VAddrDwords == 9) {
    RC = &AMDGPU::VReg_288RegClass;
  } else if (Info->VAddrDwords == 10) {
    RC = &AMDGPU::VReg_320RegClass;
  } else if (Info->VAddrDwords == 11) {
    RC = &AMDGPU::VReg_352RegClass;
  } else if (Info->VAddrDwords == 12) {
    RC = &AMDGPU::VReg_384RegClass;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
    NewAddrDwords = 16;
  }

  unsigned VgprBase = 0;
  unsigned NextVgpr = 0;
  bool IsUndef = true;
  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  const unsigned NSAMaxSize = ST->getNSAMaxSize();
  const bool IsPartialNSA = NewAddrDwords > NSAMaxSize;
  const unsigned EndVAddr = IsPartialNSA ? NSAMaxSize : Info->VAddrOperands;
  for (unsigned Idx = 0; Idx < EndVAddr; ++Idx) {
    const MachineOperand &Op = MI.getOperand(VAddr0Idx + Idx);
    unsigned Vgpr = TRI->getHWRegIndex(Op.getReg());
    unsigned Dwords = TRI->getRegSizeInBits(Op.getReg(), *MRI) / 32;
    assert(Dwords > 0 && "Un-implemented for less than 32 bit regs");

    if (Idx == 0) {
      VgprBase = Vgpr;
      NextVgpr = Vgpr + Dwords;
    } else if (Vgpr == NextVgpr) {
      NextVgpr = Vgpr + Dwords;
    } else {
      return false;
    }

    if (!Op.isUndef())
      IsUndef = false;
    if (!Op.isKill())
      IsKill = false;
  }

  if (VgprBase + NewAddrDwords > 256)
    return false;

  // Further check for implicit tied operands - this may be present if TFE is
  // enabled
  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
  int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
  unsigned TFEVal = (TFEIdx == -1) ? 0 : MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = (LWEIdx == -1) ? 0 : MI.getOperand(LWEIdx).getImm();
  int ToUntie = -1;
  if (TFEVal || LWEVal) {
    // TFE/LWE is enabled so we need to deal with an implicit tied operand
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
        // This is the tied operand
        assert(
            ToUntie == -1 &&
            "found more than one tied implicit operand when expecting only 1");
        ToUntie = i;
        MI.untieRegOperand(ToUntie);
      }
    }
  }

  unsigned NewOpcode = AMDGPU::getMIMGOpcode(Info->BaseOpcode, NewEncoding,
                                             Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));
  MI.getOperand(VAddr0Idx).setReg(RC->getRegister(VgprBase));
  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  for (unsigned i = 1; i < EndVAddr; ++i)
    MI.removeOperand(VAddr0Idx + 1);

  if (ToUntie >= 0) {
    MI.tieOperands(
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
        ToUntie - (EndVAddr - 1));
  }
  return true;
}

// Shrink MAD to MADAK/MADMK and FMA to FMAAK/FMAMK.
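// The AK/MK forms encode one 32-bit literal directly in the instruction:
// FMAAK computes Dst = Src0 * Src1 + K and FMAMK computes Dst = Src0 * K +
// Src1, where K is the literal. E.g. (illustrative) v_fma_f32 v0, v1, v2,
// 0x42f60000 can be rewritten as v_fmaak_f32 v0, v1, v2, 0x42f60000.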
bool SIShrinkInstructions::shrinkMadFma(MachineInstr &MI) const {
  // Pre-GFX10 VOP3 instructions like MAD/FMA cannot take a literal operand so
  // there is no reason to try to shrink them.
  if (!ST->hasVOP3Literal())
    return false;

  // There is no advantage to doing this pre-RA.
  if (!IsPostRA)
    return false;

  if (TII->hasAnyModifiersSet(MI))
    return false;

  const unsigned Opcode = MI.getOpcode();
  MachineOperand &Src0 = *TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand &Src1 = *TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  MachineOperand &Src2 = *TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  unsigned NewOpcode = AMDGPU::INSTRUCTION_LIST_END;

  bool Swap;

  // Detect "Dst = VSrc * VGPR + Imm" and convert to AK form.
  if (Src2.isImm() && !TII->isInlineConstant(Src2)) {
    if (Src1.isReg() && TRI->isVGPR(*MRI, Src1.getReg()))
      Swap = false;
    else if (Src0.isReg() && TRI->isVGPR(*MRI, Src0.getReg()))
      Swap = true;
    else
      return false;

    switch (Opcode) {
    default:
      llvm_unreachable("Unexpected mad/fma opcode!");
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADAK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAAK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADAK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = AMDGPU::V_FMAAK_F16;
      break;
    case AMDGPU::V_FMA_F16_gfx9_t16_e64:
      NewOpcode = AMDGPU::V_FMAAK_F16_t16;
      break;
    case AMDGPU::V_FMA_F16_gfx9_fake16_e64:
      NewOpcode = AMDGPU::V_FMAAK_F16_fake16;
      break;
    case AMDGPU::V_FMA_F64_e64:
      if (ST->hasFmaakFmamkF64Insts())
        NewOpcode = AMDGPU::V_FMAAK_F64;
      break;
    }
  }

  // Detect "Dst = VSrc * Imm + VGPR" and convert to MK form.
  if (Src2.isReg() && TRI->isVGPR(*MRI, Src2.getReg())) {
    if (Src1.isImm() && !TII->isInlineConstant(Src1))
      Swap = false;
    else if (Src0.isImm() && !TII->isInlineConstant(Src0))
      Swap = true;
    else
      return false;

    switch (Opcode) {
    default:
      llvm_unreachable("Unexpected mad/fma opcode!");
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADMK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAMK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADMK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = AMDGPU::V_FMAMK_F16;
      break;
    case AMDGPU::V_FMA_F16_gfx9_t16_e64:
      NewOpcode = AMDGPU::V_FMAMK_F16_t16;
      break;
    case AMDGPU::V_FMA_F16_gfx9_fake16_e64:
      NewOpcode = AMDGPU::V_FMAMK_F16_fake16;
      break;
    case AMDGPU::V_FMA_F64_e64:
      if (ST->hasFmaakFmamkF64Insts())
        NewOpcode = AMDGPU::V_FMAMK_F64;
      break;
    }
  }

  if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
    return false;

  if (AMDGPU::isTrue16Inst(NewOpcode) && !shouldShrinkTrue16(MI))
    return false;

  if (Swap) {
    // Swap Src0 and Src1 by building a new instruction.
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(NewOpcode),
            MI.getOperand(0).getReg())
        .add(Src1)
        .add(Src0)
        .add(Src2)
        .setMIFlags(MI.getFlags());
    MI.eraseFromParent();
  } else {
    TII->removeModOperands(MI);
    MI.setDesc(TII->get(NewOpcode));
  }
  return true;
}

/// Attempt to shrink AND/OR/XOR operations requiring non-inlineable literals.
/// For AND or OR, try using S_BITSET{0,1} to clear or set bits.
/// If the inverse of the immediate is legal, use ANDN2, ORN2 or
/// XNOR (as a ^ b == ~(a ^ ~b)).
/// \return ChangeKind::None if no changes were made.
///         ChangeKind::UpdateHint if regalloc hints were updated.
///         ChangeKind::UpdateInst if the instruction was modified.
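///
/// Examples (illustrative):
///   s_and_b32 s0, s0, 0xffff7fff  ->  s_bitset0_b32 s0, 15
///   s_or_b32  s0, s0, 0x20000     ->  s_bitset1_b32 s0, 17
///   s_and_b32 s0, s0, 0xffffffbf  ->  s_andn2_b32   s0, s0, 64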
ChangeKind SIShrinkInstructions::shrinkScalarLogicOp(MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MachineOperand *Dest = &MI.getOperand(0);
  MachineOperand *Src0 = &MI.getOperand(1);
  MachineOperand *Src1 = &MI.getOperand(2);
  MachineOperand *SrcReg = Src0;
  MachineOperand *SrcImm = Src1;

  if (!SrcImm->isImm() ||
      AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST->hasInv2PiInlineImm()))
    return ChangeKind::None;

  uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
  uint32_t NewImm = 0;

  if (Opc == AMDGPU::S_AND_B32) {
    if (isPowerOf2_32(~Imm) &&
        MI.findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr)->isDead()) {
      NewImm = llvm::countr_one(Imm);
      Opc = AMDGPU::S_BITSET0_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ANDN2_B32;
    }
  } else if (Opc == AMDGPU::S_OR_B32) {
    if (isPowerOf2_32(Imm) &&
        MI.findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr)->isDead()) {
      NewImm = llvm::countr_zero(Imm);
      Opc = AMDGPU::S_BITSET1_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ORN2_B32;
    }
  } else if (Opc == AMDGPU::S_XOR_B32) {
    if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_XNOR_B32;
    }
  } else {
    llvm_unreachable("unexpected opcode");
  }

  if (NewImm != 0) {
    if (Dest->getReg().isVirtual() && SrcReg->isReg()) {
      MRI->setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
      MRI->setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
      return ChangeKind::UpdateHint;
    }

    if (SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
      const bool IsUndef = SrcReg->isUndef();
      const bool IsKill = SrcReg->isKill();
      TII->mutateAndCleanupImplicit(MI, TII->get(Opc));
      if (Opc == AMDGPU::S_BITSET0_B32 ||
          Opc == AMDGPU::S_BITSET1_B32) {
        Src0->ChangeToImmediate(NewImm);
        // Remove the immediate and add the tied input.
        MI.getOperand(2).ChangeToRegister(Dest->getReg(), /*IsDef*/ false,
                                          /*isImp*/ false, IsKill,
                                          /*isDead*/ false, IsUndef);
        MI.tieOperands(0, 2);
      } else {
        SrcImm->setImm(NewImm);
      }
      return ChangeKind::UpdateInst;
    }
  }

  return ChangeKind::None;
}

// This is the same as MachineInstr::readsRegister/modifiesRegister except
// it takes subregs into account.
bool SIShrinkInstructions::instAccessReg(
    iterator_range<MachineInstr::const_mop_iterator> &&R, Register Reg,
    unsigned SubReg) const {
  for (const MachineOperand &MO : R) {
    if (!MO.isReg())
      continue;

    if (Reg.isPhysical() && MO.getReg().isPhysical()) {
      if (TRI->regsOverlap(Reg, MO.getReg()))
        return true;
    } else if (MO.getReg() == Reg && Reg.isVirtual()) {
      LaneBitmask Overlap = TRI->getSubRegIndexLaneMask(SubReg) &
                            TRI->getSubRegIndexLaneMask(MO.getSubReg());
      if (Overlap.any())
        return true;
    }
  }
  return false;
}

bool SIShrinkInstructions::instReadsReg(const MachineInstr *MI, unsigned Reg,
                                        unsigned SubReg) const {
  return instAccessReg(MI->uses(), Reg, SubReg);
}

bool SIShrinkInstructions::instModifiesReg(const MachineInstr *MI, unsigned Reg,
                                           unsigned SubReg) const {
  return instAccessReg(MI->defs(), Reg, SubReg);
}

TargetInstrInfo::RegSubRegPair
SIShrinkInstructions::getSubRegForIndex(Register Reg, unsigned Sub,
                                        unsigned I) const {
  if (TRI->getRegSizeInBits(Reg, *MRI) != 32) {
    if (Reg.isPhysical()) {
      Reg = TRI->getSubReg(Reg, TRI->getSubRegFromChannel(I));
    } else {
      Sub = TRI->getSubRegFromChannel(I + TRI->getChannelFromSubReg(Sub));
    }
  }
  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}

void SIShrinkInstructions::dropInstructionKeepingImpDefs(
    MachineInstr &MI) const {
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &Op = MI.getOperand(i);
    if (!Op.isDef())
      continue;
    BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::IMPLICIT_DEF), Op.getReg());
  }

  MI.eraseFromParent();
}

// Match:
// mov t, x
// mov x, y
// mov y, t
//
// =>
//
// mov t, x (t is potentially dead and move eliminated)
// v_swap_b32 x, y
//
// Returns a pointer to the next valid instruction if it was able to create a
// v_swap_b32.
//
// This should not be done too early, so as not to prevent folding that could
// remove the matched moves. It is preferably done before RA, to release the
// saved registers, but also possibly after RA, which can insert copies too.
//
// This is really just a generic peephole that is not a canonical shrinking,
// although requirements match the pass placement and it reduces code size too.
MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
         MovT.getOpcode() == AMDGPU::V_MOV_B16_t16_e32 ||
         MovT.getOpcode() == AMDGPU::COPY);

  Register T = MovT.getOperand(0).getReg();
  unsigned Tsub = MovT.getOperand(0).getSubReg();
  MachineOperand &Xop = MovT.getOperand(1);

  if (!Xop.isReg())
    return nullptr;
  Register X = Xop.getReg();
  unsigned Xsub = Xop.getSubReg();

  unsigned Size = TII->getOpSize(MovT, 0);

  // We can't match v_swap_b16 pre-RA, because VGPR_16_Lo128 registers
  // are not allocatable.
  if (Size == 2 && X.isVirtual())
    return nullptr;

  if (!TRI->isVGPR(*MRI, X))
    return nullptr;

  const unsigned SearchLimit = 16;
  unsigned Count = 0;
  bool KilledT = false;
  for (auto Iter = std::next(MovT.getIterator()),
            E = MovT.getParent()->instr_end();
       Iter != E && Count < SearchLimit && !KilledT; ++Iter) {

    MachineInstr *MovY = &*Iter;
    KilledT = MovY->killsRegister(T, TRI);
    if (MovY->isDebugInstr())
      continue;
    ++Count;

    if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
         MovY->getOpcode() != AMDGPU::V_MOV_B16_t16_e32 &&
         MovY->getOpcode() != AMDGPU::COPY) ||
        !MovY->getOperand(1).isReg() || MovY->getOperand(1).getReg() != T ||
        MovY->getOperand(1).getSubReg() != Tsub)
      continue;

    Register Y = MovY->getOperand(0).getReg();
    unsigned Ysub = MovY->getOperand(0).getSubReg();

    if (!TRI->isVGPR(*MRI, Y))
      continue;

    MachineInstr *MovX = nullptr;
    for (auto IY = MovY->getIterator(), I = std::next(MovT.getIterator());
         I != IY; ++I) {
      if (I->isDebugInstr())
        continue;
      if (instReadsReg(&*I, X, Xsub) || instModifiesReg(&*I, Y, Ysub) ||
          instModifiesReg(&*I, T, Tsub) ||
          (MovX && instModifiesReg(&*I, X, Xsub))) {
        MovX = nullptr;
        break;
      }
      if (!instReadsReg(&*I, Y, Ysub)) {
        if (!MovX && instModifiesReg(&*I, X, Xsub)) {
          MovX = nullptr;
          break;
        }
        continue;
      }
      if (MovX ||
          (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
           I->getOpcode() != AMDGPU::V_MOV_B16_t16_e32 &&
           I->getOpcode() != AMDGPU::COPY) ||
          I->getOperand(0).getReg() != X ||
          I->getOperand(0).getSubReg() != Xsub) {
        MovX = nullptr;
        break;
      }

      if (Size > 4 && (I->getNumImplicitOperands() > (I->isCopy() ? 0U : 1U)))
        continue;

      MovX = &*I;
    }

    if (!MovX)
      continue;

    LLVM_DEBUG(dbgs() << "Matched v_swap:\n" << MovT << *MovX << *MovY);

    MachineBasicBlock &MBB = *MovT.getParent();
    SmallVector<MachineInstr *, 4> Swaps;
    if (Size == 2) {
      auto *MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
                          TII->get(AMDGPU::V_SWAP_B16))
                      .addDef(X)
                      .addDef(Y)
                      .addReg(Y)
                      .addReg(X)
                      .getInstr();
      Swaps.push_back(MIB);
    } else {
      assert(Size > 0 && Size % 4 == 0);
      for (unsigned I = 0; I < Size / 4; ++I) {
        TargetInstrInfo::RegSubRegPair X1, Y1;
        X1 = getSubRegForIndex(X, Xsub, I);
        Y1 = getSubRegForIndex(Y, Ysub, I);
        auto *MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
                            TII->get(AMDGPU::V_SWAP_B32))
                        .addDef(X1.Reg, {}, X1.SubReg)
                        .addDef(Y1.Reg, {}, Y1.SubReg)
                        .addReg(Y1.Reg, {}, Y1.SubReg)
                        .addReg(X1.Reg, {}, X1.SubReg)
                        .getInstr();
        Swaps.push_back(MIB);
      }
    }
    // Drop implicit EXEC.
    if (MovX->hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
      for (MachineInstr *Swap : Swaps) {
        Swap->removeOperand(Swap->getNumExplicitOperands());
        Swap->copyImplicitOps(*MBB.getParent(), *MovX);
      }
    }
    MovX->eraseFromParent();
    dropInstructionKeepingImpDefs(*MovY);
    MachineInstr *Next = &*std::next(MovT.getIterator());

    if (T.isVirtual() && MRI->use_nodbg_empty(T)) {
      dropInstructionKeepingImpDefs(MovT);
    } else {
      Xop.setIsKill(false);
      for (int I = MovT.getNumImplicitOperands() - 1; I >= 0; --I) {
        unsigned OpNo = MovT.getNumExplicitOperands() + I;
        const MachineOperand &Op = MovT.getOperand(OpNo);
        if (Op.isKill() && TRI->regsOverlap(X, Op.getReg()))
          MovT.removeOperand(OpNo);
      }
    }

    return Next;
  }

  return nullptr;
}

// If an instruction has a dead sdst operand, replace it with the NULL register
// (gfx1030+).
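// E.g. (illustrative) V_ADD_CO_U32_e64 with an unused carry-out SGPR pair can
// write the carry to "null" instead, so no SGPRs need to be allocated for it.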
bool SIShrinkInstructions::tryReplaceDeadSDST(MachineInstr &MI) const {
  if (!ST->hasGFX10_3Insts())
    return false;

  MachineOperand *Op = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  if (!Op)
    return false;
  Register SDstReg = Op->getReg();
  if (SDstReg.isPhysical() || !MRI->use_nodbg_empty(SDstReg))
    return false;

  Op->setReg(ST->isWave32() ? AMDGPU::SGPR_NULL : AMDGPU::SGPR_NULL64);
  return true;
}

bool SIShrinkInstructions::run(MachineFunction &MF) {

  this->MF = &MF;
  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  IsPostRA = MF.getProperties().hasNoVRegs();

  unsigned VCCReg = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of
        // materializing sign bits.
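        // E.g. (illustrative) v_mov_b32 v0, 0x80000000 can become
        // v_bfrev_b32 v0, 1.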

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() && IsPostRA) {
          int32_t ModImm;
          unsigned ModOpcode =
              canModifyToInlineImmOp32(TII, Src, ModImm, /*Scalar=*/false);
          if (ModOpcode != 0) {
            MI.setDesc(TII->get(ModOpcode));
            Src.setImm(static_cast<int64_t>(ModImm));
            Changed = true;
            continue;
          }
        }
      }

      if (ST->hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                            MI.getOpcode() == AMDGPU::V_MOV_B16_t16_e32 ||
                            MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI)) {
          Next = NextMI->getIterator();
          Changed = true;
          continue;
        }
      }

      // Try to use S_ADDK_I32 and S_MULK_I32.
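      // E.g. (illustrative) s_add_i32 s0, s0, 0x1234 can become
      // s_addk_i32 s0, 0x1234, which encodes the immediate in 16 bits.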
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2)) {
            std::swap(Src0, Src1);
            Changed = true;
          }
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (Dest->getReg().isVirtual() && Src0->isReg()) {
          MRI->setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI->setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(*Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32)
                               ? AMDGPU::S_ADDK_I32
                               : AMDGPU::S_MULK_I32;

            Src1->setImm(SignExtend64(Src1->getImm(), 32));
            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
            Changed = true;
          }
        }
      }

      // Try to use s_cmpk_*
      if (MI.isCompare() && TII->isSOPC(MI)) {
        Changed |= shrinkScalarCompare(MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
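      // E.g. (illustrative) s_mov_b32 s0, 0xffff8000 can become
      // s_movk_i32 s0, 0x8000, since the 16-bit immediate is sign-extended.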
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() && Dst.getReg().isPhysical()) {
          unsigned ModOpc;
          int32_t ModImm;
          if (isKImmOperand(Src)) {
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
            Src.setImm(SignExtend64(Src.getImm(), 32));
            Changed = true;
          } else if ((ModOpc = canModifyToInlineImmOp32(TII, Src, ModImm,
                                                        /*Scalar=*/true))) {
            MI.setDesc(TII->get(ModOpc));
            Src.setImm(static_cast<int64_t>(ModImm));
            Changed = true;
          }
        }

        continue;
      }

      // Shrink scalar logic operations.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        ChangeKind CK = shrinkScalarLogicOp(MI);
        if (CK == ChangeKind::UpdateHint)
          continue;
        Changed |= (CK == ChangeKind::UpdateInst);
      }

      if (IsPostRA && TII->isMIMG(MI.getOpcode()) &&
          ST->getGeneration() >= AMDGPUSubtarget::GFX10) {
        Changed |= shrinkMIMG(MI);
        continue;
      }

      if (!TII->isVOP3(MI))
        continue;

      if (MI.getOpcode() == AMDGPU::V_MAD_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_MAD_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_gfx9_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_gfx9_t16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_gfx9_fake16_e64 ||
          (MI.getOpcode() == AMDGPU::V_FMA_F64_e64 &&
           ST->hasFmaakFmamkF64Insts())) {
        Changed |= shrinkMadFma(MI);
        continue;
      }

      // If there is no chance we will shrink it and use VCC as sdst to get
      // a 32-bit form, try to replace a dead sdst with NULL.
      if (TII->isVOP3(MI.getOpcode())) {
        Changed |= tryReplaceDeadSDST(MI);
        if (!TII->hasVALU32BitEncoding(MI.getOpcode())) {
          continue;
        }
      }

      if (!TII->canShrink(MI, *MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !TII->canShrink(MI, *MRI)) {
          Changed |= tryReplaceDeadSDST(MI);
          continue;
        }

        // Operands were commuted.
        Changed = true;
      }

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        MachineOperand &Op0 = MI.getOperand(0);
        if (Op0.isReg()) {
          // Exclude VOPCX instructions as these don't explicitly write a
          // dst.
          Register DstReg = Op0.getReg();
          if (DstReg.isVirtual()) {
            // VOPC instructions can only write to the VCC register. We can't
            // force them to use VCC here, because this is only one register
            // and cannot deal with sequences which would require multiple
            // copies of VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
            //
            // So, instead of forcing the instruction to write to VCC, we
            // provide a hint to the register allocator to use VCC and then we
            // will run this pass again after RA and shrink it if it outputs to
            // VCC.
            MRI->setRegAllocationHint(DstReg, 0, VCCReg);
            continue;
          }
          if (DstReg != VCCReg)
            continue;
        }
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        Register SReg = Src2->getReg();
        if (SReg.isVirtual()) {
          MRI->setRegAllocationHint(SReg, 0, VCCReg);
          continue;
        }
        if (SReg != VCCReg)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      if (SDst) {
        bool Next = false;

        if (SDst->getReg() != VCCReg) {
          if (SDst->getReg().isVirtual())
            MRI->setRegAllocationHint(SDst->getReg(), 0, VCCReg);
          Next = true;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                          AMDGPU::OpName::src2);
        if (Src2 && Src2->getReg() != VCCReg) {
          if (Src2->getReg().isVirtual())
            MRI->setRegAllocationHint(Src2->getReg(), 0, VCCReg);
          Next = true;
        }

        if (Next)
          continue;
      }

      // Pre-GFX10, shrinking VOP3 instructions pre-RA gave us the chance to
      // fold an immediate into the shrunk instruction as a literal operand. In
      // GFX10 VOP3 instructions can take a literal operand anyway, so there is
      // no advantage to doing this.
      // However, if 64-bit literals are allowed we still need to shrink the
      // instruction for such a literal to be able to fold.
      if (ST->hasVOP3Literal() &&
          (!ST->has64BitLiterals() || AMDGPU::isTrue16Inst(MI.getOpcode())) &&
          !IsPostRA)
        continue;

      if (ST->hasTrue16BitInsts() && AMDGPU::isTrue16Inst(MI.getOpcode()) &&
          !shouldShrinkTrue16(MI))
        continue;

      // We can shrink this instruction
      LLVM_DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MI);

      // Copy deadness from the old explicit vcc def to the new implicit def.
      if (SDst && SDst->isDead())
        Inst32->findRegisterDefOperand(VCCReg, /*TRI=*/nullptr)->setIsDead();

      MI.eraseFromParent();
      foldImmediates(*Inst32);

      LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
      Changed = true;
    }
  }
  return Changed;
}

bool SIShrinkInstructionsLegacy::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  return SIShrinkInstructions().run(MF);
}

PreservedAnalyses
SIShrinkInstructionsPass::run(MachineFunction &MF,
                              MachineFunctionAnalysisManager &) {
  if (MF.getFunction().hasOptNone() || !SIShrinkInstructions().run(MF))
    return PreservedAnalyses::all();

  auto PA = getMachineFunctionPassPreservedAnalyses();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}