//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()},
                 getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
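
// Illustrative usage sketch (not part of the builder API): callers typically
// reach for materializePtrAdd when the offset may be zero and no instruction
// should be emitted in that case. Builder and register names below are
// hypothetical.
//
//   Register OffsetPtr;
//   if (auto PtrAdd = MIRBuilder.materializePtrAdd(OffsetPtr, BasePtr,
//                                                  LLT::scalar(64), Offset))
//     ; // a G_CONSTANT + G_PTR_ADD were emitted; OffsetPtr holds the sum.
//   else
//     ; // Offset was 0; OffsetPtr simply aliases BasePtr.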

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
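
// Example of the padding behaviour above, as a sketch (LLTs are in the usual
// GlobalISel notation; register names are hypothetical). Padding a
// <2 x s32> source to a <4 x s32> result emits roughly:
//
//   %e0:_(s32), %e1:_(s32) = G_UNMERGE_VALUES %src(<2 x s32>)
//   %u:_(s32) = G_IMPLICIT_DEF
//   %dst:_(<4 x s32>) = G_BUILD_VECTOR %e0(s32), %e1(s32), %u(s32), %u(s32)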

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}
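
// Conversely, deleting trailing elements unmerges the source and rebuilds a
// shorter vector. A sketch for <4 x s32> -> <2 x s32> (hypothetical names):
//
//   %e0:_(s32), %e1:_(s32), %e2:_(s32), %e3:_(s32) = G_UNMERGE_VALUES %src(<4 x s32>)
//   %dst:_(<2 x s32>) = G_BUILD_VECTOR %e0(s32), %e1(s32)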

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
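
// Note on the fixed-vector path above: the scalar G_CONSTANT is replicated
// with a G_BUILD_VECTOR via buildSplatBuildVector. A minimal sketch with
// hypothetical virtual registers:
//
//   %c:_(s32) = G_CONSTANT i32 42
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %c(s32), %c(s32), %c(s32), %c(s32)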

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder
MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
                                       const ConstantPtrAuth *CPA,
                                       Register Addr, Register AddrDisc) {
  auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addUse(Addr);
  MIB.addImm(CPA->getKey()->getZExtValue());
  MIB.addUse(AddrDisc);
  MIB.addImm(CPA->getDiscriminator()->getZExtValue());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
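
// For a non-zero offset the helper above expands to a constant, a pointer
// add and the load itself, e.g. loading an s32 at BasePtr + 8 through a
// 64-bit pointer (a sketch with hypothetical registers):
//
//   %off:_(s64) = G_CONSTANT i64 8
//   %addr:_(p0) = G_PTR_ADD %base(p0), %off(s64)
//   %val:_(s32) = G_LOAD %addr(p0) :: (load (s32))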

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
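
// In other words, the extension opcode follows the target's boolean contents:
// ZeroOrNegativeOne -> G_SEXT, ZeroOrOne -> G_ZEXT, anything else -> G_ANYEXT.
// A sketch of how this is typically consumed (builder/register names are
// hypothetical):
//
//   unsigned ExtOp = MIRBuilder.getBoolExtOp(/*IsVec=*/false, /*IsFP=*/false);
//   MIRBuilder.buildInstr(ExtOp, {WideTy}, {BoolReg});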

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
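
// buildExtOrTrunc picks the opcode from the relative sizes: the requested
// extension when widening, G_TRUNC when narrowing, and a plain COPY when the
// sizes match. Via buildSExtOrTrunc below, an s32 source would give
// (hypothetical registers):
//
//   %a:_(s64) = G_SEXT %x(s32)    ; widening
//   %b:_(s16) = G_TRUNC %x(s32)   ; narrowing
//   %c:_(s32) = COPY %x(s32)      ; same size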

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointerOrPointerVector() &&
           !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
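
// Summary of the opcode choice made by getOpcodeForMerge (illustrative):
//   scalar result from scalars : G_MERGE_VALUES
//   vector result from scalars : G_BUILD_VECTOR
//   vector result from vectors : G_CONCAT_VECTORS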

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}
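
// The single-LLT overload above splits the source purely by size, so
// unmerging an s64 into s16 pieces yields four results (hypothetical
// registers):
//
//   %p0:_(s16), %p1:_(s16), %p2:_(s16), %p3:_(s16) = G_UNMERGE_VALUES %x(s64)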

MachineInstrBuilder
MachineIRBuilder::buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,
                               const SrcOp &Op) {
  LLT OpTy = Op.getLLTTy(*getMRI());
  unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
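
// buildShuffleSplat therefore lowers a splat as insert-into-undef followed by
// a zero-mask shuffle, roughly (a sketch with hypothetical registers):
//
//   %undef:_(<4 x s32>) = G_IMPLICIT_DEF
//   %zero:_(s64) = G_CONSTANT i64 0
//   %ins:_(<4 x s32>) = G_INSERT_VECTOR_ELT %undef, %val(s32), %zero(s64)
//   %splat:_(<4 x s32>) = G_SHUFFLE_VECTOR %ins(<4 x s32>), %undef, shufflemask(0, 0, 0, 0)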

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  const LLT DstElemTy = DstTy.isVector() ? DstTy.getElementType() : DstTy;
  const LLT ElemTy1 = Src1Ty.isVector() ? Src1Ty.getElementType() : Src1Ty;
  const LLT ElemTy2 = Src2Ty.isVector() ? Src2Ty.getElementType() : Src2Ty;
  assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
  (void)DstElemTy;
  (void)ElemTy1;
  (void)ElemTy2;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
                                                      unsigned Step) {
  unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
                                     APInt(Bitwidth, Step));
  auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
  StepVector->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), StepVector);
  StepVector.addCImm(CI);
  return StepVector;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {

  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const APInt &MinElts) {
  ConstantInt *CI =
      ConstantInt::get(getMF().getFunction().getContext(), MinElts);
  return buildVScale(Res, *CI);
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (Register ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder
MachineIRBuilder::buildTrunc(const DstOp &Res, const SrcOp &Op,
                             std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
                                                           const SrcOp &Src0,
                                                           const SrcOp &Src1,
                                                           unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMaximum(const DstOp &OldValRes,
                                         const SrcOp &Addr, const SrcOp &Val,
                                         MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAXIMUM, OldValRes, Addr,
                        Val, MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMinimum(const DstOp &OldValRes,
                                         const SrcOp &Addr, const SrcOp &Val,
                                         MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMINIMUM, OldValRes, Addr,
                        Val, MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder
MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                             ArrayRef<SrcOp> SrcOps,
                             std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}