1//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
/// This file implements the MachineIRBuilder class.
10//===----------------------------------------------------------------------===//
11#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
12#include "llvm/CodeGen/MachineFunction.h"
13#include "llvm/CodeGen/MachineInstr.h"
14#include "llvm/CodeGen/MachineInstrBuilder.h"
15#include "llvm/CodeGen/MachineRegisterInfo.h"
16#include "llvm/CodeGen/TargetInstrInfo.h"
17#include "llvm/CodeGen/TargetLowering.h"
18#include "llvm/CodeGen/TargetOpcodes.h"
19#include "llvm/CodeGen/TargetSubtargetInfo.h"
20#include "llvm/IR/DebugInfoMetadata.h"
21
22using namespace llvm;
23
// Point the builder at a new function: cache per-function objects (register
// info, instruction info) and clear all per-insertion state (block, insert
// point, debug location, instruction metadata, observer).
void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
35
36//------------------------------------------------------------------------------
37// Build instruction variants.
38//------------------------------------------------------------------------------
39
// Create a new instruction carrying the builder's current debug location and
// instruction-level metadata (PC sections, MMRA, deactivation symbol) without
// inserting it into any basic block.
MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(
      MF&: getMF(),
      MIMD: {getDL(), getPCSections(), getMMRAMetadata(), getDeactivationSymbol()},
      MCID: getTII().get(Opcode));
}
46
// Insert an already-built instruction at the current insertion point and
// notify the change observer (if one is attached).
MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(I: getInsertPt(), MI: MIB);
  recordInsertion(InsertedInstr: MIB);
  return MIB;
}
52
// Build and insert a DBG_VALUE describing \p Variable as living directly in
// \p Reg (not a memory location).
MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(MIB: BuildMI(MF&: getMF(), DL: getDL(),
                            MCID: getTII().get(Opcode: TargetOpcode::DBG_VALUE),
                            /*IsIndirect*/ false, Reg, Variable, Expr));
}
65
// Build and insert a DBG_VALUE describing \p Variable as living at the memory
// address held in \p Reg (IsIndirect = true).
MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(MIB: BuildMI(MF&: getMF(), DL: getDL(),
                            MCID: getTII().get(Opcode: TargetOpcode::DBG_VALUE),
                            /*IsIndirect*/ true, Reg, Variable, Expr));
}
78
// Build and insert a DBG_VALUE describing \p Variable as living in stack frame
// slot \p FI. Uses buildInstrNoInsert so the instruction picks up the
// builder's current metadata; the extra 0 immediate is the DBG_VALUE
// "offset/indirection" operand.
MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(MIB: buildInstrNoInsert(Opcode: TargetOpcode::DBG_VALUE)
                         .addFrameIndex(Idx: FI)
                         .addImm(Val: 0)
                         .addMetadata(MD: Variable)
                         .addMetadata(MD: Expr));
}
93
// Build and insert a DBG_VALUE describing \p Variable as having the constant
// value \p C. The constant is encoded as the most specific machine operand
// available (CImm / Imm / FPImm); unsupported constants degrade to $noreg,
// which marks the variable location as unknown rather than dropping the
// debug info entirely.
MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(Opcode: TargetOpcode::DBG_VALUE);

  // Look through a ConstantExpr inttoptr wrapper so the underlying numeric
  // constant can still be encoded directly.
  auto *NumericConstant = [&] () -> const Constant* {
    if (const auto *CE = dyn_cast<ConstantExpr>(Val: &C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(i_nocapture: 0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(Val: NumericConstant)) {
    // Wider than 64 bits: keep the full ConstantInt. i1 is zero-extended so
    // "true" becomes 1, not -1; everything else is sign-extended into the
    // 64-bit immediate.
    if (CI->getBitWidth() > 64)
      MIB.addCImm(Val: CI);
    else if (CI->getBitWidth() == 1)
      MIB.addImm(Val: CI->getZExtValue());
    else
      MIB.addImm(Val: CI->getSExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(Val: NumericConstant)) {
    MIB.addFPImm(Val: CFP);
  } else if (isa<ConstantPointerNull>(Val: NumericConstant)) {
    MIB.addImm(Val: 0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(RegNo: Register());
  }

  // Trailing operands shared by all DBG_VALUE forms: offset, variable,
  // expression.
  MIB.addImm(Val: 0).addMetadata(MD: Variable).addMetadata(MD: Expr);
  return insertInstr(MIB);
}
130
131MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
132 assert(isa<DILabel>(Label) && "not a label");
133 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
134 "Expected inlined-at fields to agree");
135 auto MIB = buildInstr(Opcode: TargetOpcode::DBG_LABEL);
136
137 return MIB.addMetadata(MD: Label);
138}
139
// Build and insert Res = G_DYN_STACKALLOC Size, Alignment: a dynamic stack
// allocation of \p Size bytes aligned to \p Alignment, producing a pointer.
MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(Opcode: TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(MRI&: *getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Val: Alignment.value());
  return MIB;
}
150
// Build and insert Res = G_FRAME_INDEX Idx: materialize the address of stack
// frame object \p Idx as a pointer.
MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(Opcode: TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(MRI&: *getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}
159
// Build and insert Res = G_GLOBAL_VALUE GV: materialize the address of
// global \p GV. The result's address space must match the global's.
MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(Opcode: TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(MRI&: *getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}
172
// Build and insert Res = G_CONSTANT_POOL Idx: materialize the address of
// constant-pool entry \p Idx as a pointer.
MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(Opcode: TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(MRI&: *getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}
181
182MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
183 unsigned JTI) {
184 return buildInstr(Opc: TargetOpcode::G_JUMP_TABLE, DstOps: {PtrTy}, SrcOps: {})
185 .addJumpTableIndex(Idx: JTI);
186}
187
// Assert-only validation for generic unary ops: result must be scalar or
// vector and must match the operand type exactly. No-op in release builds.
void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}
192
// Assert-only validation for generic binary ops: result must be scalar or
// vector and both operands must match the result type.
void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}
198
// Assert-only validation for shifts: only the value operand must match the
// result type. Op1 (the shift amount) is deliberately unchecked — its type
// may differ from the result type.
void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}
204
// Build and insert Res = G_PTR_ADD Op0, Op1: pointer (or pointer-vector)
// plus integer offset, optionally carrying MI flags such as NoUWrap.
MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type");

  return buildInstr(Opc: TargetOpcode::G_PTR_ADD, DstOps: {Res}, SrcOps: {Op0, Op1}, Flags);
}
214
215MachineInstrBuilder MachineIRBuilder::buildObjectPtrOffset(const DstOp &Res,
216 const SrcOp &Op0,
217 const SrcOp &Op1) {
218 return buildPtrAdd(Res, Op0, Op1,
219 Flags: MachineInstr::MIFlag::NoUWrap |
220 MachineInstr::MIFlag::InBounds);
221}
222
// Materialize Op0 + Value into a fresh register, returning the G_PTR_ADD, or
// std::nullopt when Value is zero (Res is then simply aliased to Op0 and no
// instruction is emitted). \p Res is an output parameter and must come in
// unset.
std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value,
                                    std::optional<unsigned> Flags) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  // Zero offset: reuse the base pointer directly.
  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(Ty: getMRI()->getType(Reg: Op0));
  auto Cst = buildConstant(Res: ValueTy, Val: Value);
  return buildPtrAdd(Res, Op0, Op1: Cst.getReg(Idx: 0), Flags);
}
239
240std::optional<MachineInstrBuilder> MachineIRBuilder::materializeObjectPtrOffset(
241 Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) {
242 return materializePtrAdd(Res, Op0, ValueTy, Value,
243 Flags: MachineInstr::MIFlag::NoUWrap |
244 MachineInstr::MIFlag::InBounds);
245}
246
// Build and insert Res = G_PTRMASK Op0, Mask clearing the low \p NumBits bits
// of the pointer (e.g. for alignment). The mask constant has all bits set
// except the low NumBits.
MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(MRI: *getMRI());
  LLT MaskTy = LLT::scalar(SizeInBits: PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(Ty: MaskTy);
  buildConstant(Res: MaskReg, Val: maskTrailingZeros<uint64_t>(N: NumBits));
  return buildPtrMask(Res, Op0, Op1: MaskReg);
}
256
// Widen Op0 to the (larger) vector type of Res by appending undef elements:
// unmerge Op0 into its elements (or treat a scalar Op0 as a single element),
// pad with G_IMPLICIT_DEF up to the result element count, then re-merge.
MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(MRI: *getMRI());
  LLT Op0Ty = Op0.getLLTTy(MRI: *getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Res: Op0Ty.getElementType(), Op: Op0);

    // Collect every element produced by the unmerge.
    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Elt: Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Elt: Op0.getReg());
  }
  // A single undef register can be reused for all padding positions.
  Register Undef =
      buildUndef(Res: Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(Idx: 0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Elt: Undef);
  return buildMergeLikeInstr(Res, Ops: Regs);
}
287
// Narrow vector Op0 to the type of Res by dropping trailing elements:
// unmerge Op0 and re-merge only the leading elements. A scalar Res takes
// just the first element (via COPY).
MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(MRI: *getMRI());
  LLT Op0Ty = Op0.getLLTTy(MRI: *getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Res: Op0Ty.getElementType(), Op: Op0);
  // Scalar destination: keep only the first element.
  if (ResTy.isScalar())
    return buildCopy(Res, Op: Unmerge.getReg(Idx: 0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Elt: Unmerge.getReg(Idx: i));
  return buildMergeLikeInstr(Res, Ops: Regs);
}
311
312MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
313 return buildInstr(Opcode: TargetOpcode::G_BR).addMBB(MBB: &Dest);
314}
315
316MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
317 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
318 return buildInstr(Opcode: TargetOpcode::G_BRINDIRECT).addUse(RegNo: Tgt);
319}
320
321MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
322 unsigned JTI,
323 Register IndexReg) {
324 assert(getMRI()->getType(TablePtr).isPointer() &&
325 "Table reg must be a pointer");
326 return buildInstr(Opcode: TargetOpcode::G_BRJT)
327 .addUse(RegNo: TablePtr)
328 .addJumpTableIndex(Idx: JTI)
329 .addUse(RegNo: IndexReg);
330}
331
332MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
333 const SrcOp &Op) {
334 return buildInstr(Opc: TargetOpcode::COPY, DstOps: Res, SrcOps: Op);
335}
336
// Build and insert Res = G_CONSTANT Val. For a fixed-vector destination the
// scalar constant is built once and splatted with G_BUILD_VECTOR. The scalar
// G_CONSTANT gets an empty debug location since constants are position
// independent.
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(MRI: *getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(Opcode: TargetOpcode::G_CONSTANT)
    .addDef(RegNo: getMRI()->createGenericVirtualRegister(Ty: EltTy))
    .addCImm(Val: &Val);
    return buildSplatBuildVector(Res, Src: Const);
  }

  auto Const = buildInstr(Opcode: TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(MRI&: *getMRI(), MIB&: Const);
  Const.addCImm(Val: &Val);
  return Const;
}
361
// Convenience overload: wrap \p Val in a ConstantInt of the destination's
// scalar width and defer to the ConstantInt overload.
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(C&: getMF().getFunction().getContext(),
                               NumBits: Res.getLLTTy(MRI: *getMRI()).getScalarSizeInBits());
  // TODO: Avoid implicit trunc?
  // See https://github.com/llvm/llvm-project/issues/112510.
  ConstantInt *CI = ConstantInt::getSigned(Ty: IntN, V: Val, /*implicitTrunc=*/ImplicitTrunc: true);
  return buildConstant(Res, Val: *CI);
}
371
// Build and insert Res = G_FCONSTANT Val. Mirrors the integer buildConstant:
// fixed vectors splat a single scalar G_FCONSTANT via G_BUILD_VECTOR, and the
// scalar constant gets an empty debug location.
MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(MRI: *getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(Opcode: TargetOpcode::G_FCONSTANT)
    .addDef(RegNo: getMRI()->createGenericVirtualRegister(Ty: EltTy))
    .addFPImm(Val: &Val);

    return buildSplatBuildVector(Res, Src: Const);
  }

  auto Const = buildInstr(Opcode: TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(MRI&: *getMRI(), MIB&: Const);
  Const.addFPImm(Val: &Val);
  return Const;
}
401
402MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
403 const APInt &Val) {
404 ConstantInt *CI = ConstantInt::get(Context&: getMF().getFunction().getContext(), V: Val);
405 return buildConstant(Res, Val: *CI);
406}
407
408MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
409 double Val) {
410 LLT DstTy = Res.getLLTTy(MRI: *getMRI());
411 auto &Ctx = getMF().getFunction().getContext();
412 auto *CFP =
413 ConstantFP::get(Context&: Ctx, V: getAPFloatFromSize(Val, Size: DstTy.getScalarSizeInBits()));
414 return buildFConstant(Res, Val: *CFP);
415}
416
417MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
418 const APFloat &Val) {
419 auto &Ctx = getMF().getFunction().getContext();
420 auto *CFP = ConstantFP::get(Context&: Ctx, V: Val);
421 return buildFConstant(Res, Val: *CFP);
422}
423
// Build and insert Res = G_PTRAUTH_GLOBAL_VALUE: a pointer-authentication
// signed global. Operand order is (addr, key, addr-discriminator register,
// integer discriminator), taken from the ConstantPtrAuth descriptor.
MachineInstrBuilder
MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
                                       const ConstantPtrAuth *CPA,
                                       Register Addr, Register AddrDisc) {
  auto MIB = buildInstr(Opcode: TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
  Res.addDefToMIB(MRI&: *getMRI(), MIB);
  MIB.addUse(RegNo: Addr);
  MIB.addImm(Val: CPA->getKey()->getZExtValue());
  MIB.addUse(RegNo: AddrDisc);
  MIB.addImm(Val: CPA->getDiscriminator()->getZExtValue());
  return MIB;
}
436
// Build and insert G_BRCOND Tst, Dest: a conditional branch on the scalar
// test value \p Tst.
MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(Opcode: TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(MBB: &Dest);
  return MIB;
}
446
// Build and insert a G_LOAD, creating the MachineMemOperand from the given
// pointer info, alignment and flags. MOLoad is added implicitly; the caller
// must not pass MOStore.
MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(MRI: *getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, f: MMOFlags, MemTy: Ty, base_alignment: Alignment, AAInfo);
  return buildLoad(Res: Dst, Addr, MMO&: *MMO);
}
460
// Build and insert Res = <Opcode> Addr with the supplied memory operand.
// Shared implementation for G_LOAD and its extending/atomic variants.
MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(MRI&: *getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(MMO: &MMO);
  return MIB;
}
474
// Build and insert a load from BasePtr + Offset, deriving a new memory
// operand from \p BaseMMO at that offset. A zero offset loads through
// BasePtr directly (possibly with a different size/type than BaseMMO);
// otherwise a G_PTR_ADD computes the address first.
MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
  const DstOp &Dst, const SrcOp &BasePtr,
  MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(MRI: *getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(MMO: &BaseMMO, Offset, Ty: LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Res: Dst, Addr: BasePtr, MMO&: *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(MRI: *getMRI());
  LLT OffsetTy = LLT::scalar(SizeInBits: PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(Res: OffsetTy, Val: Offset);
  auto Ptr = buildPtrAdd(Res: PtrTy, Op0: BasePtr, Op1: ConstOffset);
  return buildLoad(Res: Dst, Addr: Ptr, MMO&: *OffsetMMO);
}
491
// Build and insert G_STORE Val, Addr with the supplied memory operand.
MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode: TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(MMO: &MMO);
  return MIB;
}
504
// Build and insert a G_STORE, creating the MachineMemOperand from the given
// pointer info, alignment and flags. MOStore is added implicitly; the caller
// must not pass MOLoad.
MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(MRI: *getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, f: MMOFlags, MemTy: Ty, base_alignment: Alignment, AAInfo);
  return buildStore(Val, Addr, MMO&: *MMO);
}
518
519MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
520 const SrcOp &Op) {
521 return buildInstr(Opc: TargetOpcode::G_ANYEXT, DstOps: Res, SrcOps: Op);
522}
523
524MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
525 const SrcOp &Op) {
526 return buildInstr(Opc: TargetOpcode::G_SEXT, DstOps: Res, SrcOps: Op);
527}
528
529MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
530 const SrcOp &Op,
531 std::optional<unsigned> Flags) {
532 return buildInstr(Opc: TargetOpcode::G_ZEXT, DstOps: Res, SrcOps: Op, Flags);
533}
534
535unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
536 const auto *TLI = getMF().getSubtarget().getTargetLowering();
537 switch (TLI->getBooleanContents(isVec: IsVec, isFloat: IsFP)) {
538 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
539 return TargetOpcode::G_SEXT;
540 case TargetLoweringBase::ZeroOrOneBooleanContent:
541 return TargetOpcode::G_ZEXT;
542 default:
543 return TargetOpcode::G_ANYEXT;
544 }
545}
546
547MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
548 const SrcOp &Op,
549 bool IsFP) {
550 unsigned ExtOp = getBoolExtOp(IsVec: getMRI()->getType(Reg: Op.getReg()).isVector(), IsFP);
551 return buildInstr(Opc: ExtOp, DstOps: Res, SrcOps: Op);
552}
553
// Canonicalize an in-register boolean to the target's representation:
// sign-extend-in-reg from bit 1 for all-ones booleans, zero-extend-in-reg for
// 0/1 booleans, or a plain copy when boolean contents are undefined.
MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(isVec: IsVector, isFloat: IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, ImmOp: 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, ImmOp: 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}
570
// Build Res from Op using \p ExtOpc when Res is wider, G_TRUNC when it is
// narrower, and COPY when sizes match. Both must be scalars or both vectors.
MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(MRI: *getMRI()).getSizeInBits() >
      Op.getLLTTy(MRI: *getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(MRI: *getMRI()).getSizeInBits() <
           Op.getLLTTy(MRI: *getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    // Equal sizes: keep COPY, but double-check in asserts builds.
    assert(Res.getLLTTy(*getMRI()).getSizeInBits() ==
           Op.getLLTTy(*getMRI()).getSizeInBits());

  return buildInstr(Opc: Opcode, DstOps: Res, SrcOps: Op);
}
595
596MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
597 const SrcOp &Op) {
598 return buildExtOrTrunc(ExtOpc: TargetOpcode::G_SEXT, Res, Op);
599}
600
601MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
602 const SrcOp &Op) {
603 return buildExtOrTrunc(ExtOpc: TargetOpcode::G_ZEXT, Res, Op);
604}
605
606MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
607 const SrcOp &Op) {
608 return buildExtOrTrunc(ExtOpc: TargetOpcode::G_ANYEXT, Res, Op);
609}
610
611MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
612 const SrcOp &Op,
613 int64_t ImmOp) {
614 LLT ResTy = Res.getLLTTy(MRI: *getMRI());
615 auto Mask = buildConstant(
616 Res: ResTy, Val: APInt::getLowBitsSet(numBits: ResTy.getScalarSizeInBits(), loBitsSet: ImmOp));
617 return buildAnd(Dst: Res, Src0: Op, Src1: Mask);
618}
619
// Build the appropriate same-size cast between Src and Dst: COPY for equal
// types, G_PTRTOINT / G_INTTOPTR when exactly one side is pointer-typed, and
// G_BITCAST otherwise.
MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(MRI: *getMRI());
  LLT DstTy = Dst.getLLTTy(MRI: *getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Res: Dst, Op: Src);

  unsigned Opcode;
  if (SrcTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointerOrPointerVector() &&
           !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opc: Opcode, DstOps: Dst, SrcOps: Src);
}
640
// Build and insert Dst = G_EXTRACT Src, Index: extract DstTy-sized bits
// starting at bit \p Index. A full-width extract degenerates to a cast.
MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(MRI: *getMRI());
  LLT DstTy = Dst.getLLTTy(MRI: *getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  // Extracting the whole register is just a cast/copy.
  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(Opcode: TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(MRI&: *getMRI(), MIB&: Extract);
  Src.addSrcToMIB(MIB&: Extract);
  Extract.addImm(Val: Index);
  return Extract;
}
665
666MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
667 return buildInstr(Opc: TargetOpcode::G_IMPLICIT_DEF, DstOps: {Res}, SrcOps: {});
668}
669
// Build and insert Res = G_MERGE_VALUES Ops[0], ... (at least two sources).
MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(Opc: TargetOpcode::G_MERGE_VALUES, DstOps: Res, SrcOps: TmpVec);
}
679
// Merge registers into Res, selecting G_MERGE_VALUES / G_BUILD_VECTOR /
// G_CONCAT_VECTORS based on the destination and source types
// (see getOpcodeForMerge).
MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(Opc: getOpcodeForMerge(DstOp: Res, SrcOps: TmpVec), DstOps: Res, SrcOps: TmpVec);
}
690
// Initializer-list overload of buildMergeLikeInstr: no temporary storage is
// needed since the elements are already SrcOps.
MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(Opc: getOpcodeForMerge(DstOp: Res, SrcOps: Ops), DstOps: Res, SrcOps: Ops);
}
697
698unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
699 ArrayRef<SrcOp> SrcOps) const {
700 if (DstOp.getLLTTy(MRI: *getMRI()).isVector()) {
701 if (SrcOps[0].getLLTTy(MRI: *getMRI()).isVector())
702 return TargetOpcode::G_CONCAT_VECTORS;
703 return TargetOpcode::G_BUILD_VECTOR;
704 }
705
706 return TargetOpcode::G_MERGE_VALUES;
707}
708
// Build and insert Res[0], ... = G_UNMERGE_VALUES Op, with one destination
// per requested type.
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(Opc: TargetOpcode::G_UNMERGE_VALUES, DstOps: TmpVec, SrcOps: Op);
}
718
// Unmerge Op into as many pieces of type \p Res as fit its total size
// (the source size is assumed to be an exact multiple of Res's size).
MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(MRI: *getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(Opc: TargetOpcode::G_UNMERGE_VALUES, DstOps: TmpVec, SrcOps: Op);
}
725
// Unmerge Op into pieces described by full virtual-register attributes
// (type plus register class/bank), one destination per piece that fits.
MachineInstrBuilder
MachineIRBuilder::buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,
                               const SrcOp &Op) {
  LLT OpTy = Op.getLLTTy(MRI: *getMRI());
  unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
  return buildInstr(Opc: TargetOpcode::G_UNMERGE_VALUES, DstOps: TmpVec, SrcOps: Op);
}
734
// Build and insert Res[0], ... = G_UNMERGE_VALUES Op into pre-existing
// destination registers.
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(Opc: TargetOpcode::G_UNMERGE_VALUES, DstOps: TmpVec, SrcOps: Op);
}
744
// Build and insert Res = G_BUILD_VECTOR Ops[0], ... from scalar registers.
MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR, DstOps: Res, SrcOps: TmpVec);
}
753
// Build and insert Res = G_BUILD_VECTOR whose elements are freshly-built
// G_CONSTANTs of the destination's element type, one per APInt in \p Ops.
MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(N: Ops.size());
  LLT EltTy = Res.getLLTTy(MRI: *getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(Elt: buildConstant(Res: EltTy, Val: Op));
  return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR, DstOps: Res, SrcOps: TmpVec);
}
764
765MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
766 const SrcOp &Src) {
767 SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(MRI: *getMRI()).getNumElements(), Src);
768 return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR, DstOps: Res, SrcOps: TmpVec);
769}
770
// Build a vector from scalars that may be wider than the destination's
// elements: emits G_BUILD_VECTOR when widths already match, otherwise
// G_BUILD_VECTOR_TRUNC (which truncates each source).
MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  if (TmpVec[0].getLLTTy(MRI: *getMRI()).getSizeInBits() ==
      Res.getLLTTy(MRI: *getMRI()).getElementType().getSizeInBits())
    return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR, DstOps: Res, SrcOps: TmpVec);
  return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR_TRUNC, DstOps: Res, SrcOps: TmpVec);
}
783
// Splat scalar \p Src across the vector Res via the canonical shuffle
// pattern: insert Src into lane 0 of an undef vector, then shuffle with an
// all-zeros mask so every result lane reads lane 0.
MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(MRI: *getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(Res: DstTy);
  auto Zero = buildConstant(Res: LLT::integer(SizeInBits: 64), Val: 0);
  auto InsElt = buildInsertVectorElement(Res: DstTy, Val: UndefVec, Elt: Src, Idx: Zero);
  // Value-initialized mask => all zeros => every lane selects element 0.
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(Res: DstTy, Src1: InsElt, Src2: UndefVec, Mask: ZeroMask);
}
795
// Build and insert Res = G_SPLAT_VECTOR Src (scalable-vector-friendly splat).
MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(Opc: TargetOpcode::G_SPLAT_VECTOR, DstOps: Res, SrcOps: Src);
}
802
// Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask. The mask is
// copied into function-lifetime storage since the instruction only keeps a
// reference to it.
MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(MRI: *getMRI());
  LLT Src1Ty = Src1.getLLTTy(MRI: *getMRI());
  LLT Src2Ty = Src2.getLLTTy(MRI: *getMRI());
  const LLT DstElemTy = DstTy.getScalarType();
  const LLT ElemTy1 = Src1Ty.getScalarType();
  const LLT ElemTy2 = Src2Ty.getScalarType();
  assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
  assert(Mask.size() > 1 && "Scalar G_SHUFFLE_VECTOR are not supported");
  // The element-type locals exist only for the asserts above; silence
  // unused-variable warnings in release builds.
  (void)DstElemTy;
  (void)ElemTy1;
  (void)ElemTy2;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(Opc: TargetOpcode::G_SHUFFLE_VECTOR, DstOps: {Res}, SrcOps: {Src1, Src2})
      .addShuffleMask(Val: MaskAlloc);
}
822
823MachineInstrBuilder
824MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
825 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
826 // we need some temporary storage for the DstOp objects. Here we use a
827 // sufficiently large SmallVector to not go through the heap.
828 SmallVector<SrcOp, 8> TmpVec(Ops);
829 return buildInstr(Opc: TargetOpcode::G_CONCAT_VECTORS, DstOps: Res, SrcOps: TmpVec);
830}
831
832MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
833 const SrcOp &Src,
834 const SrcOp &Op,
835 unsigned Index) {
836 assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
837 Res.getLLTTy(*getMRI()).getSizeInBits() &&
838 "insertion past the end of a register");
839
840 if (Res.getLLTTy(MRI: *getMRI()).getSizeInBits() ==
841 Op.getLLTTy(MRI: *getMRI()).getSizeInBits()) {
842 return buildCast(Dst: Res, Src: Op);
843 }
844
845 return buildInstr(Opc: TargetOpcode::G_INSERT, DstOps: Res, SrcOps: {Src, Op, uint64_t(Index)});
846}
847
848MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
849 unsigned Step) {
850 unsigned Bitwidth = Res.getLLTTy(MRI: *getMRI()).getElementType().getSizeInBits();
851 ConstantInt *CI = ConstantInt::get(Context&: getMF().getFunction().getContext(),
852 V: APInt(Bitwidth, Step));
853 auto StepVector = buildInstr(Opcode: TargetOpcode::G_STEP_VECTOR);
854 StepVector->setDebugLoc(DebugLoc());
855 Res.addDefToMIB(MRI&: *getMRI(), MIB&: StepVector);
856 StepVector.addCImm(Val: CI);
857 return StepVector;
858}
859
860MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
861 unsigned MinElts) {
862
863 auto IntN = IntegerType::get(C&: getMF().getFunction().getContext(),
864 NumBits: Res.getLLTTy(MRI: *getMRI()).getScalarSizeInBits());
865 ConstantInt *CI = ConstantInt::get(Ty: IntN, V: MinElts);
866 return buildVScale(Res, MinElts: *CI);
867}
868
869MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
870 const ConstantInt &MinElts) {
871 auto VScale = buildInstr(Opcode: TargetOpcode::G_VSCALE);
872 VScale->setDebugLoc(DebugLoc());
873 Res.addDefToMIB(MRI&: *getMRI(), MIB&: VScale);
874 VScale.addCImm(Val: &MinElts);
875 return VScale;
876}
877
878MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
879 const APInt &MinElts) {
880 ConstantInt *CI =
881 ConstantInt::get(Context&: getMF().getFunction().getContext(), V: MinElts);
882 return buildVScale(Res, MinElts: *CI);
883}
884
885static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
886 if (HasSideEffects && IsConvergent)
887 return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
888 if (HasSideEffects)
889 return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
890 if (IsConvergent)
891 return TargetOpcode::G_INTRINSIC_CONVERGENT;
892 return TargetOpcode::G_INTRINSIC;
893}
894
895MachineInstrBuilder
896MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
897 ArrayRef<Register> ResultRegs,
898 bool HasSideEffects, bool isConvergent) {
899 auto MIB = buildInstr(Opcode: getIntrinsicOpcode(HasSideEffects, IsConvergent: isConvergent));
900 for (Register ResultReg : ResultRegs)
901 MIB.addDef(RegNo: ResultReg);
902 MIB.addIntrinsicID(ID);
903 return MIB;
904}
905
906MachineInstrBuilder
907MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
908 ArrayRef<Register> ResultRegs) {
909 AttributeSet Attrs = Intrinsic::getFnAttributes(C&: getContext(), id: ID);
910 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
911 bool isConvergent = Attrs.hasAttribute(Kind: Attribute::Convergent);
912 return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
913}
914
915MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
916 ArrayRef<DstOp> Results,
917 bool HasSideEffects,
918 bool isConvergent) {
919 auto MIB = buildInstr(Opcode: getIntrinsicOpcode(HasSideEffects, IsConvergent: isConvergent));
920 for (DstOp Result : Results)
921 Result.addDefToMIB(MRI&: *getMRI(), MIB);
922 MIB.addIntrinsicID(ID);
923 return MIB;
924}
925
926MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
927 ArrayRef<DstOp> Results) {
928 AttributeSet Attrs = Intrinsic::getFnAttributes(C&: getContext(), id: ID);
929 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
930 bool isConvergent = Attrs.hasAttribute(Kind: Attribute::Convergent);
931 return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
932}
933
934MachineInstrBuilder
935MachineIRBuilder::buildTrunc(const DstOp &Res, const SrcOp &Op,
936 std::optional<unsigned> Flags) {
937 return buildInstr(Opc: TargetOpcode::G_TRUNC, DstOps: Res, SrcOps: Op, Flags);
938}
939
940MachineInstrBuilder
941MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
942 std::optional<unsigned> Flags) {
943 return buildInstr(Opc: TargetOpcode::G_FPTRUNC, DstOps: Res, SrcOps: Op, Flags);
944}
945
946MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
947 const DstOp &Res,
948 const SrcOp &Op0,
949 const SrcOp &Op1,
950 std::optional<unsigned> Flags) {
951 return buildInstr(Opc: TargetOpcode::G_ICMP, DstOps: Res, SrcOps: {Pred, Op0, Op1}, Flags);
952}
953
954MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
955 const DstOp &Res,
956 const SrcOp &Op0,
957 const SrcOp &Op1,
958 std::optional<unsigned> Flags) {
959
960 return buildInstr(Opc: TargetOpcode::G_FCMP, DstOps: Res, SrcOps: {Pred, Op0, Op1}, Flags);
961}
962
963MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
964 const SrcOp &Op0,
965 const SrcOp &Op1) {
966 return buildInstr(Opc: TargetOpcode::G_SCMP, DstOps: Res, SrcOps: {Op0, Op1});
967}
968
969MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
970 const SrcOp &Op0,
971 const SrcOp &Op1) {
972 return buildInstr(Opc: TargetOpcode::G_UCMP, DstOps: Res, SrcOps: {Op0, Op1});
973}
974
975MachineInstrBuilder
976MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
977 const SrcOp &Op0, const SrcOp &Op1,
978 std::optional<unsigned> Flags) {
979
980 return buildInstr(Opc: TargetOpcode::G_SELECT, DstOps: {Res}, SrcOps: {Tst, Op0, Op1}, Flags);
981}
982
983MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
984 const SrcOp &Src0,
985 const SrcOp &Src1,
986 unsigned Idx) {
987 return buildInstr(Opc: TargetOpcode::G_INSERT_SUBVECTOR, DstOps: Res,
988 SrcOps: {Src0, Src1, uint64_t(Idx)});
989}
990
991MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
992 const SrcOp &Src,
993 unsigned Idx) {
994 return buildInstr(Opc: TargetOpcode::G_EXTRACT_SUBVECTOR, DstOps: Res,
995 SrcOps: {Src, uint64_t(Idx)});
996}
997
998MachineInstrBuilder
999MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
1000 const SrcOp &Elt, const SrcOp &Idx) {
1001 return buildInstr(Opc: TargetOpcode::G_INSERT_VECTOR_ELT, DstOps: Res, SrcOps: {Val, Elt, Idx});
1002}
1003
1004MachineInstrBuilder
1005MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
1006 const SrcOp &Idx) {
1007 return buildInstr(Opc: TargetOpcode::G_EXTRACT_VECTOR_ELT, DstOps: Res, SrcOps: {Val, Idx});
1008}
1009
// Build G_ATOMIC_CMPXCHG_WITH_SUCCESS. Defs: the loaded old value and a
// scalar success flag. Sources: the pointer, the expected value, and the
// replacement value. The MMO describes the accessed memory location.
MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  // Debug-only type checks: old value, compare value and new value must all
  // share one type; the address must be a pointer; success is a scalar.
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  // Operand order must match the generic opcode definition:
  // defs (old value, success), then addr, cmp, new, then the memory operand.
  auto MIB = buildInstr(Opcode: TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(MRI&: *getMRI(), MIB);
  SuccessRes.addDefToMIB(MRI&: *getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(MMO: &MMO);
  return MIB;
}
1037
// Build G_ATOMIC_CMPXCHG (no separate success flag — callers needing one use
// buildAtomicCmpXchgWithSuccess instead).
MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  // Debug-only type checks mirroring the WithSuccess variant, minus the
  // success-flag operand.
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  // Def (old value), then addr, cmp, new, then the memory operand.
  auto MIB = buildInstr(Opcode: TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(MRI&: *getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(MMO: &MMO);
  return MIB;
}
1063
// Shared worker for all G_ATOMICRMW_* flavors: Opcode selects the operation,
// OldValRes receives the pre-operation memory value, Addr is the location and
// Val the operand applied to it.
MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  // Debug-only checks: pointer address, matching result/operand types, and
  // an MMO that is actually marked atomic.
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  // Def (old value), then addr and operand, then the memory operand.
  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(MRI&: *getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(MMO: &MMO);
  return MIB;
}
1086
1087MachineInstrBuilder
1088MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
1089 Register Val, MachineMemOperand &MMO) {
1090 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
1091 MMO);
1092}
1093MachineInstrBuilder
1094MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
1095 Register Val, MachineMemOperand &MMO) {
1096 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
1097 MMO);
1098}
1099MachineInstrBuilder
1100MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
1101 Register Val, MachineMemOperand &MMO) {
1102 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
1103 MMO);
1104}
1105MachineInstrBuilder
1106MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
1107 Register Val, MachineMemOperand &MMO) {
1108 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
1109 MMO);
1110}
1111MachineInstrBuilder
1112MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
1113 Register Val, MachineMemOperand &MMO) {
1114 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
1115 MMO);
1116}
1117MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
1118 Register Addr,
1119 Register Val,
1120 MachineMemOperand &MMO) {
1121 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
1122 MMO);
1123}
1124MachineInstrBuilder
1125MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
1126 Register Val, MachineMemOperand &MMO) {
1127 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
1128 MMO);
1129}
1130MachineInstrBuilder
1131MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
1132 Register Val, MachineMemOperand &MMO) {
1133 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
1134 MMO);
1135}
1136MachineInstrBuilder
1137MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
1138 Register Val, MachineMemOperand &MMO) {
1139 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
1140 MMO);
1141}
1142MachineInstrBuilder
1143MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
1144 Register Val, MachineMemOperand &MMO) {
1145 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
1146 MMO);
1147}
1148MachineInstrBuilder
1149MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
1150 Register Val, MachineMemOperand &MMO) {
1151 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
1152 MMO);
1153}
1154
1155MachineInstrBuilder
1156MachineIRBuilder::buildAtomicRMWFAdd(
1157 const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1158 MachineMemOperand &MMO) {
1159 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
1160 MMO);
1161}
1162
1163MachineInstrBuilder
1164MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1165 MachineMemOperand &MMO) {
1166 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
1167 MMO);
1168}
1169
1170MachineInstrBuilder
1171MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
1172 const SrcOp &Val, MachineMemOperand &MMO) {
1173 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
1174 MMO);
1175}
1176
1177MachineInstrBuilder
1178MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
1179 const SrcOp &Val, MachineMemOperand &MMO) {
1180 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
1181 MMO);
1182}
1183
1184MachineInstrBuilder
1185MachineIRBuilder::buildAtomicRMWFMaximum(const DstOp &OldValRes,
1186 const SrcOp &Addr, const SrcOp &Val,
1187 MachineMemOperand &MMO) {
1188 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FMAXIMUM, OldValRes, Addr,
1189 Val, MMO);
1190}
1191
1192MachineInstrBuilder
1193MachineIRBuilder::buildAtomicRMWFMinimum(const DstOp &OldValRes,
1194 const SrcOp &Addr, const SrcOp &Val,
1195 MachineMemOperand &MMO) {
1196 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FMINIMUM, OldValRes, Addr,
1197 Val, MMO);
1198}
1199
1200MachineInstrBuilder
1201MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
1202 return buildInstr(Opcode: TargetOpcode::G_FENCE)
1203 .addImm(Val: Ordering)
1204 .addImm(Val: Scope);
1205}
1206
1207MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
1208 unsigned RW,
1209 unsigned Locality,
1210 unsigned CacheType,
1211 MachineMemOperand &MMO) {
1212 auto MIB = buildInstr(Opcode: TargetOpcode::G_PREFETCH);
1213 Addr.addSrcToMIB(MIB);
1214 MIB.addImm(Val: RW).addImm(Val: Locality).addImm(Val: CacheType);
1215 MIB.addMemOperand(MMO: &MMO);
1216 return MIB;
1217}
1218
1219MachineInstrBuilder
1220MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
1221#ifndef NDEBUG
1222 assert(getMRI()->getType(Res).isPointer() && "invalid res type");
1223#endif
1224
1225 return buildInstr(Opcode: TargetOpcode::G_BLOCK_ADDR).addDef(RegNo: Res).addBlockAddress(BA);
1226}
1227
// Debug-only sanity checks for trunc/extend builders: vector-ness and element
// counts must match between source and destination, and the width must move
// in the direction the opcode implies (wider for extends, narrower for
// truncs). Compiles to nothing under NDEBUG.
void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    // Vector <-> vector only, with identical (possibly scalable) lane counts.
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  // isKnownGT/isKnownLT handle scalable sizes conservatively.
  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}
1246
// Debug-only sanity checks for G_SELECT: both value operands must match the
// result type; the test operand is a scalar for scalar/pointer selects, and
// either a scalar or a lane-count-matched vector for vector selects.
// Compiles to nothing under NDEBUG.
void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    // Vector select: a scalar test selects whole vectors; a vector test
    // selects per lane and must have the same element count.
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}
1262
// Core typed-operand builder: runs opcode-specific (assert-only) validation
// of DstOps/SrcOps, then emits the instruction with all defs first, all
// sources second, and optional MI flags applied.
MachineInstrBuilder
MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                             ArrayRef<SrcOp> SrcOps,
                             std::optional<unsigned> Flags) {
  // Every case below is validation only; opcodes without a case get no
  // checking here.
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        ResTy: DstOps[0].getLLTTy(MRI: *getMRI()), TstTy: SrcOps[0].getLLTTy(MRI: *getMRI()),
        Op0Ty: SrcOps[1].getLLTTy(MRI: *getMRI()), Op1Ty: SrcOps[2].getLLTTy(MRI: *getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(Res: DstOps[0].getLLTTy(MRI: *getMRI()),
                    Op0: SrcOps[0].getLLTTy(MRI: *getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(Res: DstOps[0].getLLTTy(MRI: *getMRI()),
                     Op0: SrcOps[0].getLLTTy(MRI: *getMRI()),
                     Op1: SrcOps[1].getLLTTy(MRI: *getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    // Shifts validate separately: the shift-amount type may differ from the
    // value type.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(Res: DstOps[0].getLLTTy(MRI: *getMRI()),
                    Op0: SrcOps[0].getLLTTy(MRI: *getMRI()),
                    Op1: SrcOps[1].getLLTTy(MRI: *getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstTy: DstOps[0].getLLTTy(MRI: *getMRI()),
                     SrcTy: SrcOps[0].getLLTTy(MRI: *getMRI()), IsExtend: true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstTy: DstOps[0].getLLTTy(MRI: *getMRI()),
                     SrcTy: SrcOps[0].getLLTTy(MRI: *getMRI()), IsExtend: false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    // Bitcasts must preserve the total bit width.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    // Result shape must follow the comparands: scalar compare -> scalar
    // result, vector compare -> vector result with the same lane count.
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    // The destination pieces must exactly tile the source register.
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // NOTE(review): this condition is a tautology (when SrcOps is empty,
    // size() < 2 holds), so the "at least 2 operands" intent is not actually
    // enforced — confirm intended semantics before tightening.
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // NOTE(review): same tautological operand-count condition as
    // G_BUILD_VECTOR above. No total-size check here: sources are wider than
    // the destination elements and get truncated.
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    // NOTE(review): same tautological operand-count condition as
    // G_BUILD_VECTOR above.
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    // Defs: result value and carry-out. Sources: two addends and carry-in.
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  // Emission: all defs first, then all sources, then optional MI flags.
  auto MIB = buildInstr(Opcode: Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(MRI&: *getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
1509