//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
11#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
12#include "llvm/CodeGen/MachineFunction.h"
13#include "llvm/CodeGen/MachineInstr.h"
14#include "llvm/CodeGen/MachineInstrBuilder.h"
15#include "llvm/CodeGen/MachineRegisterInfo.h"
16#include "llvm/CodeGen/TargetInstrInfo.h"
17#include "llvm/CodeGen/TargetLowering.h"
18#include "llvm/CodeGen/TargetOpcodes.h"
19#include "llvm/CodeGen/TargetSubtargetInfo.h"
20#include "llvm/IR/DebugInfoMetadata.h"
21
22using namespace llvm;
23
void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
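// A minimal usage sketch for setMF (assumption: MF contains at least one
// basic block; B denotes a MachineIRBuilder positioned at a valid insertion
// point in the example comments throughout this file). setMF only resets
// function-level state; a block and insertion point must still be chosen
// before building instructions:
//   MachineIRBuilder B;
//   B.setMF(MF);
//   B.setInsertPt(MF.front(), MF.front().begin());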
35
//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(
      getMF(),
      {getDL(), getPCSections(), getMMRAMetadata(), getDeactivationSymbol()},
      getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}
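// Sketch of the two-step buildInstrNoInsert/insertInstr pattern used by
// several helpers below: assemble the instruction, add operands, then commit
// it to the block (DstReg is a hypothetical caller-provided vreg):
//   auto MIB = B.buildInstrNoInsert(TargetOpcode::G_IMPLICIT_DEF);
//   MIB.addDef(DstReg);
//   B.insertInstr(MIB);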
52
53MachineInstrBuilder
54MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
55 const MDNode *Expr) {
56 assert(isa<DILocalVariable>(Variable) && "not a variable");
57 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
58 assert(
59 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
60 "Expected inlined-at fields to agree");
61 return insertInstr(MIB: BuildMI(MF&: getMF(), DL: getDL(),
62 MCID: getTII().get(Opcode: TargetOpcode::DBG_VALUE),
63 /*IsIndirect*/ false, Reg, Variable, Expr));
64}
65
66MachineInstrBuilder
67MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
68 const MDNode *Expr) {
69 assert(isa<DILocalVariable>(Variable) && "not a variable");
70 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
71 assert(
72 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
73 "Expected inlined-at fields to agree");
74 return insertInstr(MIB: BuildMI(MF&: getMF(), DL: getDL(),
75 MCID: getTII().get(Opcode: TargetOpcode::DBG_VALUE),
76 /*IsIndirect*/ true, Reg, Variable, Expr));
77}
78
79MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
80 const MDNode *Variable,
81 const MDNode *Expr) {
82 assert(isa<DILocalVariable>(Variable) && "not a variable");
83 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
84 assert(
85 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
86 "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
92}
93
94MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
95 const MDNode *Variable,
96 const MDNode *Expr) {
97 assert(isa<DILocalVariable>(Variable) && "not a variable");
98 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
99 assert(
100 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
101 "Expected inlined-at fields to agree");
102 auto MIB = buildInstrNoInsert(Opcode: TargetOpcode::DBG_VALUE);
103
104 auto *NumericConstant = [&] () -> const Constant* {
105 if (const auto *CE = dyn_cast<ConstantExpr>(Val: &C))
106 if (CE->getOpcode() == Instruction::IntToPtr)
107 return CE->getOperand(i_nocapture: 0);
108 return &C;
109 }();
110
111 if (auto *CI = dyn_cast<ConstantInt>(Val: NumericConstant)) {
112 if (CI->getBitWidth() > 64)
113 MIB.addCImm(Val: CI);
114 else if (CI->getBitWidth() == 1)
115 MIB.addImm(Val: CI->getZExtValue());
116 else
117 MIB.addImm(Val: CI->getSExtValue());
118 } else if (auto *CFP = dyn_cast<ConstantFP>(Val: NumericConstant)) {
119 MIB.addFPImm(Val: CFP);
120 } else if (isa<ConstantPointerNull>(Val: NumericConstant)) {
121 MIB.addImm(Val: 0);
122 } else {
123 // Insert $noreg if we didn't find a usable constant and had to drop it.
124 MIB.addReg(RegNo: Register());
125 }
126
127 MIB.addImm(Val: 0).addMetadata(MD: Variable).addMetadata(MD: Expr);
128 return insertInstr(MIB);
129}
130
131MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
132 assert(isa<DILabel>(Label) && "not a label");
133 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
134 "Expected inlined-at fields to agree");
135 auto MIB = buildInstr(Opcode: TargetOpcode::DBG_LABEL);
136
137 return MIB.addMetadata(MD: Label);
138}
139
140MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
141 const SrcOp &Size,
142 Align Alignment) {
143 assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
144 auto MIB = buildInstr(Opcode: TargetOpcode::G_DYN_STACKALLOC);
145 Res.addDefToMIB(MRI&: *getMRI(), MIB);
146 Size.addSrcToMIB(MIB);
147 MIB.addImm(Val: Alignment.value());
148 return MIB;
149}
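// Example sketch for buildDynStackAlloc (assumes 64-bit p0 pointers and a
// size value already in SizeReg):
//   auto Alloc =
//       B.buildDynStackAlloc(LLT::pointer(0, 64), SizeReg, Align(16));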
150
151MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
152 int Idx) {
153 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
154 auto MIB = buildInstr(Opcode: TargetOpcode::G_FRAME_INDEX);
155 Res.addDefToMIB(MRI&: *getMRI(), MIB);
156 MIB.addFrameIndex(Idx);
157 return MIB;
158}
159
160MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
161 const GlobalValue *GV) {
162 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
163 assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
164 GV->getType()->getAddressSpace() &&
165 "address space mismatch");
166
167 auto MIB = buildInstr(Opcode: TargetOpcode::G_GLOBAL_VALUE);
168 Res.addDefToMIB(MRI&: *getMRI(), MIB);
169 MIB.addGlobalAddress(GV);
170 return MIB;
171}
172
173MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
174 unsigned Idx) {
175 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
176 auto MIB = buildInstr(Opcode: TargetOpcode::G_CONSTANT_POOL);
177 Res.addDefToMIB(MRI&: *getMRI(), MIB);
178 MIB.addConstantPoolIndex(Idx);
179 return MIB;
180}
181
182MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
183 unsigned JTI) {
184 return buildInstr(Opc: TargetOpcode::G_JUMP_TABLE, DstOps: {PtrTy}, SrcOps: {})
185 .addJumpTableIndex(Idx: JTI);
186}
187
188void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
189 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
190 assert((Res == Op0) && "type mismatch");
191}
192
193void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
194 const LLT Op1) {
195 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
196 assert((Res == Op0 && Res == Op1) && "type mismatch");
197}
198
199void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
200 const LLT Op1) {
201 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
202 assert((Res == Op0) && "type mismatch");
203}
204
MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}
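// Example sketch for buildPtrAdd (assumes Base is a 64-bit p0 vreg):
//   auto Off = B.buildConstant(LLT::scalar(64), 16);
//   auto Ptr = B.buildPtrAdd(LLT::pointer(0, 64), Base, Off);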
214
215MachineInstrBuilder MachineIRBuilder::buildObjectPtrOffset(const DstOp &Res,
216 const SrcOp &Op0,
217 const SrcOp &Op1) {
218 return buildPtrAdd(Res, Op0, Op1,
219 Flags: MachineInstr::MIFlag::NoUWrap |
220 MachineInstr::MIFlag::InBounds);
221}
222
223std::optional<MachineInstrBuilder>
224MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
225 const LLT ValueTy, uint64_t Value,
226 std::optional<unsigned> Flags) {
227 assert(Res == 0 && "Res is a result argument");
228 assert(ValueTy.isScalar() && "invalid offset type");
229
230 if (Value == 0) {
231 Res = Op0;
232 return std::nullopt;
233 }
234
235 Res = getMRI()->createGenericVirtualRegister(Ty: getMRI()->getType(Reg: Op0));
236 auto Cst = buildConstant(Res: ValueTy, Val: Value);
237 return buildPtrAdd(Res, Op0, Op1: Cst.getReg(Idx: 0), Flags);
238}
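// Example sketch for materializePtrAdd: it either reuses Op0 (offset 0,
// returning std::nullopt) or defines a fresh vreg into Res; Res is usable
// either way (BaseReg and Imm are hypothetical):
//   Register NewPtr;
//   auto MaybeAdd = B.materializePtrAdd(NewPtr, BaseReg, LLT::scalar(64), Imm);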
239
240std::optional<MachineInstrBuilder> MachineIRBuilder::materializeObjectPtrOffset(
241 Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) {
242 return materializePtrAdd(Res, Op0, ValueTy, Value,
243 Flags: MachineInstr::MIFlag::NoUWrap |
244 MachineInstr::MIFlag::InBounds);
245}
246
247MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
248 const SrcOp &Op0,
249 uint32_t NumBits) {
250 LLT PtrTy = Res.getLLTTy(MRI: *getMRI());
251 LLT MaskTy = LLT::scalar(SizeInBits: PtrTy.getSizeInBits());
252 Register MaskReg = getMRI()->createGenericVirtualRegister(Ty: MaskTy);
253 buildConstant(Res: MaskReg, Val: maskTrailingZeros<uint64_t>(N: NumBits));
254 return buildPtrMask(Res, Op0, Op1: MaskReg);
255}
256
257MachineInstrBuilder
258MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
259 const SrcOp &Op0) {
260 LLT ResTy = Res.getLLTTy(MRI: *getMRI());
261 LLT Op0Ty = Op0.getLLTTy(MRI: *getMRI());
262
263 assert(ResTy.isVector() && "Res non vector type");
264
265 SmallVector<Register, 8> Regs;
266 if (Op0Ty.isVector()) {
267 assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
268 "Different vector element types");
269 assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
270 "Op0 has more elements");
271 auto Unmerge = buildUnmerge(Res: Op0Ty.getElementType(), Op: Op0);
272
273 for (auto Op : Unmerge.getInstr()->defs())
274 Regs.push_back(Elt: Op.getReg());
275 } else {
276 assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
277 "Op0 has more size");
278 Regs.push_back(Elt: Op0.getReg());
279 }
280 Register Undef =
281 buildUndef(Res: Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(Idx: 0);
282 unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
283 for (unsigned i = 0; i < NumberOfPadElts; ++i)
284 Regs.push_back(Elt: Undef);
285 return buildMergeLikeInstr(Res, Ops: Regs);
286}
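// Example sketch for buildPadVectorWithUndefElements: widening a <2 x s32>
// value V to <4 x s32> by appending G_IMPLICIT_DEF elements:
//   auto Wide = B.buildPadVectorWithUndefElements(LLT::fixed_vector(4, 32), V);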
287
288MachineInstrBuilder
289MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
290 const SrcOp &Op0) {
291 LLT ResTy = Res.getLLTTy(MRI: *getMRI());
292 LLT Op0Ty = Op0.getLLTTy(MRI: *getMRI());
293
294 assert(Op0Ty.isVector() && "Non vector type");
295 assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
296 (ResTy.isVector() &&
297 (ResTy.getElementType() == Op0Ty.getElementType()))) &&
298 "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Res must have fewer elements than Op0");
302
303 auto Unmerge = buildUnmerge(Res: Op0Ty.getElementType(), Op: Op0);
304 if (ResTy.isScalar())
305 return buildCopy(Res, Op: Unmerge.getReg(Idx: 0));
306 SmallVector<Register, 8> Regs;
307 for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
308 Regs.push_back(Elt: Unmerge.getReg(Idx: i));
309 return buildMergeLikeInstr(Res, Ops: Regs);
310}
311
312MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
313 return buildInstr(Opcode: TargetOpcode::G_BR).addMBB(MBB: &Dest);
314}
315
316MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
317 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
318 return buildInstr(Opcode: TargetOpcode::G_BRINDIRECT).addUse(RegNo: Tgt);
319}
320
321MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
322 unsigned JTI,
323 Register IndexReg) {
324 assert(getMRI()->getType(TablePtr).isPointer() &&
325 "Table reg must be a pointer");
326 return buildInstr(Opcode: TargetOpcode::G_BRJT)
327 .addUse(RegNo: TablePtr)
328 .addJumpTableIndex(Idx: JTI)
329 .addUse(RegNo: IndexReg);
330}
331
332MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
333 const SrcOp &Op) {
334 return buildInstr(Opc: TargetOpcode::COPY, DstOps: Res, SrcOps: Op);
335}
336
337MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
338 const ConstantInt &Val) {
339 assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
340 LLT Ty = Res.getLLTTy(MRI: *getMRI());
341 LLT EltTy = Ty.getScalarType();
342 assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
343 "creating constant with the wrong size");
344
345 assert(!Ty.isScalableVector() &&
346 "unexpected scalable vector in buildConstant");
347
348 if (Ty.isFixedVector()) {
349 auto Const = buildInstr(Opcode: TargetOpcode::G_CONSTANT)
350 .addDef(RegNo: getMRI()->createGenericVirtualRegister(Ty: EltTy))
351 .addCImm(Val: &Val);
352 return buildSplatBuildVector(Res, Src: Const);
353 }
354
355 auto Const = buildInstr(Opcode: TargetOpcode::G_CONSTANT);
356 Const->setDebugLoc(DebugLoc());
357 Res.addDefToMIB(MRI&: *getMRI(), MIB&: Const);
358 Const.addCImm(Val: &Val);
359 return Const;
360}
361
362MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
363 int64_t Val) {
364 auto IntN = IntegerType::get(C&: getMF().getFunction().getContext(),
365 NumBits: Res.getLLTTy(MRI: *getMRI()).getScalarSizeInBits());
366 // TODO: Avoid implicit trunc?
367 // See https://github.com/llvm/llvm-project/issues/112510.
368 ConstantInt *CI = ConstantInt::getSigned(Ty: IntN, V: Val, /*implicitTrunc=*/ImplicitTrunc: true);
369 return buildConstant(Res, Val: *CI);
370}
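// Example sketch for buildConstant: scalar destinations get a plain
// G_CONSTANT, fixed-vector destinations are splatted via G_BUILD_VECTOR as
// handled above:
//   auto C  = B.buildConstant(LLT::scalar(32), 42);
//   auto CV = B.buildConstant(LLT::fixed_vector(4, 32), 7);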
371
372MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
373 const ConstantFP &Val) {
374 assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
375 LLT Ty = Res.getLLTTy(MRI: *getMRI());
376 LLT EltTy = Ty.getScalarType();
377
378 assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
379 == EltTy.getSizeInBits() &&
380 "creating fconstant with the wrong size");
381
382 assert(!Ty.isPointer() && "invalid operand type");
383
384 assert(!Ty.isScalableVector() &&
385 "unexpected scalable vector in buildFConstant");
386
387 if (Ty.isFixedVector()) {
388 auto Const = buildInstr(Opcode: TargetOpcode::G_FCONSTANT)
389 .addDef(RegNo: getMRI()->createGenericVirtualRegister(Ty: EltTy))
390 .addFPImm(Val: &Val);
391
392 return buildSplatBuildVector(Res, Src: Const);
393 }
394
395 auto Const = buildInstr(Opcode: TargetOpcode::G_FCONSTANT);
396 Const->setDebugLoc(DebugLoc());
397 Res.addDefToMIB(MRI&: *getMRI(), MIB&: Const);
398 Const.addFPImm(Val: &Val);
399 return Const;
400}
401
402MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
403 const APInt &Val) {
404 ConstantInt *CI = ConstantInt::get(Context&: getMF().getFunction().getContext(), V: Val);
405 return buildConstant(Res, Val: *CI);
406}
407
408MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
409 double Val) {
410 LLT DstTy = Res.getLLTTy(MRI: *getMRI());
411 auto &Ctx = getMF().getFunction().getContext();
412 auto *CFP =
413 ConstantFP::get(Context&: Ctx, V: getAPFloatFromSize(Val, Size: DstTy.getScalarSizeInBits()));
414 return buildFConstant(Res, Val: *CFP);
415}
416
417MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
418 const APFloat &Val) {
419 auto &Ctx = getMF().getFunction().getContext();
420 auto *CFP = ConstantFP::get(Context&: Ctx, V: Val);
421 return buildFConstant(Res, Val: *CFP);
422}
423
424MachineInstrBuilder
425MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
426 const ConstantPtrAuth *CPA,
427 Register Addr, Register AddrDisc) {
428 auto MIB = buildInstr(Opcode: TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
429 Res.addDefToMIB(MRI&: *getMRI(), MIB);
430 MIB.addUse(RegNo: Addr);
431 MIB.addImm(Val: CPA->getKey()->getZExtValue());
432 MIB.addUse(RegNo: AddrDisc);
433 MIB.addImm(Val: CPA->getDiscriminator()->getZExtValue());
434 return MIB;
435}
436
437MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
438 MachineBasicBlock &Dest) {
439 assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");
440
441 auto MIB = buildInstr(Opcode: TargetOpcode::G_BRCOND);
442 Tst.addSrcToMIB(MIB);
443 MIB.addMBB(MBB: &Dest);
444 return MIB;
445}
446
447MachineInstrBuilder
448MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
449 MachinePointerInfo PtrInfo, Align Alignment,
450 MachineMemOperand::Flags MMOFlags,
451 const AAMDNodes &AAInfo) {
452 MMOFlags |= MachineMemOperand::MOLoad;
453 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
454
455 LLT Ty = Dst.getLLTTy(MRI: *getMRI());
456 MachineMemOperand *MMO =
457 getMF().getMachineMemOperand(PtrInfo, f: MMOFlags, MemTy: Ty, base_alignment: Alignment, AAInfo);
458 return buildLoad(Res: Dst, Addr, MMO&: *MMO);
459}
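// Example sketch for the MachinePointerInfo-based buildLoad overload
// (MF, FrameIdx and AddrReg are hypothetical):
//   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIdx);
//   auto Ld = B.buildLoad(LLT::scalar(32), AddrReg, PtrInfo, Align(4),
//                         MachineMemOperand::MONone);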
460
461MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
462 const DstOp &Res,
463 const SrcOp &Addr,
464 MachineMemOperand &MMO) {
465 assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
466 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
467
468 auto MIB = buildInstr(Opcode);
469 Res.addDefToMIB(MRI&: *getMRI(), MIB);
470 Addr.addSrcToMIB(MIB);
471 MIB.addMemOperand(MMO: &MMO);
472 return MIB;
473}
474
475MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
476 const DstOp &Dst, const SrcOp &BasePtr,
477 MachineMemOperand &BaseMMO, int64_t Offset) {
478 LLT LoadTy = Dst.getLLTTy(MRI: *getMRI());
479 MachineMemOperand *OffsetMMO =
480 getMF().getMachineMemOperand(MMO: &BaseMMO, Offset, Ty: LoadTy);
481
482 if (Offset == 0) // This may be a size or type changing load.
483 return buildLoad(Res: Dst, Addr: BasePtr, MMO&: *OffsetMMO);
484
485 LLT PtrTy = BasePtr.getLLTTy(MRI: *getMRI());
486 LLT OffsetTy = LLT::scalar(SizeInBits: PtrTy.getSizeInBits());
487 auto ConstOffset = buildConstant(Res: OffsetTy, Val: Offset);
488 auto Ptr = buildPtrAdd(Res: PtrTy, Op0: BasePtr, Op1: ConstOffset);
489 return buildLoad(Res: Dst, Addr: Ptr, MMO&: *OffsetMMO);
490}
491
492MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
493 const SrcOp &Addr,
494 MachineMemOperand &MMO) {
495 assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
496 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
497
498 auto MIB = buildInstr(Opcode: TargetOpcode::G_STORE);
499 Val.addSrcToMIB(MIB);
500 Addr.addSrcToMIB(MIB);
501 MIB.addMemOperand(MMO: &MMO);
502 return MIB;
503}
504
505MachineInstrBuilder
506MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
507 MachinePointerInfo PtrInfo, Align Alignment,
508 MachineMemOperand::Flags MMOFlags,
509 const AAMDNodes &AAInfo) {
510 MMOFlags |= MachineMemOperand::MOStore;
511 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
512
513 LLT Ty = Val.getLLTTy(MRI: *getMRI());
514 MachineMemOperand *MMO =
515 getMF().getMachineMemOperand(PtrInfo, f: MMOFlags, MemTy: Ty, base_alignment: Alignment, AAInfo);
516 return buildStore(Val, Addr, MMO&: *MMO);
517}
518
519MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
520 const SrcOp &Op) {
521 return buildInstr(Opc: TargetOpcode::G_ANYEXT, DstOps: Res, SrcOps: Op);
522}
523
524MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
525 const SrcOp &Op) {
526 return buildInstr(Opc: TargetOpcode::G_SEXT, DstOps: Res, SrcOps: Op);
527}
528
529MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
530 const SrcOp &Op,
531 std::optional<unsigned> Flags) {
532 return buildInstr(Opc: TargetOpcode::G_ZEXT, DstOps: Res, SrcOps: Op, Flags);
533}
534
535unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
536 const auto *TLI = getMF().getSubtarget().getTargetLowering();
537 switch (TLI->getBooleanContents(isVec: IsVec, isFloat: IsFP)) {
538 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
539 return TargetOpcode::G_SEXT;
540 case TargetLoweringBase::ZeroOrOneBooleanContent:
541 return TargetOpcode::G_ZEXT;
542 default:
543 return TargetOpcode::G_ANYEXT;
544 }
545}
546
547MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
548 const SrcOp &Op,
549 bool IsFP) {
550 unsigned ExtOp = getBoolExtOp(IsVec: getMRI()->getType(Reg: Op.getReg()).isVector(), IsFP);
551 return buildInstr(Opc: ExtOp, DstOps: Res, SrcOps: Op);
552}
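// Example sketch for buildBoolExt: widening an s1 compare result to s32 using
// the target's boolean-contents convention:
//   auto Cmp = B.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1), LHS, RHS);
//   auto Ext = B.buildBoolExt(LLT::scalar(32), Cmp, /*IsFP=*/false);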
553
554MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
555 const SrcOp &Op,
556 bool IsVector,
557 bool IsFP) {
558 const auto *TLI = getMF().getSubtarget().getTargetLowering();
559 switch (TLI->getBooleanContents(isVec: IsVector, isFloat: IsFP)) {
560 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
561 return buildSExtInReg(Res, Op, ImmOp: 1);
562 case TargetLoweringBase::ZeroOrOneBooleanContent:
563 return buildZExtInReg(Res, Op, ImmOp: 1);
564 case TargetLoweringBase::UndefinedBooleanContent:
565 return buildCopy(Res, Op);
566 }
567
568 llvm_unreachable("unexpected BooleanContent");
569}
570
571MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
572 const DstOp &Res,
573 const SrcOp &Op) {
574 assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
575 TargetOpcode::G_SEXT == ExtOpc) &&
576 "Expecting Extending Opc");
577 assert(Res.getLLTTy(*getMRI()).isScalar() ||
578 Res.getLLTTy(*getMRI()).isVector());
579 assert(Res.getLLTTy(*getMRI()).isScalar() ==
580 Op.getLLTTy(*getMRI()).isScalar());
581
582 unsigned Opcode = TargetOpcode::COPY;
583 if (Res.getLLTTy(MRI: *getMRI()).getSizeInBits() >
584 Op.getLLTTy(MRI: *getMRI()).getSizeInBits())
585 Opcode = ExtOpc;
586 else if (Res.getLLTTy(MRI: *getMRI()).getSizeInBits() <
587 Op.getLLTTy(MRI: *getMRI()).getSizeInBits())
588 Opcode = TargetOpcode::G_TRUNC;
589 else
590 assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
591
592 return buildInstr(Opc: Opcode, DstOps: Res, SrcOps: Op);
593}
594
595MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
596 const SrcOp &Op) {
597 return buildExtOrTrunc(ExtOpc: TargetOpcode::G_SEXT, Res, Op);
598}
599
600MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
601 const SrcOp &Op) {
602 return buildExtOrTrunc(ExtOpc: TargetOpcode::G_ZEXT, Res, Op);
603}
604
605MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
606 const SrcOp &Op) {
607 return buildExtOrTrunc(ExtOpc: TargetOpcode::G_ANYEXT, Res, Op);
608}
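// Example sketch: the *ExtOrTrunc helpers pick COPY, the extension opcode, or
// G_TRUNC purely from the relative bit widths, so callers need not branch on
// sizes themselves:
//   auto Fit = B.buildSExtOrTrunc(LLT::scalar(32), SomeVal);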
609
610MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
611 const SrcOp &Op,
612 int64_t ImmOp) {
613 LLT ResTy = Res.getLLTTy(MRI: *getMRI());
614 auto Mask = buildConstant(
615 Res: ResTy, Val: APInt::getLowBitsSet(numBits: ResTy.getScalarSizeInBits(), loBitsSet: ImmOp));
616 return buildAnd(Dst: Res, Src0: Op, Src1: Mask);
617}
618
619MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
620 const SrcOp &Src) {
621 LLT SrcTy = Src.getLLTTy(MRI: *getMRI());
622 LLT DstTy = Dst.getLLTTy(MRI: *getMRI());
623 if (SrcTy == DstTy)
624 return buildCopy(Res: Dst, Op: Src);
625
626 unsigned Opcode;
627 if (SrcTy.isPointerOrPointerVector())
628 Opcode = TargetOpcode::G_PTRTOINT;
629 else if (DstTy.isPointerOrPointerVector())
630 Opcode = TargetOpcode::G_INTTOPTR;
631 else {
632 assert(!SrcTy.isPointerOrPointerVector() &&
633 !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
634 Opcode = TargetOpcode::G_BITCAST;
635 }
636
637 return buildInstr(Opc: Opcode, DstOps: Dst, SrcOps: Src);
638}
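// Example sketch for buildCast: it selects G_PTRTOINT, G_INTTOPTR, or
// G_BITCAST (or a plain COPY for identical types) from the operand types:
//   auto AsInt = B.buildCast(LLT::scalar(64), PtrReg); // G_PTRTOINT for a p0 src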
639
640MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
641 const SrcOp &Src,
642 uint64_t Index) {
643 LLT SrcTy = Src.getLLTTy(MRI: *getMRI());
644 LLT DstTy = Dst.getLLTTy(MRI: *getMRI());
645
646#ifndef NDEBUG
647 assert(SrcTy.isValid() && "invalid operand type");
648 assert(DstTy.isValid() && "invalid operand type");
649 assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
650 "extracting off end of register");
651#endif
652
  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }
657
658 auto Extract = buildInstr(Opcode: TargetOpcode::G_EXTRACT);
659 Dst.addDefToMIB(MRI&: *getMRI(), MIB&: Extract);
660 Src.addSrcToMIB(MIB&: Extract);
661 Extract.addImm(Val: Index);
662 return Extract;
663}
664
665MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
666 return buildInstr(Opc: TargetOpcode::G_IMPLICIT_DEF, DstOps: {Res}, SrcOps: {});
667}
668
MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}
678
MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}
689
690MachineInstrBuilder
691MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
692 std::initializer_list<SrcOp> Ops) {
693 assert(Ops.size() > 1);
694 return buildInstr(Opc: getOpcodeForMerge(DstOp: Res, SrcOps: Ops), DstOps: Res, SrcOps: Ops);
695}
696
697unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
698 ArrayRef<SrcOp> SrcOps) const {
699 if (DstOp.getLLTTy(MRI: *getMRI()).isVector()) {
700 if (SrcOps[0].getLLTTy(MRI: *getMRI()).isVector())
701 return TargetOpcode::G_CONCAT_VECTORS;
702 return TargetOpcode::G_BUILD_VECTOR;
703 }
704
705 return TargetOpcode::G_MERGE_VALUES;
706}
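// Example sketch: buildMergeLikeInstr derives its opcode from the destination
// and source types: G_MERGE_VALUES for scalar results, G_BUILD_VECTOR for
// vectors built from scalars, G_CONCAT_VECTORS for vectors built from vectors:
//   SmallVector<Register, 2> Parts = {Lo, Hi}; // two s32 halves (hypothetical)
//   auto Merged = B.buildMergeLikeInstr(LLT::scalar(64), Parts);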
707
708MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
709 const SrcOp &Op) {
710 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
711 // we need some temporary storage for the DstOp objects. Here we use a
712 // sufficiently large SmallVector to not go through the heap.
713 SmallVector<DstOp, 8> TmpVec(Res);
714 assert(TmpVec.size() > 1);
715 return buildInstr(Opc: TargetOpcode::G_UNMERGE_VALUES, DstOps: TmpVec, SrcOps: Op);
716}
717
718MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
719 const SrcOp &Op) {
720 unsigned NumReg = Op.getLLTTy(MRI: *getMRI()).getSizeInBits() / Res.getSizeInBits();
721 SmallVector<DstOp, 8> TmpVec(NumReg, Res);
722 return buildInstr(Opc: TargetOpcode::G_UNMERGE_VALUES, DstOps: TmpVec, SrcOps: Op);
723}
724
725MachineInstrBuilder
726MachineIRBuilder::buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,
727 const SrcOp &Op) {
728 LLT OpTy = Op.getLLTTy(MRI: *getMRI());
729 unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
730 SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
731 return buildInstr(Opc: TargetOpcode::G_UNMERGE_VALUES, DstOps: TmpVec, SrcOps: Op);
732}
733
734MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
735 const SrcOp &Op) {
736 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
737 // we need some temporary storage for the DstOp objects. Here we use a
738 // sufficiently large SmallVector to not go through the heap.
739 SmallVector<DstOp, 8> TmpVec(Res);
740 assert(TmpVec.size() > 1);
741 return buildInstr(Opc: TargetOpcode::G_UNMERGE_VALUES, DstOps: TmpVec, SrcOps: Op);
742}
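// Example sketch for the LLT-based buildUnmerge overload: splitting an s64
// value into two s32 pieces:
//   auto Unmerge = B.buildUnmerge(LLT::scalar(32), Val64);
//   Register Lo = Unmerge.getReg(0), Hi = Unmerge.getReg(1);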
743
MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
752
753MachineInstrBuilder
754MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
755 ArrayRef<APInt> Ops) {
756 SmallVector<SrcOp> TmpVec;
757 TmpVec.reserve(N: Ops.size());
758 LLT EltTy = Res.getLLTTy(MRI: *getMRI()).getElementType();
759 for (const auto &Op : Ops)
760 TmpVec.push_back(Elt: buildConstant(Res: EltTy, Val: Op));
761 return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR, DstOps: Res, SrcOps: TmpVec);
762}
763
764MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
765 const SrcOp &Src) {
766 SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(MRI: *getMRI()).getNumElements(), Src);
767 return buildInstr(Opc: TargetOpcode::G_BUILD_VECTOR, DstOps: Res, SrcOps: TmpVec);
768}
769
MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}
782
783MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
784 const SrcOp &Src) {
785 LLT DstTy = Res.getLLTTy(MRI: *getMRI());
786 assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
787 "Expected Src to match Dst elt ty");
788 auto UndefVec = buildUndef(Res: DstTy);
789 auto Zero = buildConstant(Res: LLT::scalar(SizeInBits: 64), Val: 0);
790 auto InsElt = buildInsertVectorElement(Res: DstTy, Val: UndefVec, Elt: Src, Idx: Zero);
791 SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
792 return buildShuffleVector(Res: DstTy, Src1: InsElt, Src2: UndefVec, Mask: ZeroMask);
793}
794
795MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
796 const SrcOp &Src) {
797 assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
798 "Expected Src to match Dst elt ty");
799 return buildInstr(Opc: TargetOpcode::G_SPLAT_VECTOR, DstOps: Res, SrcOps: Src);
800}
801
802MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
803 const SrcOp &Src1,
804 const SrcOp &Src2,
805 ArrayRef<int> Mask) {
806 LLT DstTy = Res.getLLTTy(MRI: *getMRI());
807 LLT Src1Ty = Src1.getLLTTy(MRI: *getMRI());
808 LLT Src2Ty = Src2.getLLTTy(MRI: *getMRI());
809 const LLT DstElemTy = DstTy.getScalarType();
810 const LLT ElemTy1 = Src1Ty.getScalarType();
811 const LLT ElemTy2 = Src2Ty.getScalarType();
812 assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
  assert(Mask.size() > 1 && "Scalar G_SHUFFLE_VECTOR is not supported");
814 (void)DstElemTy;
815 (void)ElemTy1;
816 (void)ElemTy2;
817 ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
818 return buildInstr(Opc: TargetOpcode::G_SHUFFLE_VECTOR, DstOps: {Res}, SrcOps: {Src1, Src2})
819 .addShuffleMask(Val: MaskAlloc);
820}
821
MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}
830
831MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
832 const SrcOp &Src,
833 const SrcOp &Op,
834 unsigned Index) {
835 assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
836 Res.getLLTTy(*getMRI()).getSizeInBits() &&
837 "insertion past the end of a register");
838
839 if (Res.getLLTTy(MRI: *getMRI()).getSizeInBits() ==
840 Op.getLLTTy(MRI: *getMRI()).getSizeInBits()) {
841 return buildCast(Dst: Res, Src: Op);
842 }
843
844 return buildInstr(Opc: TargetOpcode::G_INSERT, DstOps: Res, SrcOps: {Src, Op, uint64_t(Index)});
845}
846
847MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
848 unsigned Step) {
849 unsigned Bitwidth = Res.getLLTTy(MRI: *getMRI()).getElementType().getSizeInBits();
850 ConstantInt *CI = ConstantInt::get(Context&: getMF().getFunction().getContext(),
851 V: APInt(Bitwidth, Step));
852 auto StepVector = buildInstr(Opcode: TargetOpcode::G_STEP_VECTOR);
853 StepVector->setDebugLoc(DebugLoc());
854 Res.addDefToMIB(MRI&: *getMRI(), MIB&: StepVector);
855 StepVector.addCImm(Val: CI);
856 return StepVector;
857}
858
859MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
860 unsigned MinElts) {
861
862 auto IntN = IntegerType::get(C&: getMF().getFunction().getContext(),
863 NumBits: Res.getLLTTy(MRI: *getMRI()).getScalarSizeInBits());
864 ConstantInt *CI = ConstantInt::get(Ty: IntN, V: MinElts);
865 return buildVScale(Res, MinElts: *CI);
866}
867
868MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
869 const ConstantInt &MinElts) {
870 auto VScale = buildInstr(Opcode: TargetOpcode::G_VSCALE);
871 VScale->setDebugLoc(DebugLoc());
872 Res.addDefToMIB(MRI&: *getMRI(), MIB&: VScale);
873 VScale.addCImm(Val: &MinElts);
874 return VScale;
875}
876
877MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
878 const APInt &MinElts) {
879 ConstantInt *CI =
880 ConstantInt::get(Context&: getMF().getFunction().getContext(), V: MinElts);
881 return buildVScale(Res, MinElts: *CI);
882}
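// Example sketch for buildVScale: materializing vscale * 4 as an s64 value,
// e.g. for scalable stack offsets:
//   auto VScaled = B.buildVScale(LLT::scalar(64), 4);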
883
884static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
885 if (HasSideEffects && IsConvergent)
886 return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
887 if (HasSideEffects)
888 return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
889 if (IsConvergent)
890 return TargetOpcode::G_INTRINSIC_CONVERGENT;
891 return TargetOpcode::G_INTRINSIC;
892}
893
894MachineInstrBuilder
895MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
896 ArrayRef<Register> ResultRegs,
897 bool HasSideEffects, bool isConvergent) {
898 auto MIB = buildInstr(Opcode: getIntrinsicOpcode(HasSideEffects, IsConvergent: isConvergent));
899 for (Register ResultReg : ResultRegs)
900 MIB.addDef(RegNo: ResultReg);
901 MIB.addIntrinsicID(ID);
902 return MIB;
903}
904
905MachineInstrBuilder
906MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
907 ArrayRef<Register> ResultRegs) {
908 AttributeSet Attrs = Intrinsic::getFnAttributes(C&: getContext(), id: ID);
909 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
910 bool isConvergent = Attrs.hasAttribute(Kind: Attribute::Convergent);
911 return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
912}
913
914MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
915 ArrayRef<DstOp> Results,
916 bool HasSideEffects,
917 bool isConvergent) {
918 auto MIB = buildInstr(Opcode: getIntrinsicOpcode(HasSideEffects, IsConvergent: isConvergent));
919 for (DstOp Result : Results)
920 Result.addDefToMIB(MRI&: *getMRI(), MIB);
921 MIB.addIntrinsicID(ID);
922 return MIB;
923}
924
925MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
926 ArrayRef<DstOp> Results) {
927 AttributeSet Attrs = Intrinsic::getFnAttributes(C&: getContext(), id: ID);
928 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
929 bool isConvergent = Attrs.hasAttribute(Kind: Attribute::Convergent);
930 return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
931}
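// Example sketch for the attribute-driven buildIntrinsic overloads: side
// effects and convergence are looked up from the intrinsic itself, and the
// caller appends the non-result operands afterwards (intrinsic choice and
// registers are illustrative only):
//   SmallVector<Register, 1> Results = {DstReg};
//   auto MI = B.buildIntrinsic(Intrinsic::sqrt, Results).addUse(SrcReg);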
932
933MachineInstrBuilder
934MachineIRBuilder::buildTrunc(const DstOp &Res, const SrcOp &Op,
935 std::optional<unsigned> Flags) {
936 return buildInstr(Opc: TargetOpcode::G_TRUNC, DstOps: Res, SrcOps: Op, Flags);
937}
938
939MachineInstrBuilder
940MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
941 std::optional<unsigned> Flags) {
942 return buildInstr(Opc: TargetOpcode::G_FPTRUNC, DstOps: Res, SrcOps: Op, Flags);
943}
944
945MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
946 const DstOp &Res,
947 const SrcOp &Op0,
948 const SrcOp &Op1,
949 std::optional<unsigned> Flags) {
950 return buildInstr(Opc: TargetOpcode::G_ICMP, DstOps: Res, SrcOps: {Pred, Op0, Op1}, Flags);
951}
952
953MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
954 const DstOp &Res,
955 const SrcOp &Op0,
956 const SrcOp &Op1,
957 std::optional<unsigned> Flags) {
958
959 return buildInstr(Opc: TargetOpcode::G_FCMP, DstOps: Res, SrcOps: {Pred, Op0, Op1}, Flags);
960}
961
962MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
963 const SrcOp &Op0,
964 const SrcOp &Op1) {
965 return buildInstr(Opc: TargetOpcode::G_SCMP, DstOps: Res, SrcOps: {Op0, Op1});
966}
967
968MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
969 const SrcOp &Op0,
970 const SrcOp &Op1) {
971 return buildInstr(Opc: TargetOpcode::G_UCMP, DstOps: Res, SrcOps: {Op0, Op1});
972}
973
974MachineInstrBuilder
975MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
976 const SrcOp &Op0, const SrcOp &Op1,
977 std::optional<unsigned> Flags) {
978
979 return buildInstr(Opc: TargetOpcode::G_SELECT, DstOps: {Res}, SrcOps: {Tst, Op0, Op1}, Flags);
980}
981
982MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
983 const SrcOp &Src0,
984 const SrcOp &Src1,
985 unsigned Idx) {
986 return buildInstr(Opc: TargetOpcode::G_INSERT_SUBVECTOR, DstOps: Res,
987 SrcOps: {Src0, Src1, uint64_t(Idx)});
988}
989
990MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
991 const SrcOp &Src,
992 unsigned Idx) {
993 return buildInstr(Opc: TargetOpcode::G_EXTRACT_SUBVECTOR, DstOps: Res,
994 SrcOps: {Src, uint64_t(Idx)});
995}
996
997MachineInstrBuilder
998MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
999 const SrcOp &Elt, const SrcOp &Idx) {
1000 return buildInstr(Opc: TargetOpcode::G_INSERT_VECTOR_ELT, DstOps: Res, SrcOps: {Val, Elt, Idx});
1001}
1002
1003MachineInstrBuilder
1004MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
1005 const SrcOp &Idx) {
1006 return buildInstr(Opc: TargetOpcode::G_EXTRACT_VECTOR_ELT, DstOps: Res, SrcOps: {Val, Idx});
1007}
1008
1009MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
1010 const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
1011 const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
1012#ifndef NDEBUG
1013 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1014 LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
1015 LLT AddrTy = Addr.getLLTTy(*getMRI());
1016 LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
1017 LLT NewValTy = NewVal.getLLTTy(*getMRI());
1018 assert(OldValResTy.isScalar() && "invalid operand type");
1019 assert(SuccessResTy.isScalar() && "invalid operand type");
1020 assert(AddrTy.isPointer() && "invalid operand type");
1021 assert(CmpValTy.isValid() && "invalid operand type");
1022 assert(NewValTy.isValid() && "invalid operand type");
1023 assert(OldValResTy == CmpValTy && "type mismatch");
1024 assert(OldValResTy == NewValTy && "type mismatch");
1025#endif
1026
1027 auto MIB = buildInstr(Opcode: TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
1028 OldValRes.addDefToMIB(MRI&: *getMRI(), MIB);
1029 SuccessRes.addDefToMIB(MRI&: *getMRI(), MIB);
1030 Addr.addSrcToMIB(MIB);
1031 CmpVal.addSrcToMIB(MIB);
1032 NewVal.addSrcToMIB(MIB);
1033 MIB.addMemOperand(MMO: &MMO);
1034 return MIB;
1035}
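// Example sketch for buildAtomicCmpXchgWithSuccess: a 32-bit cmpxchg yielding
// the old value plus an s1 success flag (MMO is assumed to describe an atomic
// 4-byte access):
//   auto Res = B.buildAtomicCmpXchgWithSuccess(LLT::scalar(32), LLT::scalar(1),
//                                              AddrReg, CmpReg, NewReg, *MMO);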
1036
1037MachineInstrBuilder
1038MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
1039 const SrcOp &CmpVal, const SrcOp &NewVal,
1040 MachineMemOperand &MMO) {
1041#ifndef NDEBUG
1042 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1043 LLT AddrTy = Addr.getLLTTy(*getMRI());
1044 LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
1045 LLT NewValTy = NewVal.getLLTTy(*getMRI());
1046 assert(OldValResTy.isScalar() && "invalid operand type");
1047 assert(AddrTy.isPointer() && "invalid operand type");
1048 assert(CmpValTy.isValid() && "invalid operand type");
1049 assert(NewValTy.isValid() && "invalid operand type");
1050 assert(OldValResTy == CmpValTy && "type mismatch");
1051 assert(OldValResTy == NewValTy && "type mismatch");
1052#endif
1053
1054 auto MIB = buildInstr(Opcode: TargetOpcode::G_ATOMIC_CMPXCHG);
1055 OldValRes.addDefToMIB(MRI&: *getMRI(), MIB);
1056 Addr.addSrcToMIB(MIB);
1057 CmpVal.addSrcToMIB(MIB);
1058 NewVal.addSrcToMIB(MIB);
1059 MIB.addMemOperand(MMO: &MMO);
1060 return MIB;
1061}
1062
1063MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
1064 unsigned Opcode, const DstOp &OldValRes,
1065 const SrcOp &Addr, const SrcOp &Val,
1066 MachineMemOperand &MMO) {
1067
1068#ifndef NDEBUG
1069 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1070 LLT AddrTy = Addr.getLLTTy(*getMRI());
1071 LLT ValTy = Val.getLLTTy(*getMRI());
1072 assert(AddrTy.isPointer() && "invalid operand type");
1073 assert(ValTy.isValid() && "invalid operand type");
1074 assert(OldValResTy == ValTy && "type mismatch");
1075 assert(MMO.isAtomic() && "not atomic mem operand");
1076#endif
1077
1078 auto MIB = buildInstr(Opcode);
1079 OldValRes.addDefToMIB(MRI&: *getMRI(), MIB);
1080 Addr.addSrcToMIB(MIB);
1081 Val.addSrcToMIB(MIB);
1082 MIB.addMemOperand(MMO: &MMO);
1083 return MIB;
1084}
1085
1086MachineInstrBuilder
1087MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
1088 Register Val, MachineMemOperand &MMO) {
1089 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
1090 MMO);
1091}
1092MachineInstrBuilder
1093MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
1094 Register Val, MachineMemOperand &MMO) {
1095 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
1096 MMO);
1097}
1098MachineInstrBuilder
1099MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
1100 Register Val, MachineMemOperand &MMO) {
1101 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
1102 MMO);
1103}
1104MachineInstrBuilder
1105MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
1106 Register Val, MachineMemOperand &MMO) {
1107 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
1108 MMO);
1109}
1110MachineInstrBuilder
1111MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
1112 Register Val, MachineMemOperand &MMO) {
1113 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
1114 MMO);
1115}
1116MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
1117 Register Addr,
1118 Register Val,
1119 MachineMemOperand &MMO) {
1120 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
1121 MMO);
1122}
1123MachineInstrBuilder
1124MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
1125 Register Val, MachineMemOperand &MMO) {
1126 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
1127 MMO);
1128}
1129MachineInstrBuilder
1130MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
1131 Register Val, MachineMemOperand &MMO) {
1132 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
1133 MMO);
1134}
1135MachineInstrBuilder
1136MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
1137 Register Val, MachineMemOperand &MMO) {
1138 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
1139 MMO);
1140}
1141MachineInstrBuilder
1142MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
1143 Register Val, MachineMemOperand &MMO) {
1144 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
1145 MMO);
1146}
1147MachineInstrBuilder
1148MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
1149 Register Val, MachineMemOperand &MMO) {
1150 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
1151 MMO);
1152}
1153
1154MachineInstrBuilder
1155MachineIRBuilder::buildAtomicRMWFAdd(
1156 const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1157 MachineMemOperand &MMO) {
1158 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
1159 MMO);
1160}
1161
1162MachineInstrBuilder
1163MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1164 MachineMemOperand &MMO) {
1165 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
1166 MMO);
1167}
1168
1169MachineInstrBuilder
1170MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
1171 const SrcOp &Val, MachineMemOperand &MMO) {
1172 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
1173 MMO);
1174}
1175
1176MachineInstrBuilder
1177MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
1178 const SrcOp &Val, MachineMemOperand &MMO) {
1179 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
1180 MMO);
1181}
1182
1183MachineInstrBuilder
1184MachineIRBuilder::buildAtomicRMWFMaximum(const DstOp &OldValRes,
1185 const SrcOp &Addr, const SrcOp &Val,
1186 MachineMemOperand &MMO) {
1187 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FMAXIMUM, OldValRes, Addr,
1188 Val, MMO);
1189}
1190
1191MachineInstrBuilder
1192MachineIRBuilder::buildAtomicRMWFMinimum(const DstOp &OldValRes,
1193 const SrcOp &Addr, const SrcOp &Val,
1194 MachineMemOperand &MMO) {
1195 return buildAtomicRMW(Opcode: TargetOpcode::G_ATOMICRMW_FMINIMUM, OldValRes, Addr,
1196 Val, MMO);
1197}
1198
1199MachineInstrBuilder
1200MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
1201 return buildInstr(Opcode: TargetOpcode::G_FENCE)
1202 .addImm(Val: Ordering)
1203 .addImm(Val: Scope);
1204}
1205
1206MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
1207 unsigned RW,
1208 unsigned Locality,
1209 unsigned CacheType,
1210 MachineMemOperand &MMO) {
1211 auto MIB = buildInstr(Opcode: TargetOpcode::G_PREFETCH);
1212 Addr.addSrcToMIB(MIB);
1213 MIB.addImm(Val: RW).addImm(Val: Locality).addImm(Val: CacheType);
1214 MIB.addMemOperand(MMO: &MMO);
1215 return MIB;
1216}
1217
1218MachineInstrBuilder
1219MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
1220#ifndef NDEBUG
1221 assert(getMRI()->getType(Res).isPointer() && "invalid res type");
1222#endif
1223
1224 return buildInstr(Opcode: TargetOpcode::G_BLOCK_ADDR).addDef(RegNo: Res).addBlockAddress(BA);
1225}
1226
1227void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
1228 bool IsExtend) {
1229#ifndef NDEBUG
1230 if (DstTy.isVector()) {
1231 assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
1232 assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
1233 "different number of elements in a trunc/ext");
1234 } else
1235 assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
1236
1237 if (IsExtend)
1238 assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
1239 "invalid narrowing extend");
1240 else
1241 assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
1242 "invalid widening trunc");
1243#endif
1244}
1245
1246void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
1247 const LLT Op0Ty, const LLT Op1Ty) {
1248#ifndef NDEBUG
1249 assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
1250 "invalid operand type");
1251 assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
1252 if (ResTy.isScalar() || ResTy.isPointer())
1253 assert(TstTy.isScalar() && "type mismatch");
1254 else
1255 assert((TstTy.isScalar() ||
1256 (TstTy.isVector() &&
1257 TstTy.getElementCount() == Op0Ty.getElementCount())) &&
1258 "type mismatch");
1259#endif
1260}
1261
1262MachineInstrBuilder
1263MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
1264 ArrayRef<SrcOp> SrcOps,
1265 std::optional<unsigned> Flags) {
1266 switch (Opc) {
1267 default:
1268 break;
1269 case TargetOpcode::G_SELECT: {
1270 assert(DstOps.size() == 1 && "Invalid select");
1271 assert(SrcOps.size() == 3 && "Invalid select");
1272 validateSelectOp(
1273 ResTy: DstOps[0].getLLTTy(MRI: *getMRI()), TstTy: SrcOps[0].getLLTTy(MRI: *getMRI()),
1274 Op0Ty: SrcOps[1].getLLTTy(MRI: *getMRI()), Op1Ty: SrcOps[2].getLLTTy(MRI: *getMRI()));
1275 break;
1276 }
1277 case TargetOpcode::G_FNEG:
1278 case TargetOpcode::G_ABS:
1279 // All these are unary ops.
1280 assert(DstOps.size() == 1 && "Invalid Dst");
1281 assert(SrcOps.size() == 1 && "Invalid Srcs");
1282 validateUnaryOp(Res: DstOps[0].getLLTTy(MRI: *getMRI()),
1283 Op0: SrcOps[0].getLLTTy(MRI: *getMRI()));
1284 break;
1285 case TargetOpcode::G_ADD:
1286 case TargetOpcode::G_AND:
1287 case TargetOpcode::G_MUL:
1288 case TargetOpcode::G_OR:
1289 case TargetOpcode::G_SUB:
1290 case TargetOpcode::G_XOR:
1291 case TargetOpcode::G_UDIV:
1292 case TargetOpcode::G_SDIV:
1293 case TargetOpcode::G_UREM:
1294 case TargetOpcode::G_SREM:
1295 case TargetOpcode::G_SMIN:
1296 case TargetOpcode::G_SMAX:
1297 case TargetOpcode::G_UMIN:
1298 case TargetOpcode::G_UMAX:
1299 case TargetOpcode::G_UADDSAT:
1300 case TargetOpcode::G_SADDSAT:
1301 case TargetOpcode::G_USUBSAT:
1302 case TargetOpcode::G_SSUBSAT: {
1303 // All these are binary ops.
1304 assert(DstOps.size() == 1 && "Invalid Dst");
1305 assert(SrcOps.size() == 2 && "Invalid Srcs");
1306 validateBinaryOp(Res: DstOps[0].getLLTTy(MRI: *getMRI()),
1307 Op0: SrcOps[0].getLLTTy(MRI: *getMRI()),
1308 Op1: SrcOps[1].getLLTTy(MRI: *getMRI()));
1309 break;
1310 }
1311 case TargetOpcode::G_SHL:
1312 case TargetOpcode::G_ASHR:
1313 case TargetOpcode::G_LSHR:
1314 case TargetOpcode::G_USHLSAT:
1315 case TargetOpcode::G_SSHLSAT: {
1316 assert(DstOps.size() == 1 && "Invalid Dst");
1317 assert(SrcOps.size() == 2 && "Invalid Srcs");
1318 validateShiftOp(Res: DstOps[0].getLLTTy(MRI: *getMRI()),
1319 Op0: SrcOps[0].getLLTTy(MRI: *getMRI()),
1320 Op1: SrcOps[1].getLLTTy(MRI: *getMRI()));
1321 break;
1322 }
1323 case TargetOpcode::G_SEXT:
1324 case TargetOpcode::G_ZEXT:
1325 case TargetOpcode::G_ANYEXT:
1326 assert(DstOps.size() == 1 && "Invalid Dst");
1327 assert(SrcOps.size() == 1 && "Invalid Srcs");
1328 validateTruncExt(DstTy: DstOps[0].getLLTTy(MRI: *getMRI()),
1329 SrcTy: SrcOps[0].getLLTTy(MRI: *getMRI()), IsExtend: true);
1330 break;
1331 case TargetOpcode::G_TRUNC:
1332 case TargetOpcode::G_FPTRUNC: {
1333 assert(DstOps.size() == 1 && "Invalid Dst");
1334 assert(SrcOps.size() == 1 && "Invalid Srcs");
1335 validateTruncExt(DstTy: DstOps[0].getLLTTy(MRI: *getMRI()),
1336 SrcTy: SrcOps[0].getLLTTy(MRI: *getMRI()), IsExtend: false);
1337 break;
1338 }
1339 case TargetOpcode::G_BITCAST: {
1340 assert(DstOps.size() == 1 && "Invalid Dst");
1341 assert(SrcOps.size() == 1 && "Invalid Srcs");
1342 assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1343 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
1344 break;
1345 }
1346 case TargetOpcode::COPY:
1347 assert(DstOps.size() == 1 && "Invalid Dst");
1348 // If the caller wants to add a subreg source it has to be done separately
1349 // so we may not have any SrcOps at this point yet.
1350 break;
1351 case TargetOpcode::G_FCMP:
1352 case TargetOpcode::G_ICMP: {
1353 assert(DstOps.size() == 1 && "Invalid Dst Operands");
1354 assert(SrcOps.size() == 3 && "Invalid Src Operands");
1355 // For F/ICMP, the first src operand is the predicate, followed by
1356 // the two comparands.
1357 assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
1358 "Expecting predicate");
1359 assert([&]() -> bool {
1360 CmpInst::Predicate Pred = SrcOps[0].getPredicate();
1361 return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
1362 : CmpInst::isFPPredicate(Pred);
1363 }() && "Invalid predicate");
1364 assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1365 "Type mismatch");
1366 assert([&]() -> bool {
1367 LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
1368 LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1369 if (Op0Ty.isScalar() || Op0Ty.isPointer())
1370 return DstTy.isScalar();
1371 else
1372 return DstTy.isVector() &&
1373 DstTy.getElementCount() == Op0Ty.getElementCount();
1374 }() && "Type Mismatch");
1375 break;
1376 }
1377 case TargetOpcode::G_UNMERGE_VALUES: {
1378 assert(!DstOps.empty() && "Invalid trivial sequence");
1379 assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
1380 assert(llvm::all_of(DstOps,
1381 [&, this](const DstOp &Op) {
1382 return Op.getLLTTy(*getMRI()) ==
1383 DstOps[0].getLLTTy(*getMRI());
1384 }) &&
1385 "type mismatch in output list");
1386 assert((TypeSize::ScalarTy)DstOps.size() *
1387 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1388 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1389 "input operands do not cover output register");
1390 break;
1391 }
1392 case TargetOpcode::G_MERGE_VALUES: {
1393 assert(SrcOps.size() >= 2 && "invalid trivial sequence");
1394 assert(DstOps.size() == 1 && "Invalid Dst");
1395 assert(llvm::all_of(SrcOps,
1396 [&, this](const SrcOp &Op) {
1397 return Op.getLLTTy(*getMRI()) ==
1398 SrcOps[0].getLLTTy(*getMRI());
1399 }) &&
1400 "type mismatch in input list");
1401 assert((TypeSize::ScalarTy)SrcOps.size() *
1402 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1403 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1404 "input operands do not cover output register");
1405 assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
1406 "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
1407 break;
1408 }
1409 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1410 assert(DstOps.size() == 1 && "Invalid Dst size");
1411 assert(SrcOps.size() == 2 && "Invalid Src size");
1412 assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1413 assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1414 DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1415 "Invalid operand type");
1416 assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1417 assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1418 DstOps[0].getLLTTy(*getMRI()) &&
1419 "Type mismatch");
1420 break;
1421 }
1422 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1423 assert(DstOps.size() == 1 && "Invalid dst size");
1424 assert(SrcOps.size() == 3 && "Invalid src size");
1425 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1426 SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1427 assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1428 SrcOps[1].getLLTTy(*getMRI()) &&
1429 "Type mismatch");
1430 assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1431 assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
1432 SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
1433 "Type mismatch");
1434 break;
1435 }
1436 case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
1439 assert(DstOps.size() == 1 && "Invalid DstOps");
1440 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1441 "Res type must be a vector");
1442 assert(llvm::all_of(SrcOps,
1443 [&, this](const SrcOp &Op) {
1444 return Op.getLLTTy(*getMRI()) ==
1445 SrcOps[0].getLLTTy(*getMRI());
1446 }) &&
1447 "type mismatch in input list");
1448 assert((TypeSize::ScalarTy)SrcOps.size() *
1449 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1450 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1451 "input scalars do not exactly cover the output vector register");
1452 break;
1453 }
1454 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
1457 assert(DstOps.size() == 1 && "Invalid DstOps");
1458 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1459 "Res type must be a vector");
1460 assert(llvm::all_of(SrcOps,
1461 [&, this](const SrcOp &Op) {
1462 return Op.getLLTTy(*getMRI()) ==
1463 SrcOps[0].getLLTTy(*getMRI());
1464 }) &&
1465 "type mismatch in input list");
1466 break;
1467 }
1468 case TargetOpcode::G_CONCAT_VECTORS: {
1469 assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
1472 assert(llvm::all_of(SrcOps,
1473 [&, this](const SrcOp &Op) {
1474 return (Op.getLLTTy(*getMRI()).isVector() &&
1475 Op.getLLTTy(*getMRI()) ==
1476 SrcOps[0].getLLTTy(*getMRI()));
1477 }) &&
1478 "type mismatch in input list");
1479 assert((TypeSize::ScalarTy)SrcOps.size() *
1480 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1481 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1482 "input vectors do not exactly cover the output vector register");
1483 break;
1484 }
1485 case TargetOpcode::G_UADDE: {
1486 assert(DstOps.size() == 2 && "Invalid no of dst operands");
1487 assert(SrcOps.size() == 3 && "Invalid no of src operands");
1488 assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1489 assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1490 (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1491 "Invalid operand");
1492 assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1493 assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1494 "type mismatch");
1495 break;
1496 }
1497 }
1498
  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
1507}
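// Example sketch for the generic buildInstr entry point used by most of the
// typed helpers above; the switch validates operand shapes before the MI is
// created (X and Y are hypothetical s32 vregs):
//   auto Add = B.buildInstr(TargetOpcode::G_ADD, {LLT::scalar(32)}, {X, Y},
//                           MachineInstr::MIFlag::NoSWrap);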
1508