1//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM
11///
12//===----------------------------------------------------------------------===//
13
14#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
15#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
16#include "llvm/CodeGen/MachineFrameInfo.h"
17#include "llvm/CodeGen/MachineOperand.h"
18#include "llvm/CodeGen/MachineRegisterInfo.h"
19#include "llvm/CodeGen/TargetLowering.h"
20#include "llvm/IR/Module.h"
21
22#define DEBUG_TYPE "inline-asm-lowering"
23
24using namespace llvm;
25
// Out-of-line virtual method "anchor": pins this class's vtable to this
// translation unit so it is not emitted in every TU that includes the header.
void InlineAsmLowering::anchor() {}
27
namespace {

/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  SmallVector<Register, 1> Regs;

  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};

using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;

/// Accumulates the "extra info" immediate operand of an INLINEASM
/// instruction: side effects, stack alignment, unwinding, convergence, the
/// asm dialect, and the conservative MayLoad/MayStore bits derived from the
/// individual constraints via update().
class ExtraFlags {
  unsigned Flags = 0;

public:
  /// Seed the flag word from the properties of the inline asm itself.
  explicit ExtraFlags(const CallBase &CB) {
    const InlineAsm *IA = cast<InlineAsm>(Val: CB.getCalledOperand());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (IA->canThrow())
      Flags |= InlineAsm::Extra_MayUnwind;
    if (CB.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
    // getDialect() is 0 or 1, so this either clears or sets the
    // Extra_AsmDialect bit.
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  /// Fold in the memory-effect bits implied by one lowered constraint.
  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints. However, the
    // meaning of an Other constraint can be target-specific and we can't easily
    // reason about it. Therefore, be conservative and set MayLoad/MayStore
    // for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  /// Returns the accumulated extra-info flag word.
  unsigned get() const { return Flags; }
};

} // namespace
81
82/// Assign virtual/physical registers for the specified register operand.
83static void getRegistersForValue(MachineFunction &MF,
84 MachineIRBuilder &MIRBuilder,
85 GISelAsmOperandInfo &OpInfo,
86 GISelAsmOperandInfo &RefOpInfo) {
87
88 const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
89 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
90
91 // No work to do for memory operations.
92 if (OpInfo.ConstraintType == TargetLowering::C_Memory)
93 return;
94
95 // If this is a constraint for a single physreg, or a constraint for a
96 // register class, find it.
97 Register AssignedReg;
98 const TargetRegisterClass *RC;
99 std::tie(args&: AssignedReg, args&: RC) = TLI.getRegForInlineAsmConstraint(
100 TRI: &TRI, Constraint: RefOpInfo.ConstraintCode, VT: RefOpInfo.ConstraintVT);
101 // RC is unset only on failure. Return immediately.
102 if (!RC)
103 return;
104
105 // No need to allocate a matching input constraint since the constraint it's
106 // matching to has already been allocated.
107 if (OpInfo.isMatchingInputConstraint())
108 return;
109
110 // Initialize NumRegs.
111 unsigned NumRegs = 1;
112 if (OpInfo.ConstraintVT != MVT::Other)
113 NumRegs =
114 TLI.getNumRegisters(Context&: MF.getFunction().getContext(), VT: OpInfo.ConstraintVT);
115
116 // If this is a constraint for a specific physical register, but the type of
117 // the operand requires more than one register to be passed, we allocate the
118 // required amount of physical registers, starting from the selected physical
119 // register.
120 // For this, first retrieve a register iterator for the given register class
121 TargetRegisterClass::iterator I = RC->begin();
122 MachineRegisterInfo &RegInfo = MF.getRegInfo();
123
124 // Advance the iterator to the assigned register (if set)
125 if (AssignedReg) {
126 for (; *I != AssignedReg; ++I)
127 assert(I != RC->end() && "AssignedReg should be a member of provided RC");
128 }
129
130 // Finally, assign the registers. If the AssignedReg isn't set, create virtual
131 // registers with the provided register class
132 for (; NumRegs; --NumRegs, ++I) {
133 assert(I != RC->end() && "Ran out of registers to allocate!");
134 Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RegClass: RC);
135 OpInfo.Regs.push_back(Elt: R);
136 }
137}
138
139static void computeConstraintToUse(const TargetLowering *TLI,
140 TargetLowering::AsmOperandInfo &OpInfo) {
141 assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
142
143 // Single-letter constraints ('r') are very common.
144 if (OpInfo.Codes.size() == 1) {
145 OpInfo.ConstraintCode = OpInfo.Codes[0];
146 OpInfo.ConstraintType = TLI->getConstraintType(Constraint: OpInfo.ConstraintCode);
147 } else {
148 TargetLowering::ConstraintGroup G = TLI->getConstraintPreferences(OpInfo);
149 if (G.empty())
150 return;
151 // FIXME: prefer immediate constraints if the target allows it
152 unsigned BestIdx = 0;
153 for (const unsigned E = G.size();
154 BestIdx < E && (G[BestIdx].second == TargetLowering::C_Other ||
155 G[BestIdx].second == TargetLowering::C_Immediate);
156 ++BestIdx)
157 ;
158 OpInfo.ConstraintCode = G[BestIdx].first;
159 OpInfo.ConstraintType = G[BestIdx].second;
160 }
161
162 // 'X' matches anything.
163 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
164 // Labels and constants are handled elsewhere ('X' is the only thing
165 // that matches labels). For Functions, the type here is the type of
166 // the result, which is not what we want to look at; leave them alone.
167 Value *Val = OpInfo.CallOperandVal;
168 if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
169 return;
170
171 // Otherwise, try to resolve it to something we know about by looking at
172 // the actual operand type.
173 if (const char *Repl = TLI->LowerXConstraint(ConstraintVT: OpInfo.ConstraintVT)) {
174 OpInfo.ConstraintCode = Repl;
175 OpInfo.ConstraintType = TLI->getConstraintType(Constraint: OpInfo.ConstraintCode);
176 }
177 }
178}
179
180static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
181 const InlineAsm::Flag F(I.getOperand(i: OpIdx).getImm());
182 return F.getNumOperandRegisters();
183}
184
/// Copy \p Src into \p Dst, first any-extending \p Src when the destination
/// register is wider than the source.
///
/// Returns false when the copy cannot be built: the source has no valid LLT
/// type, the source is wider than the destination, or a narrower source is
/// not a scalar (only scalars are any-extended here).
static bool buildAnyextOrCopy(Register Dst, Register Src,
                              MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI =
      MIRBuilder.getMF().getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  auto SrcTy = MRI->getType(Reg: Src);
  if (!SrcTy.isValid()) {
    LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
    return false;
  }
  unsigned SrcSize = TRI->getRegSizeInBits(Reg: Src, MRI: *MRI);
  unsigned DstSize = TRI->getRegSizeInBits(Reg: Dst, MRI: *MRI);

  if (DstSize < SrcSize) {
    LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
    return false;
  }

  // Attempt to anyext small scalar sources.
  if (DstSize > SrcSize) {
    if (!SrcTy.isScalar()) {
      LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of"
                           "destination register class\n");
      return false;
    }
    // Widen Src to DstSize so the COPY below is between same-sized registers.
    Src = MIRBuilder.buildAnyExt(Res: LLT::scalar(SizeInBits: DstSize), Op: Src).getReg(Idx: 0);
  }

  MIRBuilder.buildCopy(Res: Dst, Op: Src);
  return true;
}
217
// Lower an IR inline-asm call site to a MIR INLINEASM instruction.
//
// The lowering runs in four phases:
//   1. Parse and type every constraint into ConstraintOperands, accumulating
//      the extra-info flag word (ExtraInfo).
//   2. Build the INLINEASM instruction *without inserting it* and append one
//      operand group (flag immediate + registers/immediates) per constraint.
//   3. Insert the instruction — deferred so that instructions materializing
//      the inputs (extends, copies, stack stores) come first.
//   4. Copy the output registers into the vregs of the call's result value.
//
// Returns false for any form this lowering does not support yet, so the
// caller can fall back; the LLVM_DEBUG messages name each unsupported case.
bool InlineAsmLowering::lowerInlineAsm(
    MachineIRBuilder &MIRBuilder, const CallBase &Call,
    std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
    const {
  const InlineAsm *IA = cast<InlineAsm>(Val: Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  GISelAsmOperandInfoVector ConstraintOperands;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getDataLayout();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, TRI, Call);

  ExtraFlags ExtraInfo(Call);
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(Elt: GISelAsmOperandInfo(T));
    GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.hasArg()) {
      OpInfo.CallOperandVal = Call.getArgOperand(i: ArgNo);

      if (isa<BasicBlock>(Val: OpInfo.CallOperandVal)) {
        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
        return false;
      }

      Type *OpTy = OpInfo.CallOperandVal->getType();

      // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
      if (OpInfo.isIndirect) {
        OpTy = Call.getParamElementType(ArgNo);
        assert(OpTy && "Indirect operand must have elementtype attribute");
      }

      // FIXME: Support aggregate input operands
      if (!OpTy->isSingleValueType()) {
        LLVM_DEBUG(
            dbgs() << "Aggregate input operands are not supported yet\n");
        return false;
      }

      OpInfo.ConstraintVT =
          TLI->getAsmOperandValueType(DL, Ty: OpTy, AllowUnknown: true).getSimpleVT();
      ++ArgNo;
    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      // Multiple direct outputs come back as a struct; each element maps to
      // one output constraint in order.
      if (StructType *STy = dyn_cast<StructType>(Val: Call.getType())) {
        OpInfo.ConstraintVT =
            TLI->getSimpleValueType(DL, Ty: STy->getElementType(N: ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI->getAsmOperandValueType(DL, Ty: Call.getType()).getSimpleVT();
      }
      ++ResNo;
    } else {
      assert(OpInfo.Type != InlineAsm::isLabel &&
             "GlobalISel currently doesn't support callbr");
      OpInfo.ConstraintVT = MVT::Other;
    }

    // Give up on i64x8 operands — not supported by this lowering.
    if (OpInfo.ConstraintVT == MVT::i64x8)
      return false;

    // Compute the constraint code and ConstraintType to use.
    computeConstraintToUse(TLI, OpInfo);

    // The selected constraint type might expose new sideeffects
    ExtraInfo.update(OpInfo);
  }

  // At this point, all operand types are decided.
  // Create the MachineInstr, but don't insert it yet since input
  // operands still need to insert instructions before this one
  auto Inst = MIRBuilder.buildInstrNoInsert(Opcode: TargetOpcode::INLINEASM)
                  .addExternalSymbol(FnName: IA->getAsmString().data())
                  .addImm(Val: ExtraInfo.get());

  // Starting from this operand: flag followed by register(s) will be added as
  // operands to Inst for each constraint. Used for matching input constraints.
  unsigned StartIdx = Inst->getNumOperands();

  // Collects the output operands for later processing
  GISelAsmOperandInfoVector OutputOperands;

  for (auto &OpInfo : ConstraintOperands) {
    // For a matching input constraint ("0", "1", ...) the register class is
    // dictated by the constraint it matches, not by this operand itself.
    GISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;

    // Assign registers for register operands
    getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        const InlineAsm::ConstraintCode ConstraintID =
            TLI->getInlineAsmMemConstraint(ConstraintCode: OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM instruction to know about this
        // output.
        InlineAsm::Flag Flag(InlineAsm::Kind::Mem, 1);
        Flag.setMemConstraint(ConstraintID);
        Inst.addImm(Val: Flag);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory output to fit into a single virtual register");
        Inst.addReg(RegNo: SourceRegs[0]);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass/C_Other.
        assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
               OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
               OpInfo.ConstraintType == TargetLowering::C_Other);

        // Find a register that we can use.
        if (OpInfo.Regs.empty()) {
          LLVM_DEBUG(dbgs()
                     << "Couldn't allocate output register for constraint\n");
          return false;
        }

        // Add information to the INLINEASM instruction to know that this
        // register is set.
        InlineAsm::Flag Flag(OpInfo.isEarlyClobber
                                 ? InlineAsm::Kind::RegDefEarlyClobber
                                 : InlineAsm::Kind::RegDef,
                             OpInfo.Regs.size());
        if (OpInfo.Regs.front().isVirtual()) {
          // Put the register class of the virtual registers in the flag word.
          // That way, later passes can recompute register class constraints for
          // inline assembly as well as normal instructions. Don't do this for
          // tied operands that can use the regclass information from the def.
          const TargetRegisterClass *RC = MRI->getRegClass(Reg: OpInfo.Regs.front());
          Flag.setRegClass(RC->getID());
        }

        Inst.addImm(Val: Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(RegNo: Reg, Flags: RegState::Define |
                               getImplRegState(B: Reg.isPhysical()) |
                               getEarlyClobberRegState(B: OpInfo.isEarlyClobber));
        }

        // Remember this output operand for later processing
        OutputOperands.push_back(Elt: OpInfo);
      }

      break;
    case InlineAsm::isInput:
    case InlineAsm::isLabel: {
      if (OpInfo.isMatchingInputConstraint()) {
        unsigned DefIdx = OpInfo.getMatchedOperand();
        // Find operand with register def that corresponds to DefIdx.
        // Each operand group on Inst is one flag immediate followed by its
        // registers, so step over "1 + register count" per group.
        unsigned InstFlagIdx = StartIdx;
        for (unsigned i = 0; i < DefIdx; ++i)
          InstFlagIdx += getNumOpRegs(I: *Inst, OpIdx: InstFlagIdx) + 1;
        assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");

        const InlineAsm::Flag MatchedOperandFlag(Inst->getOperand(i: InstFlagIdx).getImm());
        if (MatchedOperandFlag.isMemKind()) {
          LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
                               "supported. This should be target specific.\n");
          return false;
        }
        if (!MatchedOperandFlag.isRegDefKind() && !MatchedOperandFlag.isRegDefEarlyClobberKind()) {
          LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
          return false;
        }

        // We want to tie input to register in next operand.
        unsigned DefRegIdx = InstFlagIdx + 1;
        Register Def = Inst->getOperand(i: DefRegIdx).getReg();

        ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(SrcRegs.size() == 1 && "Single register is expected here");

        // We need the tied input to live in the same register class as the def.
        //
        // - if Def is a vreg, we can just use its regclass.
        // - if Def is a physreg, create a vreg in the minimal regclass for that
        //   physreg.
        //
        // Otherwise RegBankSelect may leave it in the wrong bank (e.g. GPR even
        // though it's tied to an FP physreg).
        const TargetRegisterClass *RC = Def.isVirtual()
                                            ? MRI->getRegClass(Reg: Def)
                                            : TRI->getMinimalPhysRegClass(Reg: Def);

        // Materialize `In` in a new vreg that has a register class that matches
        // the register class of `Def`.
        Register In = MRI->createVirtualRegister(RegClass: RC);
        if (!buildAnyextOrCopy(Dst: In, Src: SrcRegs[0], MIRBuilder))
          return false;

        // Add Flag and input register operand (In) to Inst. Tie In to Def.
        InlineAsm::Flag UseFlag(InlineAsm::Kind::RegUse, 1);
        UseFlag.setMatchingOp(DefIdx);
        Inst.addImm(Val: UseFlag);
        Inst.addReg(RegNo: In);
        Inst->tieOperands(DefIdx: DefRegIdx, UseIdx: Inst->getNumOperands() - 1);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
                             "not supported yet\n");
        return false;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {

        std::vector<MachineOperand> Ops;
        if (!lowerAsmOperandForConstraint(Val: OpInfo.CallOperandVal,
                                          Constraint: OpInfo.ConstraintCode, Ops,
                                          MIRBuilder)) {
          LLVM_DEBUG(dbgs() << "Don't support constraint: "
                            << OpInfo.ConstraintCode << " yet\n");
          return false;
        }

        assert(Ops.size() > 0 &&
               "Expected constraint to be lowered to at least one operand");

        // Add information to the INLINEASM node to know about this input.
        const unsigned OpFlags =
            InlineAsm::Flag(InlineAsm::Kind::Imm, Ops.size());
        Inst.addImm(Val: OpFlags);
        Inst.add(MOs: Ops);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        const InlineAsm::ConstraintCode ConstraintID =
            TLI->getInlineAsmMemConstraint(ConstraintCode: OpInfo.ConstraintCode);
        InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
        OpFlags.setMemConstraint(ConstraintID);
        Inst.addImm(Val: OpFlags);

        if (OpInfo.isIndirect) {
          // already indirect
          ArrayRef<Register> SourceRegs =
              GetOrCreateVRegs(*OpInfo.CallOperandVal);
          if (SourceRegs.size() != 1) {
            LLVM_DEBUG(dbgs() << "Expected the memory input to fit into a "
                                 "single virtual register "
                                 "for constraint '"
                              << OpInfo.ConstraintCode << "'\n");
            return false;
          }
          Inst.addReg(RegNo: SourceRegs[0]);
          break;
        }

        // Needs to be made indirect. Store the value on the stack and use
        // a pointer to it.
        Value *OpVal = OpInfo.CallOperandVal;
        TypeSize Bytes = DL.getTypeStoreSize(Ty: OpVal->getType());
        Align Alignment = DL.getPrefTypeAlign(Ty: OpVal->getType());
        int FrameIdx =
            MF.getFrameInfo().CreateStackObject(Size: Bytes, Alignment, isSpillSlot: false);

        unsigned AddrSpace = DL.getAllocaAddrSpace();
        LLT FramePtrTy =
            LLT::pointer(AddressSpace: AddrSpace, SizeInBits: DL.getPointerSizeInBits(AS: AddrSpace));
        auto Ptr = MIRBuilder.buildFrameIndex(Res: FramePtrTy, Idx: FrameIdx).getReg(Idx: 0);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        if (SourceRegs.size() != 1) {
          LLVM_DEBUG(dbgs() << "Expected the memory input to fit into a single "
                               "virtual register "
                               "for constraint '"
                            << OpInfo.ConstraintCode << "'\n");
          return false;
        }
        MIRBuilder.buildStore(Val: SourceRegs[0], Addr: Ptr,
                              PtrInfo: MachinePointerInfo::getFixedStack(MF, FI: FrameIdx),
                              Alignment);
        Inst.addReg(RegNo: Ptr);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      if (OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
                             "for constraint '"
                          << OpInfo.ConstraintCode << "'\n");
        return false;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.Regs.empty()) {
        LLVM_DEBUG(
            dbgs()
            << "Couldn't allocate input register for register constraint\n");
        return false;
      }

      unsigned NumRegs = OpInfo.Regs.size();
      ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
      assert(NumRegs == SourceRegs.size() &&
             "Expected the number of input registers to match the number of "
             "source registers");

      if (NumRegs > 1) {
        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
                             "not supported yet\n");
        return false;
      }

      InlineAsm::Flag Flag(InlineAsm::Kind::RegUse, NumRegs);
      if (OpInfo.Regs.front().isVirtual()) {
        // Put the register class of the virtual registers in the flag word.
        const TargetRegisterClass *RC = MRI->getRegClass(Reg: OpInfo.Regs.front());
        Flag.setRegClass(RC->getID());
      }
      Inst.addImm(Val: Flag);
      if (!buildAnyextOrCopy(Dst: OpInfo.Regs[0], Src: SourceRegs[0], MIRBuilder))
        return false;
      Inst.addReg(RegNo: OpInfo.Regs[0]);
      break;
    }

    case InlineAsm::isClobber: {

      const unsigned NumRegs = OpInfo.Regs.size();
      if (NumRegs > 0) {
        unsigned Flag = InlineAsm::Flag(InlineAsm::Kind::Clobber, NumRegs);
        Inst.addImm(Val: Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(RegNo: Reg, Flags: RegState::Define | RegState::EarlyClobber |
                                   getImplRegState(B: Reg.isPhysical()));
        }
      }
      break;
    }
    }
  }

  // Thread the convergence-control token (if present) through as an implicit
  // use so later passes keep the convergence dependency.
  if (auto Bundle = Call.getOperandBundle(ID: LLVMContext::OB_convergencectrl)) {
    auto *Token = Bundle->Inputs[0].get();
    ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*Token);
    assert(SourceRegs.size() == 1 &&
           "Expected the control token to fit into a single virtual register");
    Inst.addUse(RegNo: SourceRegs[0], Flags: RegState::Implicit);
  }

  if (const MDNode *SrcLoc = Call.getMetadata(Kind: "srcloc"))
    Inst.addMetadata(MD: SrcLoc);

  // Add rounding control registers as implicit def for inline asm.
  if (MF.getFunction().hasFnAttribute(Kind: Attribute::StrictFP)) {
    ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
    for (MCPhysReg Reg : RCRegs)
      Inst.addReg(RegNo: Reg, Flags: RegState::ImplicitDefine);
  }

  // All inputs are handled, insert the instruction now
  MIRBuilder.insertInstr(MIB: Inst);

  // Finally, copy the output operands into the output registers
  ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
  if (ResRegs.size() != OutputOperands.size()) {
    LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
                         "number of destination registers\n");
    return false;
  }
  for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) {
    GISelAsmOperandInfo &OpInfo = OutputOperands[i];

    // Memory outputs were written in place; nothing to copy back.
    if (OpInfo.Regs.empty())
      continue;

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      if (OpInfo.Regs.size() > 1) {
        LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
                             "registers are not supported yet\n");
        return false;
      }

      Register SrcReg = OpInfo.Regs[0];
      unsigned SrcSize = TRI->getRegSizeInBits(Reg: SrcReg, MRI: *MRI);
      LLT ResTy = MRI->getType(Reg: ResRegs[i]);
      if (ResTy.isScalar() && ResTy.getSizeInBits() < SrcSize) {
        // First copy the non-typed virtual register into a generic virtual
        // register
        Register Tmp1Reg =
            MRI->createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: SrcSize));
        MIRBuilder.buildCopy(Res: Tmp1Reg, Op: SrcReg);
        // Need to truncate the result of the register
        MIRBuilder.buildTrunc(Res: ResRegs[i], Op: Tmp1Reg);
      } else if (ResTy.getSizeInBits() == SrcSize) {
        MIRBuilder.buildCopy(Res: ResRegs[i], Op: SrcReg);
      } else {
        LLVM_DEBUG(dbgs() << "Unhandled output operand with "
                             "mismatched register size\n");
        return false;
      }

      break;
    }
    case TargetLowering::C_Immediate:
    case TargetLowering::C_Other:
      LLVM_DEBUG(
          dbgs() << "Cannot lower target specific output constraints yet\n");
      return false;
    case TargetLowering::C_Memory:
      break; // Already handled.
    case TargetLowering::C_Address:
      break; // Silence warning.
    case TargetLowering::C_Unknown:
      LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
      return false;
    }
  }

  return true;
}
660
661bool InlineAsmLowering::lowerAsmOperandForConstraint(
662 Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
663 MachineIRBuilder &MIRBuilder) const {
664 if (Constraint.size() > 1)
665 return false;
666
667 char ConstraintLetter = Constraint[0];
668 switch (ConstraintLetter) {
669 default:
670 return false;
671 case 's': // Integer immediate not known at compile time
672 if (const auto *GV = dyn_cast<GlobalValue>(Val)) {
673 Ops.push_back(x: MachineOperand::CreateGA(GV, /*Offset=*/0));
674 return true;
675 }
676 return false;
677 case 'i': // Simple Integer or Relocatable Constant
678 if (const auto *GV = dyn_cast<GlobalValue>(Val)) {
679 Ops.push_back(x: MachineOperand::CreateGA(GV, /*Offset=*/0));
680 return true;
681 }
682 [[fallthrough]];
683 case 'n': // immediate integer with a known value.
684 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
685 assert(CI->getBitWidth() <= 64 &&
686 "expected immediate to fit into 64-bits");
687 // Boolean constants should be zero-extended, others are sign-extended
688 bool IsBool = CI->getBitWidth() == 1;
689 int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
690 Ops.push_back(x: MachineOperand::CreateImm(Val: ExtVal));
691 return true;
692 }
693 return false;
694 }
695}
696