//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsXCore.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "xcore-lower"

XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
                                         const XCoreSubtarget &Subtarget)
    : TargetLowering(TM, Subtarget), TM(TM), Subtarget(Subtarget) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);

  // 64-bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
  }

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Exception handling
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(
      {ISD::STORE, ISD::ADD, ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN});

  setMinFunctionAlignment(Align(2));
  setPrefFunctionAlignment(Align(4));

  // This target doesn't implement native atomics.
  setMaxAtomicSizeInBitsSupported(0);
}

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

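  // An i8 load uses the zero-extending LD8U instruction, so widening the
  // loaded value to a larger integer type costs nothing extra.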
  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    return true;
  }

  return false;
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE:
    return LowerATOMIC_FENCE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
// Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
                                                     const GlobalValue *GV,
                                                     SelectionDAG &DAG) const {
  // FIXME there is no actual debug info here
  SDLoc dl(GA);

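  // Functions are addressed PC-relative; constants (".cp." sections and
  // constant globals with local linkage) CP-relative; all other data
  // DP-relative.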
  if (GV->getValueType()->isFunctionTy())
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);

  const auto *GVar = dyn_cast<GlobalVariable>(GV);
  if ((GV->hasSection() && GV->getSection().starts_with(".cp.")) ||
      (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);

  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

static bool IsSmallObject(const GlobalValue *GV,
                          const XCoreTargetLowering &XTL) {
  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
    return true;

  Type *ObjType = GV->getValueType();
  if (!ObjType->isSized())
    return false;

  auto &DL = GV->getDataLayout();
  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
  return ObjSize < CodeModelLargeSize && ObjSize != 0;
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc DL(GN);
  int64_t Offset = GN->getOffset();
  if (IsSmallObject(GV, *this)) {
    // We can only fold positive offsets that are a multiple of the word size.
    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
    GA = getGlobalAddressWrapper(GA, GV, DAG);
    // Handle the rest of the offset.
    if (Offset != FoldedOffset) {
      SDValue Remaining =
          DAG.getSignedConstant(Offset - FoldedOffset, DL, MVT::i32);
      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
    }
    return GA;
  } else {
    // Ideally we would not fold in offset with an index <= 11.
    Type *Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *Idx = ConstantInt::get(Ty, Offset);
    Constant *GAI = ConstantExpr::getGetElementPtr(
        Type::getInt8Ty(*DAG.getContext()), const_cast<GlobalValue *>(GV),
        Idx);
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       DAG.getEntryNode(), CP, MachinePointerInfo());
  }
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlign(), CP->getOffset());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
                                    CP->getOffset());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT,
                       Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, dl, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
    const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
    SelectionDAG &DAG) const {
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
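  // For example, with Offset == 6 (XCore is little-endian):
  //   LowOffset == 4, HighOffset == 8, and the result is
  //   (load(Base+4) >> 16) | (load(Base+8) << 16).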
  int32_t HighOffset = alignTo(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, DL, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, DL, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);

  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  KnownBits Known = DAG.computeKnownBits(Value);
  return Known.countMinTrailingZeros() >= 2;
}

SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LLVMContext &Context = *DAG.getContext();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");

  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
                                     LD->getMemoryVT(), *LD->getMemOperand()))
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        GV->getPointerAlignment(DAG.getDataLayout()) >= 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlign() == Align(2)) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
                                 LD->getPointerInfo(), MVT::i16, Align(2),
                                 LD->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, DL, MVT::i32));
    SDValue High =
        DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
                       LD->getPointerInfo().getWithOffset(2), MVT::i16,
                       Align(2), LD->getMemOperand()->getFlags());
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, DL, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
  TargetLowering::ArgListTy Args;
  Args.emplace_back(BasePtr, IntPtrTy);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
      CallingConv::C, IntPtrTy,
      DAG.getExternalSymbol("__misaligned_load",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, DL);
}

SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  LLVMContext &Context = *DAG.getContext();
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");

  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
                                     ST->getMemoryVT(), *ST->getMemOperand()))
    return SDValue();

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlign() == Align(2)) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, dl, MVT::i32));
    SDValue StoreLow =
        DAG.getTruncStore(Chain, dl, Low, BasePtr, ST->getPointerInfo(),
                          MVT::i16, Align(2), ST->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, dl, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(
        Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
        MVT::i16, Align(2), ST->getMemOperand()->getFlags());
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
  TargetLowering::ArgListTy Args;
  Args.emplace_back(BasePtr, IntPtrTy);
  Args.emplace_back(Value, IntPtrTy);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      CallingConv::C, Type::getVoidTy(Context),
      DAG.getExternalSymbol("__misaligned_store",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
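  // MACCS computes Hi:Lo = AccHi:AccLo + sext(LHS) * sext(RHS); a zero
  // accumulator leaves the full 64-bit signed product.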
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
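  // LMUL computes Hi:Lo = LHS * RHS + Op3 + Op4 (all unsigned); zero addends
  // leave the full 64-bit unsigned product.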
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, dl, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, dl, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
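  // General case: the low 64 bits of the product are
  //   LL*RL + ((LL*RH + LH*RL) << 32),
  // so fold the addend into LL*RL with MACCU and add the cross terms into
  // the high word; they cannot affect the low word.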
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD)
    if (SDValue Result = TryExpandADDWithMul(N, DAG))
      return Result;

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(1, dl, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
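  // LADD/LSUB produce (Result, Carry); chain the carry out of the low word
  // into the high word.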
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // Whilst LLVM does not support aggregate varargs we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vararg.
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
                                                      dl));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo());
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // This node represents llvm.frameaddress on the DAG.
  // It takes one operand, the index of the frame address to return.
  // An index of zero corresponds to the current function's frame address.
  // An index of one to the parent's frame address, and so on.
  // Depths > 0 not supported yet!
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // This node represents llvm.returnaddress on the DAG.
  // It takes one operand, the index of the return address to return.
  // An index of zero corresponds to the current function's return address.
  // An index of one to the parent's return address, and so on.
  // Depths > 0 not supported yet!
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  int FI = XFI->createLRSpillSlot(MF);
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                     DAG.getEntryNode(), FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
}

SDValue XCoreTargetLowering::
LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
  // This node represents the offset from the frame pointer to the first
  // on-stack argument, which is needed for correct stack adjustment during
  // unwind. However, we don't know the offset until after the frame has been
  // finalised; this is done during the XCoreFTAOElim pass.
  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
  // This node represents the 'eh_return' gcc dwarf builtin, which is used to
  // return from an exception. The general meaning is: adjust the stack by
  // OFFSET and pass execution to HANDLER.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Absolute SP = (FP + FrameToArgs) + Offset
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     RegInfo->getFrameRegister(MF), MVT::i32);
  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
                                    MVT::i32);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);

  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
  // which leaves 2 caller saved registers, R2 & R3 for us to use.
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

  SDValue OutChains[] = {
    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
  };

  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);

  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StackReg, MVT::i32),
                     DAG.getRegister(HandlerReg, MVT::i32));
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
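  // The three constants stored below are the fixed encodings of the six
  // 16-bit instructions above (two per word); the final two words hold the
  // nest value and the function pointer.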
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, dl, MVT::i32));
  OutChains[1] =
      DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 4));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, dl, MVT::i32));
  OutChains[2] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 8));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, dl, MVT::i32));
  OutChains[3] =
      DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, dl, MVT::i32));
  OutChains[4] =
      DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = Op.getConstantOperandVal(0);
  switch (IntNo) {
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    SDValue Data =
        DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                    Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    SDValue Crc(Data.getNode(), 1);
    SDValue Results[] = { Crc, Data };
    return DAG.getMergeValues(Results, DL);
  }
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
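  // ISD::MEMBARRIER is a compiler barrier only: it keeps memory operations
  // ordered on the chain without emitting a hardware barrier instruction.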
  return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // The XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C is implemented.
  switch (CallConv)
  {
    default:
      report_fatal_error("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCallResult - Lower the result values of a call into the appropriate
/// copies out of physical registers / memory locations.
static SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               const SDLoc &dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (const CCValAssign &VA : RVLocs) {
    if (VA.isRegLoc()) {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
                                 InGlue).getValue(1);
      InGlue = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    } else {
      assert(VA.isMemLoc());
      ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
                                             InVals.size()));
      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int offset = ResultMemLocs[i].first;
    unsigned index = ResultMemLocs[i].second;
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
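    // LDWSP addresses the stack in words, so pass the byte offset divided
    // by 4.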
    SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
    SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
    InVals[index] = load;
    MemOpChains.push_back(load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all load nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

/// LowerCCCCallTo - function arguments are copied from virtual registers to
/// physical registers or the stack frame; CALLSEQ_START and CALLSEQ_END are
/// emitted.
/// TODO: isTailCall, sret.
SDValue XCoreTargetLowering::LowerCCCCallTo(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, Align(4));

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getStackSize(), Align(4));
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getStackSize();

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

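      // Like LDWSP above, STWSP takes a word offset.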
      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset / 4, dl,
                                                        MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InGlue is necessary since all emitted instructions must be
  // stuck together.
  SDValue InGlue;
  for (const auto &[Reg, N] : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, dl, Reg, N, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //                 = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (const auto &[Reg, N] : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg, N.getValueType()));

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, dl);
  InGlue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InGlue, RVLocs, dl, DAG, InVals);
}

//===----------------------------------------------------------------------===//
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}

/// XCore formal arguments implementation
SDValue XCoreTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv)
  {
    default:
      report_fatal_error("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue XCoreTargetLowering::LowerCCCArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  if (!isVarArg)
    XFI->setReturnStackOffset(CCInfo.getStackSize() + LRSaveSize);

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT << "\n";
#endif
          llvm_unreachable(nullptr);
        }
      case MVT::i32:
        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // Only arguments passed on the stack should make it here.
      assert(VA.isMemLoc());
      // Load the argument to a virtual register.
      unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << VA.getLocVT() << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI.CreateFixedObject(ObjSize,
                                     LRSaveSize + VA.getLocMemOffset(),
                                     true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (isVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < std::size(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address.
      for (int i = std::size(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot.
        int FI = MFI.CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg.
        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack.
        SDValue Store =
            DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
        MemOps.push_back(Store);
      }
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
          MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getStackSize(), true));
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (const ArgDataPair &ArgDI : ArgData) {
    if (ArgDI.Flags.isByVal() && ArgDI.Flags.getByValSize()) {
      unsigned Size = ArgDI.Flags.getByValSize();
      Align Alignment =
          std::max(Align(StackSlotSize), ArgDI.Flags.getNonZeroByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(
          Chain, dl, FIN, ArgDI.SDV, DAG.getConstant(Size, dl, MVT::i32),
          Alignment, false, false, /*CI=*/nullptr, std::nullopt,
          MachinePointerInfo(), MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI.SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}
1280
1281//===----------------------------------------------------------------------===//
1282// Return Value Calling Convention Implementation
1283//===----------------------------------------------------------------------===//
1284
1285bool XCoreTargetLowering::
1286CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1287 bool isVarArg,
1288 const SmallVectorImpl<ISD::OutputArg> &Outs,
1289 LLVMContext &Context, const Type *RetTy) const {
1290 SmallVector<CCValAssign, 16> RVLocs;
1291 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1292 if (!CCInfo.CheckReturn(Outs, Fn: RetCC_XCore))
1293 return false;
1294 if (CCInfo.getStackSize() != 0 && isVarArg)
1295 return false;
1296 return true;
1297}
1298
1299SDValue
1300XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1301 bool isVarArg,
1302 const SmallVectorImpl<ISD::OutputArg> &Outs,
1303 const SmallVectorImpl<SDValue> &OutVals,
1304 const SDLoc &dl, SelectionDAG &DAG) const {
1305
1306 XCoreFunctionInfo *XFI =
1307 DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
1308 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1309
1310 // CCValAssign - represent the assignment of
1311 // the return value to a location
1312 SmallVector<CCValAssign, 16> RVLocs;
1313
1314 // CCState - Info about the registers and stack slot.
1315 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1316 *DAG.getContext());
1317
1318 // Analyze return values.
1319 if (!isVarArg)
1320 CCInfo.AllocateStack(Size: XFI->getReturnStackOffset(), Alignment: Align(4));
1321
1322 CCInfo.AnalyzeReturn(Outs, Fn: RetCC_XCore);
1323
1324 SDValue Glue;
1325 SmallVector<SDValue, 4> RetOps(1, Chain);
1326
1327 // Return on XCore is always a "retsp 0"
1328 RetOps.push_back(Elt: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32));
1329
1330 SmallVector<SDValue, 4> MemOpChains;
1331 // Handle return values that must be copied to memory.
1332 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1333 CCValAssign &VA = RVLocs[i];
1334 if (VA.isRegLoc())
1335 continue;
1336 assert(VA.isMemLoc());
1337 if (isVarArg) {
1338 report_fatal_error(reason: "Can't return value from vararg function in memory");
1339 }
1340
1341 int Offset = VA.getLocMemOffset();
1342 unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1343 // Create the frame index object for the memory location.
1344 int FI = MFI.CreateFixedObject(Size: ObjSize, SPOffset: Offset, IsImmutable: false);
1345
1346 // Create a SelectionDAG node corresponding to a store
1347 // to this memory location.
1348 SDValue FIN = DAG.getFrameIndex(FI, VT: MVT::i32);
1349 MemOpChains.push_back(Elt: DAG.getStore(
1350 Chain, dl, Val: OutVals[i], Ptr: FIN,
1351 PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI)));
1352 }
1353
1354 // Transform all store nodes into one single node because
1355 // all stores are independent of each other.
1356 if (!MemOpChains.empty())
1357 Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: MemOpChains);
1358
1359 // Now handle return values copied to registers.
1360 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1361 CCValAssign &VA = RVLocs[i];
1362 if (!VA.isRegLoc())
1363 continue;
1364 // Copy the result values into the output registers.
1365 Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), N: OutVals[i], Glue);
1366
    // Guarantee that all emitted copies are stuck together by gluing them,
    // so nothing can be scheduled in between.
1369 Glue = Chain.getValue(R: 1);
1370 RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT()));
1371 }
1372
1373 RetOps[0] = Chain; // Update chain.
1374
1375 // Add the glue if we have it.
1376 if (Glue.getNode())
1377 RetOps.push_back(Elt: Glue);
1378
1379 return DAG.getNode(Opcode: XCoreISD::RETSP, DL: dl, VT: MVT::Other, Ops: RetOps);
1380}
1381
1382//===----------------------------------------------------------------------===//
1383// Other Lowering Code
1384//===----------------------------------------------------------------------===//
1385
1386MachineBasicBlock *
1387XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1388 MachineBasicBlock *BB) const {
1389 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1390 DebugLoc dl = MI.getDebugLoc();
1391 assert((MI.getOpcode() == XCore::SELECT_CC) &&
1392 "Unexpected instr type to insert");
1393
1394 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1395 // control-flow pattern. The incoming instruction knows the destination vreg
1396 // to set, the condition code register to branch on, the true/false values to
1397 // select between, and a branch opcode to use.
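  // The resulting control flow forms a diamond (names match the locals
  // created below):
  //
  //        thisMBB
  //        |      \
  //        |    copy0MBB    <- fallthrough when the condition is false
  //        |      /
  //        sinkMBB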
1398 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1399 MachineFunction::iterator It = ++BB->getIterator();
1400
1401 // thisMBB:
1402 // ...
1403 // TrueVal = ...
1404 // cmpTY ccX, r1, r2
1405 // bCC copy1MBB
1406 // fallthrough --> copy0MBB
1407 MachineBasicBlock *thisMBB = BB;
1408 MachineFunction *F = BB->getParent();
1409 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
1410 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
1411 F->insert(MBBI: It, MBB: copy0MBB);
1412 F->insert(MBBI: It, MBB: sinkMBB);
1413
1414 // Transfer the remainder of BB and its successor edges to sinkMBB.
1415 sinkMBB->splice(Where: sinkMBB->begin(), Other: BB,
1416 From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end());
1417 sinkMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB);
1418
1419 // Next, add the true and fallthrough blocks as its successors.
1420 BB->addSuccessor(Succ: copy0MBB);
1421 BB->addSuccessor(Succ: sinkMBB);
1422
1423 BuildMI(BB, MIMD: dl, MCID: TII.get(Opcode: XCore::BRFT_lru6))
1424 .addReg(RegNo: MI.getOperand(i: 1).getReg())
1425 .addMBB(MBB: sinkMBB);
1426
1427 // copy0MBB:
1428 // %FalseValue = ...
1429 // # fallthrough to sinkMBB
1430 BB = copy0MBB;
1431
1432 // Update machine-CFG edges
1433 BB->addSuccessor(Succ: sinkMBB);
1434
1435 // sinkMBB:
1436 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1437 // ...
1438 BB = sinkMBB;
1439 BuildMI(BB&: *BB, I: BB->begin(), MIMD: dl, MCID: TII.get(Opcode: XCore::PHI), DestReg: MI.getOperand(i: 0).getReg())
1440 .addReg(RegNo: MI.getOperand(i: 3).getReg())
1441 .addMBB(MBB: copy0MBB)
1442 .addReg(RegNo: MI.getOperand(i: 2).getReg())
1443 .addMBB(MBB: thisMBB);
1444
1445 MI.eraseFromParent(); // The pseudo instruction is gone now.
1446 return BB;
1447}
1448
1449//===----------------------------------------------------------------------===//
1450// Target Optimization Hooks
1451//===----------------------------------------------------------------------===//
1452
1453SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1454 DAGCombinerInfo &DCI) const {
1455 SelectionDAG &DAG = DCI.DAG;
1456 SDLoc dl(N);
1457 switch (N->getOpcode()) {
1458 default: break;
1459 case ISD::INTRINSIC_VOID:
1460 switch (N->getConstantOperandVal(Num: 1)) {
1461 case Intrinsic::xcore_outt:
1462 case Intrinsic::xcore_outct:
1463 case Intrinsic::xcore_chkct: {
1464 SDValue OutVal = N->getOperand(Num: 3);
1465 // These instructions ignore the high bits.
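      // For example, an explicit mask such as outt(res, x & 0xff) can be
      // dropped, and wide constant operands can be shrunk to their low byte.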
1466 if (OutVal.hasOneUse()) {
1467 unsigned BitWidth = OutVal.getValueSizeInBits();
1468 APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 8);
1469 KnownBits Known;
1470 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1471 !DCI.isBeforeLegalizeOps());
1472 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1473 if (TLI.ShrinkDemandedConstant(Op: OutVal, DemandedBits: DemandedMask, TLO) ||
1474 TLI.SimplifyDemandedBits(Op: OutVal, DemandedBits: DemandedMask, Known, TLO))
1475 DCI.CommitTargetLoweringOpt(TLO);
1476 }
1477 break;
1478 }
1479 case Intrinsic::xcore_setpt: {
1480 SDValue Time = N->getOperand(Num: 3);
1481 // This instruction ignores the high bits.
1482 if (Time.hasOneUse()) {
1483 unsigned BitWidth = Time.getValueSizeInBits();
1484 APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 16);
1485 KnownBits Known;
1486 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1487 !DCI.isBeforeLegalizeOps());
1488 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1489 if (TLI.ShrinkDemandedConstant(Op: Time, DemandedBits: DemandedMask, TLO) ||
1490 TLI.SimplifyDemandedBits(Op: Time, DemandedBits: DemandedMask, Known, TLO))
1491 DCI.CommitTargetLoweringOpt(TLO);
1492 }
1493 break;
1494 }
1495 }
1496 break;
1497 case XCoreISD::LADD: {
1498 SDValue N0 = N->getOperand(Num: 0);
1499 SDValue N1 = N->getOperand(Num: 1);
1500 SDValue N2 = N->getOperand(Num: 2);
1501 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(Val&: N0);
1502 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Val&: N1);
1503 EVT VT = N0.getValueType();
1504
1505 // canonicalize constant to RHS
1506 if (N0C && !N1C)
1507 return DAG.getNode(Opcode: XCoreISD::LADD, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), N1, N2: N0, N3: N2);
1508
1509 // fold (ladd 0, 0, x) -> 0, x & 1
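    // (Only the low bit of the carry-in operand is consumed, so the sum is
    // x & 1 and no carry-out can be produced.)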
1510 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
1511 SDValue Carry = DAG.getConstant(Val: 0, DL: dl, VT);
1512 SDValue Result = DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: N2,
1513 N2: DAG.getConstant(Val: 1, DL: dl, VT));
1514 SDValue Ops[] = { Result, Carry };
1515 return DAG.getMergeValues(Ops, dl);
1516 }
1517
1518 // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1519 // low bit set
1520 if (N1C && N1C->isZero() && N->hasNUsesOfValue(NUses: 0, Value: 1)) {
1521 APInt Mask = APInt::getHighBitsSet(numBits: VT.getSizeInBits(),
1522 hiBitsSet: VT.getSizeInBits() - 1);
1523 KnownBits Known = DAG.computeKnownBits(Op: N2);
1524 if ((Known.Zero & Mask) == Mask) {
1525 SDValue Carry = DAG.getConstant(Val: 0, DL: dl, VT);
1526 SDValue Result = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: N0, N2);
1527 SDValue Ops[] = { Result, Carry };
1528 return DAG.getMergeValues(Ops, dl);
1529 }
1530 }
1531 }
1532 break;
1533 case XCoreISD::LSUB: {
1534 SDValue N0 = N->getOperand(Num: 0);
1535 SDValue N1 = N->getOperand(Num: 1);
1536 SDValue N2 = N->getOperand(Num: 2);
1537 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(Val&: N0);
1538 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Val&: N1);
1539 EVT VT = N0.getValueType();
1540
1541 // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
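    // With both addends zero the result is 0 - x; since x is 0 or 1, a
    // borrow is produced exactly when x is 1, so the borrow-out equals x.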
1542 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
1543 APInt Mask = APInt::getHighBitsSet(numBits: VT.getSizeInBits(),
1544 hiBitsSet: VT.getSizeInBits() - 1);
1545 KnownBits Known = DAG.computeKnownBits(Op: N2);
1546 if ((Known.Zero & Mask) == Mask) {
1547 SDValue Borrow = N2;
1548 SDValue Result = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT,
1549 N1: DAG.getConstant(Val: 0, DL: dl, VT), N2);
1550 SDValue Ops[] = { Result, Borrow };
1551 return DAG.getMergeValues(Ops, dl);
1552 }
1553 }
1554
1555 // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1556 // low bit set
1557 if (N1C && N1C->isZero() && N->hasNUsesOfValue(NUses: 0, Value: 1)) {
1558 APInt Mask = APInt::getHighBitsSet(numBits: VT.getSizeInBits(),
1559 hiBitsSet: VT.getSizeInBits() - 1);
1560 KnownBits Known = DAG.computeKnownBits(Op: N2);
1561 if ((Known.Zero & Mask) == Mask) {
1562 SDValue Borrow = DAG.getConstant(Val: 0, DL: dl, VT);
1563 SDValue Result = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: N0, N2);
1564 SDValue Ops[] = { Result, Borrow };
1565 return DAG.getMergeValues(Ops, dl);
1566 }
1567 }
1568 }
1569 break;
1570 case XCoreISD::LMUL: {
1571 SDValue N0 = N->getOperand(Num: 0);
1572 SDValue N1 = N->getOperand(Num: 1);
1573 SDValue N2 = N->getOperand(Num: 2);
1574 SDValue N3 = N->getOperand(Num: 3);
1575 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(Val&: N0);
1576 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Val&: N1);
1577 EVT VT = N0.getValueType();
    // Canonicalize the multiplicative constant to the RHS. If both
    // multiplicative operands are constant, canonicalize the smaller one
    // to the RHS.
1580 if ((N0C && !N1C) ||
1581 (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1582 return DAG.getNode(Opcode: XCoreISD::LMUL, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT),
1583 N1, N2: N0, N3: N2, N4: N3);
1584
1585 // lmul(x, 0, a, b)
1586 if (N1C && N1C->isZero()) {
      // If the high result is unused, fold to add(a, b).
1588 if (N->hasNUsesOfValue(NUses: 0, Value: 0)) {
1589 SDValue Lo = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: N2, N2: N3);
1590 SDValue Ops[] = { Lo, Lo };
1591 return DAG.getMergeValues(Ops, dl);
1592 }
1593 // Otherwise fold to ladd(a, b, 0)
1594 SDValue Result =
1595 DAG.getNode(Opcode: XCoreISD::LADD, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), N1: N2, N2: N3, N3: N1);
1596 SDValue Carry(Result.getNode(), 1);
1597 SDValue Ops[] = { Carry, Result };
1598 return DAG.getMergeValues(Ops, dl);
1599 }
1600 }
1601 break;
1602 case ISD::ADD: {
    // Fold 32-bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul is ignored.
    // This is only profitable if the intermediate results are unused
1606 // elsewhere.
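    // For example (all i32):
    //   t = mul x, y;  u = add t, a;  r = add u, b
    // becomes a single lmul(x, y, a, b) whose low word is r.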
1607 SDValue Mul0, Mul1, Addend0, Addend1;
1608 if (N->getValueType(ResNo: 0) == MVT::i32 &&
1609 isADDADDMUL(Op: SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, requireIntermediatesHaveOneUse: true)) {
1610 SDValue Ignored = DAG.getNode(Opcode: XCoreISD::LMUL, DL: dl,
1611 VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N1: Mul0,
1612 N2: Mul1, N3: Addend0, N4: Addend1);
1613 SDValue Result(Ignored.getNode(), 1);
1614 return Result;
1615 }
1616 APInt HighMask = APInt::getHighBitsSet(numBits: 64, hiBitsSet: 32);
    // Fold 64-bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
1619 // before type legalization as it is messy to match the operands after
1620 // that.
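    // For example, with a, b, c and d zero-extended from i32:
    //   add (add (mul a, b), c), d : i64
    // becomes a BUILD_PAIR of the high and low words of lmul(a, b, c, d).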
1621 if (N->getValueType(ResNo: 0) == MVT::i64 &&
1622 isADDADDMUL(Op: SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, requireIntermediatesHaveOneUse: false) &&
1623 DAG.MaskedValueIsZero(Op: Mul0, Mask: HighMask) &&
1624 DAG.MaskedValueIsZero(Op: Mul1, Mask: HighMask) &&
1625 DAG.MaskedValueIsZero(Op: Addend0, Mask: HighMask) &&
1626 DAG.MaskedValueIsZero(Op: Addend1, Mask: HighMask)) {
1627 SDValue Mul0L = DAG.getNode(Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32,
1628 N1: Mul0, N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32));
1629 SDValue Mul1L = DAG.getNode(Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32,
1630 N1: Mul1, N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32));
1631 SDValue Addend0L = DAG.getNode(Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32,
1632 N1: Addend0, N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32));
1633 SDValue Addend1L = DAG.getNode(Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32,
1634 N1: Addend1, N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32));
1635 SDValue Hi = DAG.getNode(Opcode: XCoreISD::LMUL, DL: dl,
1636 VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N1: Mul0L, N2: Mul1L,
1637 N3: Addend0L, N4: Addend1L);
1638 SDValue Lo(Hi.getNode(), 1);
1639 return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi);
1640 }
1641 }
1642 break;
1643 case ISD::STORE: {
1644 // Replace unaligned store of unaligned load with memmove.
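    // A memmove rather than a memcpy is generated because the original
    // load/store pair is well defined even when the two regions overlap.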
1645 StoreSDNode *ST = cast<StoreSDNode>(Val: N);
1646 if (!DCI.isBeforeLegalize() ||
1647 allowsMemoryAccessForAlignment(Context&: *DAG.getContext(), DL: DAG.getDataLayout(),
1648 VT: ST->getMemoryVT(),
1649 MMO: *ST->getMemOperand()) ||
1650 ST->isVolatile() || ST->isIndexed()) {
1651 break;
1652 }
1653 SDValue Chain = ST->getChain();
1654
1655 unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1656 assert((StoreBits % 8) == 0 &&
1657 "Store size in bits must be a multiple of 8");
1658 Align Alignment = ST->getAlign();
1659
1660 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: ST->getValue())) {
1661 if (LD->hasNUsesOfValue(NUses: 1, Value: 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1662 LD->getAlign() == Alignment &&
1663 !LD->isVolatile() && !LD->isIndexed() &&
1664 Chain.reachesChainWithoutSideEffects(Dest: SDValue(LD, 1))) {
1665 bool isTail = isInTailCallPosition(DAG, Node: ST, Chain);
1666 return DAG.getMemmove(Chain, dl, Dst: ST->getBasePtr(), Src: LD->getBasePtr(),
1667 Size: DAG.getConstant(Val: StoreBits / 8, DL: dl, VT: MVT::i32),
1668 Alignment, isVol: false, CI: nullptr, OverrideTailCall: isTail,
1669 DstPtrInfo: ST->getPointerInfo(), SrcPtrInfo: LD->getPointerInfo());
1670 }
1671 }
1672 break;
1673 }
1674 }
1675 return SDValue();
1676}
1677
1678void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1679 KnownBits &Known,
1680 const APInt &DemandedElts,
1681 const SelectionDAG &DAG,
1682 unsigned Depth) const {
1683 Known.resetAll();
1684 switch (Op.getOpcode()) {
1685 default: break;
1686 case XCoreISD::LADD:
1687 case XCoreISD::LSUB:
1688 if (Op.getResNo() == 1) {
1689 // Top bits of carry / borrow are clear.
1690 Known.Zero = APInt::getHighBitsSet(numBits: Known.getBitWidth(),
1691 hiBitsSet: Known.getBitWidth() - 1);
1692 }
1693 break;
1694 case ISD::INTRINSIC_W_CHAIN:
1695 {
1696 unsigned IntNo = Op.getConstantOperandVal(i: 1);
1697 switch (IntNo) {
1698 case Intrinsic::xcore_getts:
1699 // High bits are known to be zero.
1700 Known.Zero =
1701 APInt::getHighBitsSet(numBits: Known.getBitWidth(), hiBitsSet: Known.getBitWidth() - 16);
1702 break;
1703 case Intrinsic::xcore_int:
1704 case Intrinsic::xcore_inct:
1705 // High bits are known to be zero.
1706 Known.Zero =
1707 APInt::getHighBitsSet(numBits: Known.getBitWidth(), hiBitsSet: Known.getBitWidth() - 8);
1708 break;
1709 case Intrinsic::xcore_testct:
1710 // Result is either 0 or 1.
1711 Known.Zero =
1712 APInt::getHighBitsSet(numBits: Known.getBitWidth(), hiBitsSet: Known.getBitWidth() - 1);
1713 break;
1714 case Intrinsic::xcore_testwct:
      // Result is in the range 0 - 4, which fits in the low 3 bits.
1716 Known.Zero =
1717 APInt::getHighBitsSet(numBits: Known.getBitWidth(), hiBitsSet: Known.getBitWidth() - 3);
1718 break;
1719 }
1720 }
1721 break;
1722 }
1723}
1724
1725//===----------------------------------------------------------------------===//
1726// Addressing mode description hooks
1727//===----------------------------------------------------------------------===//
1728
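// Predicates for the unsigned immediates accepted by the scaled addressing
// modes checked in isLegalAddressingMode below: a raw value in [0, 11],
// optionally pre-scaled by the access size (2 or 4).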
static inline bool isImmUs(int64_t val) { return val >= 0 && val <= 11; }

static inline bool isImmUs2(int64_t val) {
  return (val % 2 == 0 && isImmUs(val: val / 2));
}

static inline bool isImmUs4(int64_t val) {
  return (val % 4 == 0 && isImmUs(val: val / 4));
}
1743
1744/// isLegalAddressingMode - Return true if the addressing mode represented
1745/// by AM is legal for this target, for a load/store of the specified type.
1746bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1747 const AddrMode &AM, Type *Ty,
1748 unsigned AS,
1749 Instruction *I) const {
1750 if (Ty->getTypeID() == Type::VoidTyID)
1751 return AM.Scale == 0 && isImmUs(val: AM.BaseOffs) && isImmUs4(val: AM.BaseOffs);
1752
1753 unsigned Size = DL.getTypeAllocSize(Ty);
1754 if (AM.BaseGV) {
1755 return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1756 AM.BaseOffs%4 == 0;
1757 }
1758
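  // Immediate offsets are scaled by the access size: byte accesses take the
  // raw offset, 16-bit accesses require a multiple of 2, and word or larger
  // accesses require a multiple of 4. Register offsets must instead be
  // scaled by the access size.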
1759 switch (Size) {
1760 case 1:
1761 // reg + imm
1762 if (AM.Scale == 0) {
1763 return isImmUs(val: AM.BaseOffs);
1764 }
1765 // reg + reg
1766 return AM.Scale == 1 && AM.BaseOffs == 0;
1767 case 2:
1768 case 3:
1769 // reg + imm
1770 if (AM.Scale == 0) {
1771 return isImmUs2(val: AM.BaseOffs);
1772 }
1773 // reg + reg<<1
1774 return AM.Scale == 2 && AM.BaseOffs == 0;
1775 default:
1776 // reg + imm
1777 if (AM.Scale == 0) {
1778 return isImmUs4(val: AM.BaseOffs);
1779 }
1780 // reg + reg<<2
1781 return AM.Scale == 4 && AM.BaseOffs == 0;
1782 }
1783}
1784
1785//===----------------------------------------------------------------------===//
1786// XCore Inline Assembly Support
1787//===----------------------------------------------------------------------===//
1788
1789std::pair<unsigned, const TargetRegisterClass *>
1790XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1791 StringRef Constraint,
1792 MVT VT) const {
1793 if (Constraint.size() == 1) {
1794 switch (Constraint[0]) {
1795 default : break;
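    // 'r' selects any general-purpose register.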
1796 case 'r':
1797 return std::make_pair(x: 0U, y: &XCore::GRRegsRegClass);
1798 }
1799 }
1800 // Use the default implementation in TargetLowering to convert the register
1801 // constraint into a member of a register class.
1802 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1803}
1804