//===-- AVRISelLowering.cpp - AVR DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AVR uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "AVRISelLowering.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"

#include "AVR.h"
#include "AVRMachineFunctionInfo.h"
#include "AVRSubtarget.h"
#include "AVRTargetMachine.h"
#include "MCTargetDesc/AVRMCTargetDesc.h"

namespace llvm {

AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
                                     const AVRSubtarget &STI)
    : TargetLowering(TM, STI), Subtarget(STI) {
  // Set up the register classes.
  addRegisterClass(MVT::i8, &AVR::GPR8RegClass);
  addRegisterClass(MVT::i16, &AVR::DREGSRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);
  setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(AVR::SP);
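  // AVR has no caches, and atomic operations are implemented by briefly
  // disabling interrupts, so the alignment of an atomic access does not
  // affect correctness.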
  setSupportsUnalignedAtomics(true);

  setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i16, Custom);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand);

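  // INLINEASM is custom lowered so that the implicit zero register can be
  // attached as an extra operand (see LowerINLINEASM below).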
  setOperationAction(ISD::INLINEASM, MVT::Other, Custom);

  for (MVT VT : MVT::integer_valuetypes()) {
    for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(N, VT, MVT::i1, Promote);
      setLoadExtAction(N, VT, MVT::i8, Expand);
    }
  }

  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }
  // sub (x, imm) gets canonicalized to add (x, -imm), so for illegal types we
  // revert it back into a sub, since we don't have an add-with-immediate
  // instruction.
  setOperationAction(ISD::ADD, MVT::i32, Custom);
  setOperationAction(ISD::ADD, MVT::i64, Custom);

  // Our shift instructions can only shift one bit at a time, so handle this
  // in a custom way.
  setOperationAction(ISD::SRA, MVT::i8, Custom);
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i16, Custom);
  setOperationAction(ISD::SHL, MVT::i16, Custom);
  setOperationAction(ISD::SRL, MVT::i16, Custom);
  setOperationAction(ISD::SRA, MVT::i32, Custom);
  setOperationAction(ISD::SHL, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i32, Custom);
  setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand);

  setOperationAction(ISD::ROTL, MVT::i8, Custom);
  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Custom);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i8, Custom);
  setOperationAction(ISD::BR_CC, MVT::i16, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::i8, Expand);
  setOperationAction(ISD::SELECT, MVT::i16, Expand);

  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // Add support for postincrement and predecrement load/stores.
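  // These map directly onto the AVR addressing modes X+/Y+/Z+ (post-increment)
  // and -X/-Y/-Z (pre-decrement).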
  setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i16, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Atomic operations which must be lowered to rtlib calls
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::ATOMIC_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand);
  }

  // Division/remainder
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);

  // Make division and modulus custom
  setOperationAction(ISD::UDIVREM, MVT::i8, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i16, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i8, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i16, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i32, Custom);

  // Do not use MUL. The AVR instructions are closer to SMUL_LOHI &co.
  setOperationAction(ISD::MUL, MVT::i8, Expand);
  setOperationAction(ISD::MUL, MVT::i16, Expand);

  // Expand 16 bit multiplications.
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);

  // Expand multiplications to libcalls when there is
  // no hardware MUL.
  if (!Subtarget.supportsMultiplication()) {
    setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    // TODO: The generated code is pretty poor. Investigate using the
    // same "shift and subtract with carry" trick that we do for
    // extending 8-bit to 16-bit. This may require infrastructure
    // improvements in how we treat 16-bit "registers" to be feasible.
  }

  setMinFunctionAlignment(Align(2));
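  // Requiring UINT_MAX entries makes a jump table never profitable, which
  // effectively disables jump table emission (BR_JT is expanded above).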
  setMinimumJumpTableEntries(UINT_MAX);
}

EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {
  assert(!VT.isVector() && "No AVR SetCC type for vectors!");
  return MVT::i8;
}

SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opc8;
  const SDNode *N = Op.getNode();
  EVT VT = Op.getValueType();
  SDLoc dl(N);
  assert(llvm::has_single_bit<uint32_t>(VT.getSizeInBits()) &&
         "Expected power-of-2 shift amount");

  if (VT.getSizeInBits() == 32) {
    if (!isa<ConstantSDNode>(N->getOperand(1))) {
      // 32-bit shifts are converted to a loop in IR.
      // This should be unreachable.
      report_fatal_error("Expected a constant shift amount!");
    }
    SDVTList ResTys = DAG.getVTList(MVT::i16, MVT::i16);
    SDValue SrcLo =
        DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i16, Op.getOperand(0),
                    DAG.getConstant(0, dl, MVT::i16));
    SDValue SrcHi =
        DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i16, Op.getOperand(0),
                    DAG.getConstant(1, dl, MVT::i16));
    uint64_t ShiftAmount = N->getConstantOperandVal(1);
    if (ShiftAmount == 16) {
      // Special case these two operations because they appear to be used by
      // the generic codegen parts to lower 32-bit numbers.
      // TODO: perhaps we can lower shift amounts bigger than 16 to a 16-bit
      // shift of a part of the 32-bit value?
      switch (Op.getOpcode()) {
      case ISD::SHL: {
        SDValue Zero = DAG.getConstant(0, dl, MVT::i16);
        return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i32, Zero, SrcLo);
      }
      case ISD::SRL: {
        SDValue Zero = DAG.getConstant(0, dl, MVT::i16);
        return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i32, SrcHi, Zero);
      }
      }
    }
    SDValue Cnt = DAG.getTargetConstant(ShiftAmount, dl, MVT::i8);
    unsigned Opc;
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Invalid 32-bit shift opcode!");
    case ISD::SHL:
      Opc = AVRISD::LSLW;
      break;
    case ISD::SRL:
      Opc = AVRISD::LSRW;
      break;
    case ISD::SRA:
      Opc = AVRISD::ASRW;
      break;
    }
    SDValue Result = DAG.getNode(Opc, dl, ResTys, SrcLo, SrcHi, Cnt);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i32, Result.getValue(0),
                       Result.getValue(1));
  }

  // Expand non-constant shifts to loops.
  if (!isa<ConstantSDNode>(N->getOperand(1))) {
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Invalid shift opcode!");
    case ISD::SHL:
      return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::SRL:
      return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::ROTL: {
      SDValue Amt = N->getOperand(1);
      EVT AmtVT = Amt.getValueType();
      Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
                        DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
      return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0), Amt);
    }
    case ISD::ROTR: {
      SDValue Amt = N->getOperand(1);
      EVT AmtVT = Amt.getValueType();
      Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
                        DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
      return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0), Amt);
    }
    case ISD::SRA:
      return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    }
  }

  uint64_t ShiftAmount = N->getConstantOperandVal(1);
  SDValue Victim = N->getOperand(0);

  switch (Op.getOpcode()) {
  case ISD::SRA:
    Opc8 = AVRISD::ASR;
    break;
  case ISD::ROTL:
    Opc8 = AVRISD::ROL;
    ShiftAmount = ShiftAmount % VT.getSizeInBits();
    break;
  case ISD::ROTR:
    Opc8 = AVRISD::ROR;
    ShiftAmount = ShiftAmount % VT.getSizeInBits();
    break;
  case ISD::SRL:
    Opc8 = AVRISD::LSR;
    break;
  case ISD::SHL:
    Opc8 = AVRISD::LSL;
    break;
  default:
    llvm_unreachable("Invalid shift opcode");
  }

  // Optimize int8/int16 shifts.
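  // The AVR SWAP instruction exchanges the two nibbles of a register, i.e. it
  // rotates the byte by four bits, so combined with a mask or a one-bit
  // rotation it can replace up to four single-bit shift instructions.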
  if (VT.getSizeInBits() == 8) {
    if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
      // Optimize LSL when 4 <= ShiftAmount <= 6.
      Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
      Victim =
          DAG.getNode(ISD::AND, dl, VT, Victim, DAG.getConstant(0xf0, dl, VT));
      ShiftAmount -= 4;
    } else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount &&
               ShiftAmount < 7) {
      // Optimize LSR when 4 <= ShiftAmount <= 6.
      Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
      Victim =
          DAG.getNode(ISD::AND, dl, VT, Victim, DAG.getConstant(0x0f, dl, VT));
      ShiftAmount -= 4;
    } else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {
      // Optimize LSL when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::LSLBN, dl, VT, Victim,
                           DAG.getConstant(7, dl, VT));
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {
      // Optimize LSR when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::LSRBN, dl, VT, Victim,
                           DAG.getConstant(7, dl, VT));
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 6) {
      // Optimize ASR when ShiftAmount == 6.
      Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,
                           DAG.getConstant(6, dl, VT));
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
      // Optimize ASR when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,
                           DAG.getConstant(7, dl, VT));
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 3) {
      // Optimize left rotation 3 bits to swap then right rotation 1 bit.
      Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
      Victim = DAG.getNode(AVRISD::ROR, dl, VT, Victim);
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 3) {
      // Optimize right rotation 3 bits to swap then left rotation 1 bit.
      Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
      Victim = DAG.getNode(AVRISD::ROL, dl, VT, Victim);
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 7) {
      // Optimize left rotation 7 bits to right rotation 1 bit.
      Victim = DAG.getNode(AVRISD::ROR, dl, VT, Victim);
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 7) {
      // Optimize right rotation 7 bits to left rotation 1 bit.
      Victim = DAG.getNode(AVRISD::ROL, dl, VT, Victim);
      ShiftAmount = 0;
    } else if ((Op.getOpcode() == ISD::ROTR || Op.getOpcode() == ISD::ROTL) &&
               ShiftAmount >= 4) {
      // Optimize left/right rotation with the SWAP instruction.
      Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
      ShiftAmount -= 4;
    }
  } else if (VT.getSizeInBits() == 16) {
    if (Op.getOpcode() == ISD::SRA)
      // Special optimization for int16 arithmetic right shift.
      switch (ShiftAmount) {
      case 15:
        Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
                             DAG.getConstant(15, dl, VT));
        ShiftAmount = 0;
        break;
      case 14:
        Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
                             DAG.getConstant(14, dl, VT));
        ShiftAmount = 0;
        break;
      case 7:
        Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
                             DAG.getConstant(7, dl, VT));
        ShiftAmount = 0;
        break;
      default:
        break;
      }
    if (4 <= ShiftAmount && ShiftAmount < 8)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
                             DAG.getConstant(4, dl, VT));
        ShiftAmount -= 4;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
                             DAG.getConstant(4, dl, VT));
        ShiftAmount -= 4;
        break;
      default:
        break;
      }
    else if (8 <= ShiftAmount && ShiftAmount < 12)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
                             DAG.getConstant(8, dl, VT));
        ShiftAmount -= 8;
        // Only operate on the higher byte for remaining shift bits.
        Opc8 = AVRISD::LSLHI;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
                             DAG.getConstant(8, dl, VT));
        ShiftAmount -= 8;
        // Only operate on the lower byte for remaining shift bits.
        Opc8 = AVRISD::LSRLO;
        break;
      case ISD::SRA:
        Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
                             DAG.getConstant(8, dl, VT));
        ShiftAmount -= 8;
        // Only operate on the lower byte for remaining shift bits.
        Opc8 = AVRISD::ASRLO;
        break;
      default:
        break;
      }
    else if (12 <= ShiftAmount)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
                             DAG.getConstant(12, dl, VT));
        ShiftAmount -= 12;
        // Only operate on the higher byte for remaining shift bits.
        Opc8 = AVRISD::LSLHI;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
                             DAG.getConstant(12, dl, VT));
        ShiftAmount -= 12;
        // Only operate on the lower byte for remaining shift bits.
        Opc8 = AVRISD::LSRLO;
        break;
      case ISD::SRA:
        Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
                             DAG.getConstant(8, dl, VT));
        ShiftAmount -= 8;
        // Only operate on the lower byte for remaining shift bits.
        Opc8 = AVRISD::ASRLO;
        break;
      default:
        break;
      }
  }

  while (ShiftAmount--) {
    Victim = DAG.getNode(Opc8, dl, VT, Victim);
  }

  return Victim;
}

SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool IsSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());

  RTLIB::Libcall LC;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:
    LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
    break;
  case MVT::i16:
    LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
    break;
  case MVT::i32:
    LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
    break;
  }

  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  for (SDValue const &Value : Op->op_values()) {
    TargetLowering::ArgListEntry Entry(
        Value, Value.getValueType().getTypeForEVT(*DAG.getContext()));
    Entry.IsSExt = IsSigned;
    Entry.IsZExt = !IsSigned;
    Args.push_back(Entry);
  }

  RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(LC);
  if (LCImpl == RTLIB::Unsupported)
    return SDValue();

  SDValue Callee =
      DAG.getExternalSymbol(LCImpl, getPointerTy(DAG.getDataLayout()));

  Type *RetTy = (Type *)StructType::get(Ty, Ty);

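  // Note: on AVR these libcalls resolve to avr-libgcc's __divmod helpers
  // (e.g. __udivmodhi4 for UDIVREM_I16), which return the quotient and the
  // remainder together in registers; hence setInRegister() below.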
  SDLoc dl(Op);
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(DAG.getLibcalls().getLibcallImplCallingConv(LCImpl), RetTy,
                    Callee, std::move(Args))
      .setInRegister()
      .setSExtResult(IsSigned)
      .setZExtResult(!IsSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
}

SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();

  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();

  // Create the TargetGlobalAddress node, folding in the constant offset.
  SDValue Result =
      DAG.getTargetGlobalAddress(GV, SDLoc(Op), getPointerTy(DL), Offset);
  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}

SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(DL));

  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}

/// Convert a DAG integer condition code to an AVR CC.
static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown condition code!");
  case ISD::SETEQ:
    return AVRCC::COND_EQ;
  case ISD::SETNE:
    return AVRCC::COND_NE;
  case ISD::SETGE:
    return AVRCC::COND_GE;
  case ISD::SETLT:
    return AVRCC::COND_LT;
  case ISD::SETUGE:
    return AVRCC::COND_SH;
  case ISD::SETULT:
    return AVRCC::COND_LO;
  }
}

/// Returns the appropriate CP/CPI/CPC nodes for the given 8/16-bit operands.
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
                                     SelectionDAG &DAG, SDLoc DL) const {
  assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&
         "LHS and RHS have different types");
  assert(((LHS.getSimpleValueType() == MVT::i16) ||
          (LHS.getSimpleValueType() == MVT::i8)) &&
         "invalid comparison type");

  SDValue Cmp;

  if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
    uint64_t Imm = RHS->getAsZExtVal();
    // Generate a CPI/CPC pair if RHS is a 16-bit constant. Use the zero
    // register for the constant RHS if its lower or higher byte is zero.
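    // For example, a comparison against the constant 0x0042 can be emitted as
    //   cpi LHSlo, 0x42
    //   cpc LHShi, __zero_reg__
    // without having to materialize the zero high byte in another register.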
    SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
                                DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = (Imm & 0xff) == 0
                        ? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
                        : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
                                      DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = (Imm & 0xff00) == 0
                        ? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
                        : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
                                      DAG.getIntPtrConstant(1, DL));
    Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
    Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
  } else if (RHS.getSimpleValueType() == MVT::i16 &&
             isa<ConstantSDNode>(LHS)) {
    // Generate a CPI/CPC pair if LHS is a 16-bit constant. Use the zero
    // register for the constant LHS if its lower or higher byte is zero.
    uint64_t Imm = LHS->getAsZExtVal();
    SDValue LHSlo = (Imm & 0xff) == 0
                        ? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
                        : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
                                      DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = (Imm & 0xff00) == 0
                        ? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
                        : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
                                      DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
                                DAG.getIntPtrConstant(1, DL));
    Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
    Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
  } else {
    // Generate an ordinary 8/16-bit comparison.
    Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS);
  }

  return Cmp;
}

/// Returns appropriate AVR CMP/CMPC nodes and corresponding condition code for
/// the given operands.
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &AVRcc, SelectionDAG &DAG,
                                     SDLoc DL) const {
  SDValue Cmp;
  EVT VT = LHS.getValueType();
  bool UseTest = false;

  switch (CC) {
  default:
    break;
  case ISD::SETLE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETGE;
    break;
  }
  case ISD::SETGT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case -1: {
        // When doing lhs > -1 use a tst instruction on the top part of lhs
        // and use brpl instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_PL, DL, MVT::i8);
        break;
      }
      case 0: {
        // Turn lhs > 0 into 0 < lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETLT;
        break;
      }
      default: {
        // Turn lhs > rhs with rhs constant into lhs >= rhs+1; this allows
        // us to fold the constant into the cmp instruction.
        RHS = DAG.getSignedConstant(C->getSExtValue() + 1, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      }
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETLT;
    break;
  }
  case ISD::SETLT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case 1: {
        // Turn lhs < 1 into 0 >= lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      case 0: {
        // When doing lhs < 0 use a tst instruction on the top part of lhs
        // and use brmi instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_MI, DL, MVT::i8);
        break;
      }
      }
    }
    break;
  }
  case ISD::SETULE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETUGE;
    break;
  }
  case ISD::SETUGT: {
    // Turn lhs > rhs with rhs constant into lhs >= rhs+1; this allows us to
    // fold the constant into the cmp instruction.
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      // Doing an "icmp ugt i16 %0, 65535" comparison should have been
      // converted to something else already. Assert to make sure this
      // assumption holds.
      assert((!C->isAllOnes()) && "integer overflow in comparison transform");
      RHS = DAG.getConstant(C->getZExtValue() + 1, DL, VT);
      CC = ISD::SETUGE;
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETULT;
    break;
  }
  }

  // Expand 32 and 64 bit comparisons with custom CMP and CMPC nodes instead of
  // using the default and/or/xor expansion code which is much longer.
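  // The words are compared from least to most significant: the lowest word
  // uses a CMP (cp/cpc) node and every higher word chains the flags through
  // an additional CMPC node.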
  if (VT == MVT::i32) {
    SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHShi,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
    }
  } else if (VT == MVT::i64) {
    SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue LHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue LHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(1, DL));

    SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue RHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue RHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS3,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp);
    }
  } else if (VT == MVT::i8 || VT == MVT::i16) {
    if (UseTest) {
      // When using tst we only care about the highest part.
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue,
                        (VT == MVT::i8)
                            ? LHS
                            : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8,
                                          LHS, DAG.getIntPtrConstant(1, DL)));
    } else {
      Cmp = getAVRCmp(LHS, RHS, DAG, DL);
    }
  } else {
    llvm_unreachable("Invalid comparison size");
  }

  // When using a test instruction AVRcc is already set.
  if (!UseTest) {
    AVRcc = DAG.getConstant(intCCToAVRCC(CC), DL, MVT::i8);
  }

  return Cmp;
}

SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
                     Cmp);
}

SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, dl, Op.getValueType(), Ops);
}

SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, DL);

  SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
  SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, DL, Op.getValueType(), Ops);
}

SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  auto DL = DAG.getDataLayout();
  SDLoc dl(Op);

  // Vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDValue FI =
      DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));

  return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

// Modify the existing ISD::INLINEASM node to add the implicit zero register.
SDValue AVRTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
  SDValue ZeroReg = DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8);
  if (Op.getOperand(Op.getNumOperands() - 1) == ZeroReg ||
      Op.getOperand(Op.getNumOperands() - 2) == ZeroReg) {
    // Zero register has already been added. Don't add it again.
    // If this isn't handled, we get called over and over again.
    return Op;
  }

  // Get a list of operands to the new INLINEASM node. This is mostly a copy,
  // with some edits.
  // Add the following operands at the end (but before the glue node, if it's
  // there):
  // - The flags of the implicit zero register operand.
  // - The implicit zero register operand itself.
  SDLoc dl(Op);
  SmallVector<SDValue, 8> Ops;
  SDNode *N = Op.getNode();
  SDValue Glue;
  for (unsigned I = 0; I < N->getNumOperands(); I++) {
    SDValue Operand = N->getOperand(I);
    if (Operand.getValueType() == MVT::Glue) {
      // The glue operand always needs to be at the end, so we need to treat it
      // specially.
      Glue = Operand;
    } else {
      Ops.push_back(Operand);
    }
  }
  InlineAsm::Flag Flags(InlineAsm::Kind::RegUse, 1);
  Ops.push_back(DAG.getTargetConstant(Flags, dl, MVT::i32));
  Ops.push_back(ZeroReg);
  if (Glue) {
    Ops.push_back(Glue);
  }

  // Replace the current INLINEASM node with a new one that has the zero
  // register as implicit parameter.
  SDValue New = DAG.getNode(N->getOpcode(), dl, N->getVTList(), Ops);
  DAG.ReplaceAllUsesOfValueWith(Op, New);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), New.getValue(1));

  return New;
}

SDValue AVRTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    return LowerShifts(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return LowerDivRem(Op, DAG);
  case ISD::INLINEASM:
    return LowerINLINEASM(Op, DAG);
  }

  return SDValue();
}

/// Replace a node with an illegal result type
/// with a new node built out of custom code.
void AVRTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc DL(N);

  switch (N->getOpcode()) {
  case ISD::ADD: {
    // Convert add (x, imm) into sub (x, -imm).
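    // For example, (add i32 %x, 5) becomes (sub i32 %x, -5), which maps onto
    // AVR's subtract-with-immediate instructions (subi/sbci).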
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      SDValue Sub = DAG.getNode(
          ISD::SUB, DL, N->getValueType(0), N->getOperand(0),
          DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0)));
      Results.push_back(Sub);
    }
    break;
  }
  default: {
    SDValue Res = LowerOperation(SDValue(N, 0), DAG);

    for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
      Results.push_back(Res.getValue(I));

    break;
  }
  }
}

/// Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  int64_t Offs = AM.BaseOffs;

  // Allow absolute addresses.
  if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && Offs == 0) {
    return true;
  }

  // Flash memory instructions only allow zero offsets.
  if (isa<PointerType>(Ty) && AS == AVR::ProgramMemory) {
    return false;
  }

  // Allow reg+<6bit> offset.
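  // (LDD/STD encode a 6-bit displacement, i.e. 0..63.)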
  if (Offs < 0)
    Offs = -Offs;
  if (AM.BaseGV == nullptr && AM.HasBaseReg && AM.Scale == 0 &&
      isUInt<6>(Offs)) {
    return true;
  }

  return false;
}

/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool AVRTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  EVT VT;
  const SDNode *Op;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Op = LD->getBasePtr().getNode();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
    if (AVR::isProgramMemoryAccess(LD)) {
      return false;
    }
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Op = ST->getBasePtr().getNode();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS =
          dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getSignedConstant(RHSC, DL, MVT::i8);
    AM = ISD::PRE_DEC;

    return true;
  }

  return false;
}

/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool AVRTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  EVT VT;
  SDValue Ptr;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
    // We cannot store to program memory.
    if (AVR::isProgramMemoryAccess(ST))
      return false;
    // Since the high byte needs to be stored first, we cannot emit an
    // i16 post-increment store like:
    //   st X+, r24
    //   st X+, r25
    if (VT == MVT::i16 && !Subtarget.hasLowByteFirst())
      return false;
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS =
          dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;
    if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
      return false;
    }

    // FIXME: We temporarily disable post increment load from program memory,
    // due to bug https://github.com/llvm/llvm-project/issues/59914.
    if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
      if (AVR::isProgramMemoryAccess(LD))
        return false;

    Base = Op->getOperand(0);

    // Post-indexing updates the base, so it's not a valid transform
    // if that's not the same as the load's pointer.
    if (Ptr != Base)
      return false;

    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::POST_INC;

    return true;
  }

  return false;
}

bool AVRTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  return true;
}

//===----------------------------------------------------------------------===//
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "AVRGenCallingConv.inc"

/// Registers for calling conventions, ordered in reverse as required by the
/// ABI. Both arrays must be of the same length.
static const MCPhysReg RegList8AVR[] = {
    AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
    AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
    AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9, AVR::R8};
static const MCPhysReg RegList8Tiny[] = {AVR::R25, AVR::R24, AVR::R23,
                                         AVR::R22, AVR::R21, AVR::R20};
static const MCPhysReg RegList16AVR[] = {
    AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22, AVR::R22R21,
    AVR::R21R20, AVR::R20R19, AVR::R19R18, AVR::R18R17, AVR::R17R16,
    AVR::R16R15, AVR::R15R14, AVR::R14R13, AVR::R13R12, AVR::R12R11,
    AVR::R11R10, AVR::R10R9, AVR::R9R8};
static const MCPhysReg RegList16Tiny[] = {AVR::R26R25, AVR::R25R24,
                                          AVR::R24R23, AVR::R23R22,
                                          AVR::R22R21, AVR::R21R20};

static_assert(std::size(RegList8AVR) == std::size(RegList16AVR),
              "8-bit and 16-bit register arrays must be of equal length");
static_assert(std::size(RegList8Tiny) == std::size(RegList16Tiny),
              "8-bit and 16-bit register arrays must be of equal length");

/// Analyze incoming and outgoing function arguments. We need custom C++ code
/// to handle special constraints in the ABI.
/// In addition, all pieces of a certain argument have to be passed either
/// using registers or the stack but never mixing both.
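/// For example, under the regular AVR ABI, f(i8 a, i16 b) passes a in R24 (an
/// i8 argument still occupies a two-byte register slot) and b in R23:R22.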
template <typename ArgT>
static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI,
                             const Function *F, const DataLayout *TD,
                             const SmallVectorImpl<ArgT> &Args,
                             SmallVectorImpl<CCValAssign> &ArgLocs,
                             CCState &CCInfo, bool Tiny) {
  // Choose the proper register list for argument passing according to the ABI.
  ArrayRef<MCPhysReg> RegList8;
  ArrayRef<MCPhysReg> RegList16;
  if (Tiny) {
    RegList8 = ArrayRef(RegList8Tiny);
    RegList16 = ArrayRef(RegList16Tiny);
  } else {
    RegList8 = ArrayRef(RegList8AVR);
    RegList16 = ArrayRef(RegList16AVR);
  }

  unsigned NumArgs = Args.size();
  // This is the index of the last used register, in RegList*.
  // -1 means R26 (R26 is never actually used in CC).
  int RegLastIdx = -1;
  // Once an argument is passed on the stack, all remaining arguments are
  // passed on the stack as well.
  bool UseStack = false;
  for (unsigned i = 0; i != NumArgs;) {
    MVT VT = Args[i].VT;
    // We have to count the number of bytes for each function argument, that is
    // those Args with the same OrigArgIndex. This is important in case the
    // function takes an aggregate type.
    // Current argument will be between [i..j).
    unsigned ArgIndex = Args[i].OrigArgIndex;
    unsigned TotalBytes = VT.getStoreSize();
    unsigned j = i + 1;
    for (; j != NumArgs; ++j) {
      if (Args[j].OrigArgIndex != ArgIndex)
        break;
      TotalBytes += Args[j].VT.getStoreSize();
    }
    // Round up to even number of bytes.
    TotalBytes = alignTo(TotalBytes, 2);
    // Skip zero sized arguments
    if (TotalBytes == 0)
      continue;
    // The index of the first register to be used
    unsigned RegIdx = RegLastIdx + TotalBytes;
    RegLastIdx = RegIdx;
    // If there are not enough registers, use the stack
    if (RegIdx >= RegList8.size()) {
      UseStack = true;
    }
    for (; i != j; ++i) {
      MVT VT = Args[i].VT;

      if (UseStack) {
        auto evt = EVT(VT).getTypeForEVT(CCInfo.getContext());
        unsigned Offset = CCInfo.AllocateStack(TD->getTypeAllocSize(evt),
                                               TD->getABITypeAlign(evt));
        CCInfo.addLoc(
            CCValAssign::getMem(i, VT, Offset, VT, CCValAssign::Full));
      } else {
        unsigned Reg;
        if (VT == MVT::i8) {
          Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
        } else if (VT == MVT::i16) {
          Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
        } else {
          llvm_unreachable(
              "calling convention can only manage i8 and i16 types");
        }
        assert(Reg && "register not available in calling convention");
        CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
        // Registers inside a particular argument are sorted in increasing
        // order (remember the array is reversed).
        RegIdx -= VT.getStoreSize();
      }
    }
  }
}

/// Count the total number of bytes needed to pass or return these arguments.
template <typename ArgT>
static unsigned
getTotalArgumentsSizeInBytes(const SmallVectorImpl<ArgT> &Args) {
  unsigned TotalBytes = 0;

  for (const ArgT &Arg : Args) {
    TotalBytes += Arg.VT.getStoreSize();
  }
  return TotalBytes;
}

/// Analyze incoming and outgoing value of returning from a function.
/// The algorithm is similar to analyzeArguments, but there can only be
/// one value, possibly an aggregate, and it is limited to 8 bytes.
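/// For example, an i16 value is returned in R25:R24, and an 8-byte value is
/// returned in R25..R18, matching avr-gcc.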
template <typename ArgT>
static void analyzeReturnValues(const SmallVectorImpl<ArgT> &Args,
                                CCState &CCInfo, bool Tiny) {
  unsigned NumArgs = Args.size();
  unsigned TotalBytes = getTotalArgumentsSizeInBytes(Args);
  // CanLowerReturn() guarantees this assertion.
  if (Tiny)
    assert(TotalBytes <= 4 &&
           "return values greater than 4 bytes cannot be lowered on AVRTiny");
  else
    assert(TotalBytes <= 8 &&
           "return values greater than 8 bytes cannot be lowered on AVR");

  // Choose the proper register list for argument passing according to the ABI.
  ArrayRef<MCPhysReg> RegList8;
  ArrayRef<MCPhysReg> RegList16;
  if (Tiny) {
    RegList8 = ArrayRef(RegList8Tiny);
    RegList16 = ArrayRef(RegList16Tiny);
  } else {
    RegList8 = ArrayRef(RegList8AVR);
    RegList16 = ArrayRef(RegList16AVR);
  }

  // GCC-ABI says that the size is rounded up to the next even number,
  // but actually once it is more than 4 it will always round up to 8.
  if (TotalBytes > 4) {
    TotalBytes = 8;
  } else {
    TotalBytes = alignTo(TotalBytes, 2);
  }

  // The index of the first register to use.
  int RegIdx = TotalBytes - 1;
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT VT = Args[i].VT;
    unsigned Reg;
    if (VT == MVT::i8) {
      Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
    } else if (VT == MVT::i16) {
      Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
    } else {
      llvm_unreachable("calling convention can only manage i8 and i16 types");
    }
    assert(Reg && "register not available in calling convention");
    CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
    // Registers are assigned in increasing order (the array is reversed).
1320 RegIdx -= VT.getStoreSize();
1321 }
1322}
1323
1324SDValue AVRTargetLowering::LowerFormalArguments(
1325 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1326 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1327 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1328 MachineFunction &MF = DAG.getMachineFunction();
1329 MachineFrameInfo &MFI = MF.getFrameInfo();
1330 auto DL = DAG.getDataLayout();
1331
1332 // Assign locations to all of the incoming arguments.
1333 SmallVector<CCValAssign, 16> ArgLocs;
1334 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1335 *DAG.getContext());
1336
1337 // Variadic functions do not need all the analysis below.
1338 if (isVarArg) {
1339 CCInfo.AnalyzeFormalArguments(Ins, Fn: ArgCC_AVR_Vararg);
1340 } else {
1341 analyzeArguments(CLI: nullptr, F: &MF.getFunction(), TD: &DL, Args: Ins, ArgLocs, CCInfo,
1342 Tiny: Subtarget.hasTinyEncoding());
1343 }
1344
1345 SDValue ArgValue;
1346 for (CCValAssign &VA : ArgLocs) {
1347
1348 // Arguments stored on registers.
1349 if (VA.isRegLoc()) {
1350 EVT RegVT = VA.getLocVT();
1351 const TargetRegisterClass *RC;
1352 if (RegVT == MVT::i8) {
1353 RC = &AVR::GPR8RegClass;
1354 } else if (RegVT == MVT::i16) {
1355 RC = &AVR::DREGSRegClass;
1356 } else {
1357 llvm_unreachable("Unknown argument type!");
1358 }
1359
1360 Register Reg = MF.addLiveIn(PReg: VA.getLocReg(), RC);
1361 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, VT: RegVT);
1362
1363 // :NOTE: Clang should not promote any i8 into i16 but for safety the
1364 // following code will handle zexts or sexts generated by other
1365 // front ends. Otherwise:
1366 // If this is an 8 bit value, it is really passed promoted
1367 // to 16 bits. Insert an assert[sz]ext to capture this, then
1368 // truncate to the right size.
1369 switch (VA.getLocInfo()) {
1370 default:
1371 llvm_unreachable("Unknown loc info!");
1372 case CCValAssign::Full:
1373 break;
1374 case CCValAssign::BCvt:
1375 ArgValue = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getValVT(), Operand: ArgValue);
1376 break;
1377 case CCValAssign::SExt:
1378 ArgValue = DAG.getNode(Opcode: ISD::AssertSext, DL: dl, VT: RegVT, N1: ArgValue,
1379 N2: DAG.getValueType(VA.getValVT()));
1380 ArgValue = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: VA.getValVT(), Operand: ArgValue);
1381 break;
1382 case CCValAssign::ZExt:
1383 ArgValue = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: RegVT, N1: ArgValue,
1384 N2: DAG.getValueType(VA.getValVT()));
1385 ArgValue = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: VA.getValVT(), Operand: ArgValue);
1386 break;
1387 }
1388
1389 InVals.push_back(Elt: ArgValue);
1390 } else {
1391 // Only arguments passed on the stack should make it here.
1392 assert(VA.isMemLoc());
1393
1394 EVT LocVT = VA.getLocVT();
1395
1396 // Create the frame index object for this incoming parameter.
1397 int FI = MFI.CreateFixedObject(Size: LocVT.getSizeInBits() / 8,
1398 SPOffset: VA.getLocMemOffset(), IsImmutable: true);
1399
1400 // Create the SelectionDAG nodes corresponding to a load
1401 // from this parameter.
1402 SDValue FIN = DAG.getFrameIndex(FI, VT: getPointerTy(DL));
1403 InVals.push_back(Elt: DAG.getLoad(VT: LocVT, dl, Chain, Ptr: FIN,
1404 PtrInfo: MachinePointerInfo::getFixedStack(MF, FI)));
1405 }
1406 }
1407
1408 // If the function takes variable number of arguments, make a frame index for
1409 // the start of the first vararg value... for expansion of llvm.va_start.
1410 if (isVarArg) {
1411 unsigned StackSize = CCInfo.getStackSize();
1412 AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1413
1414 AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(Size: 2, SPOffset: StackSize, IsImmutable: true));
1415 }
1416
1417 return Chain;
1418}
1419
1420//===----------------------------------------------------------------------===//
1421// Call Calling Convention Implementation
1422//===----------------------------------------------------------------------===//
1423
1424SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1425 SmallVectorImpl<SDValue> &InVals) const {
1426 SelectionDAG &DAG = CLI.DAG;
1427 SDLoc &DL = CLI.DL;
1428 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1429 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1430 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1431 SDValue Chain = CLI.Chain;
1432 SDValue Callee = CLI.Callee;
1433 bool &isTailCall = CLI.IsTailCall;
1434 CallingConv::ID CallConv = CLI.CallConv;
1435 bool isVarArg = CLI.IsVarArg;
1436
1437 MachineFunction &MF = DAG.getMachineFunction();
1438
1439 // AVR does not yet support tail call optimization.
1440 isTailCall = false;
1441
1442 // Analyze operands of the call, assigning locations to each operand.
1443 SmallVector<CCValAssign, 16> ArgLocs;
1444 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1445 *DAG.getContext());
1446
  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is), turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
1450 const Function *F = nullptr;
1451 if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) {
1452 const GlobalValue *GV = G->getGlobal();
1453 if (isa<Function>(Val: GV))
1454 F = cast<Function>(Val: GV);
1455 Callee =
1456 DAG.getTargetGlobalAddress(GV, DL, VT: getPointerTy(DL: DAG.getDataLayout()));
1457 } else if (const ExternalSymbolSDNode *ES =
1458 dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) {
1459 Callee = DAG.getTargetExternalSymbol(Sym: ES->getSymbol(),
1460 VT: getPointerTy(DL: DAG.getDataLayout()));
1461 }
1462
1463 // Variadic functions do not need all the analysis below.
1464 if (isVarArg) {
1465 CCInfo.AnalyzeCallOperands(Outs, Fn: ArgCC_AVR_Vararg);
1466 } else {
1467 analyzeArguments(CLI: &CLI, F, TD: &DAG.getDataLayout(), Args: Outs, ArgLocs, CCInfo,
1468 Tiny: Subtarget.hasTinyEncoding());
1469 }
1470
1471 // Get a count of how many bytes are to be pushed on the stack.
1472 unsigned NumBytes = CCInfo.getStackSize();
1473
1474 Chain = DAG.getCALLSEQ_START(Chain, InSize: NumBytes, OutSize: 0, DL);
1475
1476 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1477
1478 // First, walk the register assignments, inserting copies.
1479 unsigned AI, AE;
1480 bool HasStackArgs = false;
1481 for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
1482 CCValAssign &VA = ArgLocs[AI];
1483 EVT RegVT = VA.getLocVT();
1484 SDValue Arg = OutVals[AI];
1485
1486 // Promote the value if needed. With Clang this should not happen.
1487 switch (VA.getLocInfo()) {
1488 default:
1489 llvm_unreachable("Unknown loc info!");
1490 case CCValAssign::Full:
1491 break;
1492 case CCValAssign::SExt:
1493 Arg = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT: RegVT, Operand: Arg);
1494 break;
1495 case CCValAssign::ZExt:
1496 Arg = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: RegVT, Operand: Arg);
1497 break;
1498 case CCValAssign::AExt:
1499 Arg = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: RegVT, Operand: Arg);
1500 break;
1501 case CCValAssign::BCvt:
1502 Arg = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: RegVT, Operand: Arg);
1503 break;
1504 }
1505
    // Stop when we encounter a stack argument; the remaining arguments are
    // handled in the loop below.
1508 if (VA.isMemLoc()) {
1509 HasStackArgs = true;
1510 break;
1511 }
1512
1513 // Arguments that can be passed on registers must be kept in the RegsToPass
1514 // vector.
1515 RegsToPass.push_back(Elt: std::make_pair(x: VA.getLocReg(), y&: Arg));
1516 }
1517
  // Second, the stack arguments have to be walked.
  // Previously this code created sequentially chained stores, but those chains
  // appear to be broken up again in the legalization phase. Therefore, do not
  // attempt to chain them here. In fact, chaining them here somehow causes the
  // first and second store to be reversed, which is the exact opposite of the
  // intended effect.
1524 if (HasStackArgs) {
1525 SmallVector<SDValue, 8> MemOpChains;
1526 for (; AI != AE; AI++) {
1527 CCValAssign &VA = ArgLocs[AI];
1528 SDValue Arg = OutVals[AI];
1529
1530 assert(VA.isMemLoc());
1531
      // SP points to the next free stack byte (one below the last used slot),
      // so add one to the offset to address the argument's slot.
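      // For example, an argument with a LocMemOffset of 0 is stored at SP+1,
      // the first byte of the outgoing argument area.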
1533 SDValue PtrOff = DAG.getNode(
1534 Opcode: ISD::ADD, DL, VT: getPointerTy(DL: DAG.getDataLayout()),
1535 N1: DAG.getRegister(Reg: AVR::SP, VT: getPointerTy(DL: DAG.getDataLayout())),
1536 N2: DAG.getIntPtrConstant(Val: VA.getLocMemOffset() + 1, DL));
1537
1538 MemOpChains.push_back(
1539 Elt: DAG.getStore(Chain, dl: DL, Val: Arg, Ptr: PtrOff,
1540 PtrInfo: MachinePointerInfo::getStack(MF, Offset: VA.getLocMemOffset())));
1541 }
1542
1543 if (!MemOpChains.empty())
1544 Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: MemOpChains);
1545 }
1546
  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers. The InGlue
  // is necessary since all emitted instructions must be stuck together.
1550 SDValue InGlue;
1551 for (auto Reg : RegsToPass) {
1552 Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: Reg.first, N: Reg.second, Glue: InGlue);
1553 InGlue = Chain.getValue(R: 1);
1554 }
1555
1556 // Returns a chain & a flag for retval copy to use.
1557 SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue);
1558 SmallVector<SDValue, 8> Ops;
1559 Ops.push_back(Elt: Chain);
1560 Ops.push_back(Elt: Callee);
1561
1562 // Add argument registers to the end of the list so that they are known live
1563 // into the call.
1564 for (auto Reg : RegsToPass) {
1565 Ops.push_back(Elt: DAG.getRegister(Reg: Reg.first, VT: Reg.second.getValueType()));
1566 }
1567
1568 // The zero register (usually R1) must be passed as an implicit register so
1569 // that this register is correctly zeroed in interrupts.
1570 Ops.push_back(Elt: DAG.getRegister(Reg: Subtarget.getZeroRegister(), VT: MVT::i8));
1571
1572 // Add a register mask operand representing the call-preserved registers.
1573 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1574 const uint32_t *Mask =
1575 TRI->getCallPreservedMask(MF: DAG.getMachineFunction(), CallConv);
1576 assert(Mask && "Missing call preserved mask for calling convention");
1577 Ops.push_back(Elt: DAG.getRegisterMask(RegMask: Mask));
1578
1579 if (InGlue.getNode()) {
1580 Ops.push_back(Elt: InGlue);
1581 }
1582
1583 Chain = DAG.getNode(Opcode: AVRISD::CALL, DL, VTList: NodeTys, Ops);
1584 InGlue = Chain.getValue(R: 1);
1585
1586 // Create the CALLSEQ_END node.
1587 Chain = DAG.getCALLSEQ_END(Chain, Size1: NumBytes, Size2: 0, Glue: InGlue, DL);
1588
1589 if (!Ins.empty()) {
1590 InGlue = Chain.getValue(R: 1);
1591 }
1592
1593 // Handle result values, copying them out of physregs into vregs that we
1594 // return.
1595 return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl: DL, DAG,
1596 InVals);
1597}
1598
/// Lower the result values of a call into the
/// appropriate copies out of physical registers.
///
1602SDValue AVRTargetLowering::LowerCallResult(
1603 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
1604 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1605 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1606
1607 // Assign locations to each value returned by this call.
1608 SmallVector<CCValAssign, 16> RVLocs;
1609 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1610 *DAG.getContext());
1611
1612 // Handle runtime calling convs.
1613 if (CallConv == CallingConv::AVR_BUILTIN) {
1614 CCInfo.AnalyzeCallResult(Ins, Fn: RetCC_AVR_BUILTIN);
1615 } else {
1616 analyzeReturnValues(Args: Ins, CCInfo, Tiny: Subtarget.hasTinyEncoding());
1617 }
1618
1619 // Copy all of the result registers out of their specified physreg.
1620 for (CCValAssign const &RVLoc : RVLocs) {
1621 Chain = DAG.getCopyFromReg(Chain, dl, Reg: RVLoc.getLocReg(), VT: RVLoc.getValVT(),
1622 Glue: InGlue)
1623 .getValue(R: 1);
1624 InGlue = Chain.getValue(R: 2);
1625 InVals.push_back(Elt: Chain.getValue(R: 0));
1626 }
1627
1628 return Chain;
1629}
1630
1631//===----------------------------------------------------------------------===//
1632// Return Value Calling Convention Implementation
1633//===----------------------------------------------------------------------===//
1634
1635bool AVRTargetLowering::CanLowerReturn(
1636 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
1637 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
1638 const Type *RetTy) const {
1639 if (CallConv == CallingConv::AVR_BUILTIN) {
1640 SmallVector<CCValAssign, 16> RVLocs;
1641 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1642 return CCInfo.CheckReturn(Outs, Fn: RetCC_AVR_BUILTIN);
1643 }
1644
1645 unsigned TotalBytes = getTotalArgumentsSizeInBytes(Args: Outs);
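  // If the size check below fails, target-independent SelectionDAG code
  // demotes the return value to an sret-style indirect return through a
  // hidden pointer argument.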
1646 return TotalBytes <= (unsigned)(Subtarget.hasTinyEncoding() ? 4 : 8);
1647}
1648
1649SDValue
1650AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1651 bool isVarArg,
1652 const SmallVectorImpl<ISD::OutputArg> &Outs,
1653 const SmallVectorImpl<SDValue> &OutVals,
1654 const SDLoc &dl, SelectionDAG &DAG) const {
1655 // CCValAssign - represent the assignment of the return value to locations.
1656 SmallVector<CCValAssign, 16> RVLocs;
1657
1658 // CCState - Info about the registers and stack slot.
1659 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1660 *DAG.getContext());
1661
1662 MachineFunction &MF = DAG.getMachineFunction();
1663
1664 // Analyze return values.
1665 if (CallConv == CallingConv::AVR_BUILTIN) {
1666 CCInfo.AnalyzeReturn(Outs, Fn: RetCC_AVR_BUILTIN);
1667 } else {
1668 analyzeReturnValues(Args: Outs, CCInfo, Tiny: Subtarget.hasTinyEncoding());
1669 }
1670
1671 SDValue Glue;
1672 SmallVector<SDValue, 4> RetOps(1, Chain);
1673 // Copy the result values into the output registers.
1674 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1675 CCValAssign &VA = RVLocs[i];
1676 assert(VA.isRegLoc() && "Can only return in registers!");
1677
1678 Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), N: OutVals[i], Glue);
1679
1680 // Guarantee that all emitted copies are stuck together with flags.
1681 Glue = Chain.getValue(R: 1);
1682 RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT()));
1683 }
1684
1685 // Don't emit the ret/reti instruction when the naked attribute is present in
1686 // the function being compiled.
1687 if (MF.getFunction().getAttributes().hasFnAttr(Kind: Attribute::Naked)) {
1688 return Chain;
1689 }
1690
1691 const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1692
1693 if (!AFI->isInterruptOrSignalHandler()) {
1694 // The return instruction has an implicit zero register operand: it must
1695 // contain zero on return.
1696 // This is not needed in interrupts however, where the zero register is
1697 // handled specially (only pushed/popped when needed).
1698 RetOps.push_back(Elt: DAG.getRegister(Reg: Subtarget.getZeroRegister(), VT: MVT::i8));
1699 }
1700
1701 unsigned RetOpc =
1702 AFI->isInterruptOrSignalHandler() ? AVRISD::RETI_GLUE : AVRISD::RET_GLUE;
1703
1704 RetOps[0] = Chain; // Update chain.
1705
1706 if (Glue.getNode()) {
1707 RetOps.push_back(Elt: Glue);
1708 }
1709
1710 return DAG.getNode(Opcode: RetOpc, DL: dl, VT: MVT::Other, Ops: RetOps);
1711}
1712
1713//===----------------------------------------------------------------------===//
1714// Custom Inserters
1715//===----------------------------------------------------------------------===//
1716
1717MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
1718 MachineBasicBlock *BB,
1719 bool Tiny) const {
1720 unsigned Opc;
1721 const TargetRegisterClass *RC;
1722 bool HasRepeatedOperand = false;
1723 MachineFunction *F = BB->getParent();
1724 MachineRegisterInfo &RI = F->getRegInfo();
1725 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1726 DebugLoc dl = MI.getDebugLoc();
1727
1728 switch (MI.getOpcode()) {
1729 default:
1730 llvm_unreachable("Invalid shift opcode!");
1731 case AVR::Lsl8:
1732 Opc = AVR::ADDRdRr; // LSL is an alias of ADD Rd, Rd
1733 RC = &AVR::GPR8RegClass;
1734 HasRepeatedOperand = true;
1735 break;
1736 case AVR::Lsl16:
1737 Opc = AVR::LSLWRd;
1738 RC = &AVR::DREGSRegClass;
1739 break;
1740 case AVR::Asr8:
1741 Opc = AVR::ASRRd;
1742 RC = &AVR::GPR8RegClass;
1743 break;
1744 case AVR::Asr16:
1745 Opc = AVR::ASRWRd;
1746 RC = &AVR::DREGSRegClass;
1747 break;
1748 case AVR::Lsr8:
1749 Opc = AVR::LSRRd;
1750 RC = &AVR::GPR8RegClass;
1751 break;
1752 case AVR::Lsr16:
1753 Opc = AVR::LSRWRd;
1754 RC = &AVR::DREGSRegClass;
1755 break;
1756 case AVR::Rol8:
1757 Opc = Tiny ? AVR::ROLBRdR17 : AVR::ROLBRdR1;
1758 RC = &AVR::GPR8RegClass;
1759 break;
1760 case AVR::Rol16:
1761 Opc = AVR::ROLWRd;
1762 RC = &AVR::DREGSRegClass;
1763 break;
1764 case AVR::Ror8:
1765 Opc = AVR::RORBRd;
1766 RC = &AVR::GPR8RegClass;
1767 break;
1768 case AVR::Ror16:
1769 Opc = AVR::RORWRd;
1770 RC = &AVR::DREGSRegClass;
1771 break;
1772 }
1773
1774 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1775
  MachineFunction::iterator I = std::next(x: BB->getIterator());
1781
1782 // Create loop block.
1783 MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
1784 MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
1785 MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
1786
1787 F->insert(MBBI: I, MBB: LoopBB);
1788 F->insert(MBBI: I, MBB: CheckBB);
1789 F->insert(MBBI: I, MBB: RemBB);
1790
1791 // Update machine-CFG edges by transferring all successors of the current
1792 // block to the block containing instructions after shift.
1793 RemBB->splice(Where: RemBB->begin(), Other: BB, From: std::next(x: MachineBasicBlock::iterator(MI)),
1794 To: BB->end());
1795 RemBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB);
1796
1797 // Add edges BB => LoopBB => CheckBB => RemBB, CheckBB => LoopBB.
1798 BB->addSuccessor(Succ: CheckBB);
1799 LoopBB->addSuccessor(Succ: CheckBB);
1800 CheckBB->addSuccessor(Succ: LoopBB);
1801 CheckBB->addSuccessor(Succ: RemBB);
1802
1803 Register ShiftAmtReg = RI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
1804 Register ShiftAmtReg2 = RI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
1805 Register ShiftReg = RI.createVirtualRegister(RegClass: RC);
1806 Register ShiftReg2 = RI.createVirtualRegister(RegClass: RC);
1807 Register ShiftAmtSrcReg = MI.getOperand(i: 2).getReg();
1808 Register SrcReg = MI.getOperand(i: 1).getReg();
1809 Register DstReg = MI.getOperand(i: 0).getReg();
1810
1811 // BB:
1812 // rjmp CheckBB
1813 BuildMI(BB, MIMD: dl, MCID: TII.get(Opcode: AVR::RJMPk)).addMBB(MBB: CheckBB);
1814
1815 // LoopBB:
1816 // ShiftReg2 = shift ShiftReg
1817 auto ShiftMI = BuildMI(BB: LoopBB, MIMD: dl, MCID: TII.get(Opcode: Opc), DestReg: ShiftReg2).addReg(RegNo: ShiftReg);
1818 if (HasRepeatedOperand)
1819 ShiftMI.addReg(RegNo: ShiftReg);
1820
1821 // CheckBB:
1822 // ShiftReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
1823 // ShiftAmt = phi [%N, BB], [%ShiftAmt2, LoopBB]
1824 // DestReg = phi [%SrcReg, BB], [%ShiftReg, LoopBB]
1825 // ShiftAmt2 = ShiftAmt - 1;
1826 // if (ShiftAmt2 >= 0) goto LoopBB;
1827 BuildMI(BB: CheckBB, MIMD: dl, MCID: TII.get(Opcode: AVR::PHI), DestReg: ShiftReg)
1828 .addReg(RegNo: SrcReg)
1829 .addMBB(MBB: BB)
1830 .addReg(RegNo: ShiftReg2)
1831 .addMBB(MBB: LoopBB);
1832 BuildMI(BB: CheckBB, MIMD: dl, MCID: TII.get(Opcode: AVR::PHI), DestReg: ShiftAmtReg)
1833 .addReg(RegNo: ShiftAmtSrcReg)
1834 .addMBB(MBB: BB)
1835 .addReg(RegNo: ShiftAmtReg2)
1836 .addMBB(MBB: LoopBB);
1837 BuildMI(BB: CheckBB, MIMD: dl, MCID: TII.get(Opcode: AVR::PHI), DestReg: DstReg)
1838 .addReg(RegNo: SrcReg)
1839 .addMBB(MBB: BB)
1840 .addReg(RegNo: ShiftReg2)
1841 .addMBB(MBB: LoopBB);
1842
1843 BuildMI(BB: CheckBB, MIMD: dl, MCID: TII.get(Opcode: AVR::DECRd), DestReg: ShiftAmtReg2).addReg(RegNo: ShiftAmtReg);
1844 BuildMI(BB: CheckBB, MIMD: dl, MCID: TII.get(Opcode: AVR::BRPLk)).addMBB(MBB: LoopBB);
1845
1846 MI.eraseFromParent(); // The pseudo instruction is gone now.
1847 return RemBB;
1848}
1849
1850// Do a multibyte AVR shift. Insert shift instructions and put the output
1851// registers in the Regs array.
// Because AVR does not have a normal shift instruction (only single-bit shift
// instructions), we have to emulate this behavior with other instructions.
// It first tries large steps (moving registers around) and then smaller steps
// like single-bit shifts.
// Large shifts actually reduce the number of shifted registers, so the
// algorithms below have to work independently of the number of registers that
// are shifted.
// For more information and background, see this blog post:
// https://aykevl.nl/2021/02/avr-bitshift
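//
// As an illustrative decomposition (not a literal trace of the code below):
// a 32-bit logical shift left by 12 is done as one whole-register move (a
// shift by 8) followed by a swap/andi nibble shift (a shift by 4), rather
// than as twelve single-bit shifts.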
1861static void insertMultibyteShift(MachineInstr &MI, MachineBasicBlock *BB,
1862 MutableArrayRef<std::pair<Register, int>> Regs,
1863 ISD::NodeType Opc, int64_t ShiftAmt) {
1864 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
1865 const AVRSubtarget &STI = BB->getParent()->getSubtarget<AVRSubtarget>();
1866 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
1867 const DebugLoc &dl = MI.getDebugLoc();
1868
1869 const bool ShiftLeft = Opc == ISD::SHL;
1870 const bool ArithmeticShift = Opc == ISD::SRA;
1871
1872 // Zero a register, for use in later operations.
1873 Register ZeroReg = MRI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
1874 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::COPY), DestReg: ZeroReg)
1875 .addReg(RegNo: STI.getZeroRegister());
1876
1877 // Do a shift modulo 6 or 7. This is a bit more complicated than most shifts
1878 // and is hard to compose with the rest, so these are special cased.
1879 // The basic idea is to shift one or two bits in the opposite direction and
1880 // then move registers around to get the correct end result.
1881 if (ShiftLeft && (ShiftAmt % 8) >= 6) {
1882 // Left shift modulo 6 or 7.
1883
1884 // Create a slice of the registers we're going to modify, to ease working
1885 // with them.
1886 size_t ShiftRegsOffset = ShiftAmt / 8;
1887 size_t ShiftRegsSize = Regs.size() - ShiftRegsOffset;
1888 MutableArrayRef<std::pair<Register, int>> ShiftRegs =
1889 Regs.slice(N: ShiftRegsOffset, M: ShiftRegsSize);
1890
1891 // Shift one to the right, keeping the least significant bit as the carry
1892 // bit.
1893 insertMultibyteShift(MI, BB, Regs: ShiftRegs, Opc: ISD::SRL, ShiftAmt: 1);
1894
1895 // Rotate the least significant bit from the carry bit into a new register
1896 // (that starts out zero).
1897 Register LowByte = MRI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
1898 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::RORRd), DestReg: LowByte).addReg(RegNo: ZeroReg);
1899
1900 // Shift one more to the right if this is a modulo-6 shift.
1901 if (ShiftAmt % 8 == 6) {
1902 insertMultibyteShift(MI, BB, Regs: ShiftRegs, Opc: ISD::SRL, ShiftAmt: 1);
1903 Register NewLowByte = MRI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
1904 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::RORRd), DestReg: NewLowByte).addReg(RegNo: LowByte);
1905 LowByte = NewLowByte;
1906 }
1907
1908 // Move all registers to the left, zeroing the bottom registers as needed.
1909 for (size_t I = 0; I < Regs.size(); I++) {
1910 int ShiftRegsIdx = I + 1;
1911 if (ShiftRegsIdx < (int)ShiftRegs.size()) {
1912 Regs[I] = ShiftRegs[ShiftRegsIdx];
1913 } else if (ShiftRegsIdx == (int)ShiftRegs.size()) {
1914 Regs[I] = std::pair(LowByte, 0);
1915 } else {
1916 Regs[I] = std::pair(ZeroReg, 0);
1917 }
1918 }
1919
1920 return;
1921 }
1922
1923 // Right shift modulo 6 or 7.
1924 if (!ShiftLeft && (ShiftAmt % 8) >= 6) {
1925 // Create a view on the registers we're going to modify, to ease working
1926 // with them.
1927 size_t ShiftRegsSize = Regs.size() - (ShiftAmt / 8);
1928 MutableArrayRef<std::pair<Register, int>> ShiftRegs =
1929 Regs.slice(N: 0, M: ShiftRegsSize);
1930
1931 // Shift one to the left.
1932 insertMultibyteShift(MI, BB, Regs: ShiftRegs, Opc: ISD::SHL, ShiftAmt: 1);
1933
1934 // Sign or zero extend the most significant register into a new register.
1935 // The HighByte is the byte that still has one (or two) bits from the
1936 // original value. The ExtByte is purely a zero/sign extend byte (all bits
1937 // are either 0 or 1).
1938 Register HighByte = MRI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
1939 Register ExtByte = 0;
1940 if (ArithmeticShift) {
1941 // Sign-extend bit that was shifted out last.
1942 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::SBCRdRr), DestReg: HighByte)
1943 .addReg(RegNo: HighByte, Flags: RegState::Undef)
1944 .addReg(RegNo: HighByte, Flags: RegState::Undef);
1945 ExtByte = HighByte;
1946 // The highest bit of the original value is the same as the zero-extend
1947 // byte, so HighByte and ExtByte are the same.
1948 } else {
1949 // Use the zero register for zero extending.
1950 ExtByte = ZeroReg;
1951 // Rotate most significant bit into a new register (that starts out zero).
1952 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::ADCRdRr), DestReg: HighByte)
1953 .addReg(RegNo: ExtByte)
1954 .addReg(RegNo: ExtByte);
1955 }
1956
1957 // Shift one more to the left for modulo 6 shifts.
1958 if (ShiftAmt % 8 == 6) {
1959 insertMultibyteShift(MI, BB, Regs: ShiftRegs, Opc: ISD::SHL, ShiftAmt: 1);
1960 // Shift the topmost bit into the HighByte.
1961 Register NewExt = MRI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
1962 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::ADCRdRr), DestReg: NewExt)
1963 .addReg(RegNo: HighByte)
1964 .addReg(RegNo: HighByte);
1965 HighByte = NewExt;
1966 }
1967
1968 // Move all to the right, while sign or zero extending.
1969 for (int I = Regs.size() - 1; I >= 0; I--) {
1970 int ShiftRegsIdx = I - (Regs.size() - ShiftRegs.size()) - 1;
1971 if (ShiftRegsIdx >= 0) {
1972 Regs[I] = ShiftRegs[ShiftRegsIdx];
1973 } else if (ShiftRegsIdx == -1) {
1974 Regs[I] = std::pair(HighByte, 0);
1975 } else {
1976 Regs[I] = std::pair(ExtByte, 0);
1977 }
1978 }
1979
1980 return;
1981 }
1982
1983 // For shift amounts of at least one register, simply rename the registers and
1984 // zero the bottom registers.
1985 while (ShiftLeft && ShiftAmt >= 8) {
1986 // Move all registers one to the left.
1987 for (size_t I = 0; I < Regs.size() - 1; I++) {
1988 Regs[I] = Regs[I + 1];
1989 }
1990
1991 // Zero the least significant register.
1992 Regs[Regs.size() - 1] = std::pair(ZeroReg, 0);
1993
1994 // Continue shifts with the leftover registers.
1995 Regs = Regs.drop_back(N: 1);
1996
1997 ShiftAmt -= 8;
1998 }
1999
2000 // And again, the same for right shifts.
2001 Register ShrExtendReg = 0;
2002 if (!ShiftLeft && ShiftAmt >= 8) {
2003 if (ArithmeticShift) {
2004 // Sign extend the most significant register into ShrExtendReg.
2005 ShrExtendReg = MRI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
2006 Register Tmp = MRI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
2007 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::ADDRdRr), DestReg: Tmp)
2008 .addReg(RegNo: Regs[0].first, Flags: {}, SubReg: Regs[0].second)
2009 .addReg(RegNo: Regs[0].first, Flags: {}, SubReg: Regs[0].second);
2010 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::SBCRdRr), DestReg: ShrExtendReg)
2011 .addReg(RegNo: Tmp)
2012 .addReg(RegNo: Tmp);
2013 } else {
2014 ShrExtendReg = ZeroReg;
2015 }
2016 for (; ShiftAmt >= 8; ShiftAmt -= 8) {
2017 // Move all registers one to the right.
2018 for (size_t I = Regs.size() - 1; I != 0; I--) {
2019 Regs[I] = Regs[I - 1];
2020 }
2021
2022 // Zero or sign extend the most significant register.
2023 Regs[0] = std::pair(ShrExtendReg, 0);
2024
2025 // Continue shifts with the leftover registers.
2026 Regs = Regs.drop_front(N: 1);
2027 }
2028 }
2029
2030 // The bigger shifts are already handled above.
  assert((ShiftAmt < 8) && "Unexpected shift amount");
2032
2033 // Shift by four bits, using a complicated swap/eor/andi/eor sequence.
2034 // It only works for logical shifts because the bits shifted in are all
2035 // zeroes.
2036 // To shift a single byte right, it produces code like this:
2037 // swap r0
2038 // andi r0, 0x0f
2039 // For a two-byte (16-bit) shift, it adds the following instructions to shift
2040 // the upper byte into the lower byte:
2041 // swap r1
2042 // eor r0, r1
2043 // andi r1, 0x0f
2044 // eor r0, r1
2045 // For bigger shifts, it repeats the above sequence. For example, for a 3-byte
2046 // (24-bit) shift it adds:
2047 // swap r2
2048 // eor r1, r2
2049 // andi r2, 0x0f
2050 // eor r1, r2
2051 if (!ArithmeticShift && ShiftAmt >= 4) {
2052 Register Prev = 0;
2053 for (size_t I = 0; I < Regs.size(); I++) {
2054 size_t Idx = ShiftLeft ? I : Regs.size() - I - 1;
2055 Register SwapReg = MRI.createVirtualRegister(RegClass: &AVR::LD8RegClass);
2056 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::SWAPRd), DestReg: SwapReg)
2057 .addReg(RegNo: Regs[Idx].first, Flags: {}, SubReg: Regs[Idx].second);
2058 if (I != 0) {
2059 Register R = MRI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
2060 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::EORRdRr), DestReg: R)
2061 .addReg(RegNo: Prev)
2062 .addReg(RegNo: SwapReg);
2063 Prev = R;
2064 }
2065 Register AndReg = MRI.createVirtualRegister(RegClass: &AVR::LD8RegClass);
2066 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::ANDIRdK), DestReg: AndReg)
2067 .addReg(RegNo: SwapReg)
2068 .addImm(Val: ShiftLeft ? 0xf0 : 0x0f);
2069 if (I != 0) {
2070 Register R = MRI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
2071 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::EORRdRr), DestReg: R)
2072 .addReg(RegNo: Prev)
2073 .addReg(RegNo: AndReg);
2074 size_t PrevIdx = ShiftLeft ? Idx - 1 : Idx + 1;
2075 Regs[PrevIdx] = std::pair(R, 0);
2076 }
2077 Prev = AndReg;
2078 Regs[Idx] = std::pair(AndReg, 0);
2079 }
2080 ShiftAmt -= 4;
2081 }
2082
  // Shift by one. This is the fallback that always works, and the shift
  // operation that is used for 1-, 2-, and 3-bit shifts.
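  //
  // For example, a 16-bit left shift by one emits (register numbers are
  // illustrative):
  //   add r24, r24 ; low byte, the shifted-out bit lands in C
  //   adc r25, r25 ; high byte, C is shifted in at bit 0
  // Right shifts instead use lsr/asr on the first byte and ror on the rest.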
2085 while (ShiftLeft && ShiftAmt) {
2086 // Shift one to the left.
2087 for (ssize_t I = Regs.size() - 1; I >= 0; I--) {
2088 Register Out = MRI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
2089 Register In = Regs[I].first;
2090 Register InSubreg = Regs[I].second;
2091 if (I == (ssize_t)Regs.size() - 1) { // first iteration
2092 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::ADDRdRr), DestReg: Out)
2093 .addReg(RegNo: In, Flags: {}, SubReg: InSubreg)
2094 .addReg(RegNo: In, Flags: {}, SubReg: InSubreg);
2095 } else {
2096 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::ADCRdRr), DestReg: Out)
2097 .addReg(RegNo: In, Flags: {}, SubReg: InSubreg)
2098 .addReg(RegNo: In, Flags: {}, SubReg: InSubreg);
2099 }
2100 Regs[I] = std::pair(Out, 0);
2101 }
2102 ShiftAmt--;
2103 }
2104 while (!ShiftLeft && ShiftAmt) {
2105 // Shift one to the right.
2106 for (size_t I = 0; I < Regs.size(); I++) {
2107 Register Out = MRI.createVirtualRegister(RegClass: &AVR::GPR8RegClass);
2108 Register In = Regs[I].first;
2109 Register InSubreg = Regs[I].second;
2110 if (I == 0) {
2111 unsigned Opc = ArithmeticShift ? AVR::ASRRd : AVR::LSRRd;
2112 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: Opc), DestReg: Out).addReg(RegNo: In, Flags: {}, SubReg: InSubreg);
2113 } else {
2114 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::RORRd), DestReg: Out).addReg(RegNo: In, Flags: {}, SubReg: InSubreg);
2115 }
2116 Regs[I] = std::pair(Out, 0);
2117 }
2118 ShiftAmt--;
2119 }
2120
2121 if (ShiftAmt != 0) {
2122 llvm_unreachable("don't know how to shift!"); // sanity check
2123 }
2124}
2125
2126// Do a wide (32-bit) shift.
2127MachineBasicBlock *
2128AVRTargetLowering::insertWideShift(MachineInstr &MI,
2129 MachineBasicBlock *BB) const {
2130 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2131 const DebugLoc &dl = MI.getDebugLoc();
2132
2133 // How much to shift to the right (meaning: a negative number indicates a left
2134 // shift).
2135 int64_t ShiftAmt = MI.getOperand(i: 4).getImm();
2136 ISD::NodeType Opc;
2137 switch (MI.getOpcode()) {
2138 case AVR::Lsl32:
2139 Opc = ISD::SHL;
2140 break;
2141 case AVR::Lsr32:
2142 Opc = ISD::SRL;
2143 break;
2144 case AVR::Asr32:
2145 Opc = ISD::SRA;
2146 break;
  default:
    llvm_unreachable("unexpected wide shift opcode");
  }
2148
2149 // Read the input registers, with the most significant register at index 0.
2150 std::array<std::pair<Register, int>, 4> Registers = {
2151 std::pair(MI.getOperand(i: 3).getReg(), AVR::sub_hi),
2152 std::pair(MI.getOperand(i: 3).getReg(), AVR::sub_lo),
2153 std::pair(MI.getOperand(i: 2).getReg(), AVR::sub_hi),
2154 std::pair(MI.getOperand(i: 2).getReg(), AVR::sub_lo),
2155 };
2156
2157 // Do the shift. The registers are modified in-place.
2158 insertMultibyteShift(MI, BB, Regs: Registers, Opc, ShiftAmt);
2159
2160 // Combine the 8-bit registers into 16-bit register pairs.
  // This is done either from LSB to MSB or from MSB to LSB, depending on the
2162 // shift. It's an optimization so that the register allocator will use the
2163 // fewest movs possible (which order we use isn't a correctness issue, just an
2164 // optimization issue).
2165 // - lsl prefers starting from the most significant byte (2nd case).
2166 // - lshr prefers starting from the least significant byte (1st case).
2167 // - for ashr it depends on the number of shifted bytes.
2168 // Some shift operations still don't get the most optimal mov sequences even
2169 // with this distinction. TODO: figure out why and try to fix it (but we're
2170 // already equal to or faster than avr-gcc in all cases except ashr 8).
2171 if (Opc != ISD::SHL &&
2172 (Opc != ISD::SRA || (ShiftAmt < 16 || ShiftAmt >= 22))) {
2173 // Use the resulting registers starting with the least significant byte.
2174 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::REG_SEQUENCE), DestReg: MI.getOperand(i: 0).getReg())
2175 .addReg(RegNo: Registers[3].first, Flags: {}, SubReg: Registers[3].second)
2176 .addImm(Val: AVR::sub_lo)
2177 .addReg(RegNo: Registers[2].first, Flags: {}, SubReg: Registers[2].second)
2178 .addImm(Val: AVR::sub_hi);
2179 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::REG_SEQUENCE), DestReg: MI.getOperand(i: 1).getReg())
2180 .addReg(RegNo: Registers[1].first, Flags: {}, SubReg: Registers[1].second)
2181 .addImm(Val: AVR::sub_lo)
2182 .addReg(RegNo: Registers[0].first, Flags: {}, SubReg: Registers[0].second)
2183 .addImm(Val: AVR::sub_hi);
2184 } else {
2185 // Use the resulting registers starting with the most significant byte.
2186 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::REG_SEQUENCE), DestReg: MI.getOperand(i: 1).getReg())
2187 .addReg(RegNo: Registers[0].first, Flags: {}, SubReg: Registers[0].second)
2188 .addImm(Val: AVR::sub_hi)
2189 .addReg(RegNo: Registers[1].first, Flags: {}, SubReg: Registers[1].second)
2190 .addImm(Val: AVR::sub_lo);
2191 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: AVR::REG_SEQUENCE), DestReg: MI.getOperand(i: 0).getReg())
2192 .addReg(RegNo: Registers[2].first, Flags: {}, SubReg: Registers[2].second)
2193 .addImm(Val: AVR::sub_hi)
2194 .addReg(RegNo: Registers[3].first, Flags: {}, SubReg: Registers[3].second)
2195 .addImm(Val: AVR::sub_lo);
2196 }
2197
2198 // Remove the pseudo instruction.
2199 MI.eraseFromParent();
2200 return BB;
2201}
2202
2203static bool isCopyMulResult(MachineBasicBlock::iterator const &I) {
2204 if (I->getOpcode() == AVR::COPY) {
2205 Register SrcReg = I->getOperand(i: 1).getReg();
2206 return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
2207 }
2208
2209 return false;
2210}
2211
// The mul instructions wreak havoc on our zero_reg R1. We need to clear it
// after the result has been evacuated. This is probably not the best way to do
// it, but it works for now.
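// For example, around an 8-bit multiply the emitted sequence looks roughly
// like this (register numbers are illustrative):
//   mul r24, r22 ; result is left in r1:r0
//   mov r24, r0  ; the COPY of the result that is skipped over below
//   eor r1, r1   ; clear the zero register again (inserted here)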
2215MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
2216 MachineBasicBlock *BB) const {
2217 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2218 MachineBasicBlock::iterator I(MI);
2219 ++I; // in any case insert *after* the mul instruction
2220 if (isCopyMulResult(I))
2221 ++I;
2222 if (isCopyMulResult(I))
2223 ++I;
2224 BuildMI(BB&: *BB, I, MIMD: MI.getDebugLoc(), MCID: TII.get(Opcode: AVR::EORRdRr), DestReg: AVR::R1)
2225 .addReg(RegNo: AVR::R1)
2226 .addReg(RegNo: AVR::R1);
2227 return BB;
2228}
2229
2230// Insert a read from the zero register.
2231MachineBasicBlock *
2232AVRTargetLowering::insertCopyZero(MachineInstr &MI,
2233 MachineBasicBlock *BB) const {
2234 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2235 MachineBasicBlock::iterator I(MI);
2236 BuildMI(BB&: *BB, I, MIMD: MI.getDebugLoc(), MCID: TII.get(Opcode: AVR::COPY))
2237 .add(MO: MI.getOperand(i: 0))
2238 .addReg(RegNo: Subtarget.getZeroRegister());
2239 MI.eraseFromParent();
2240 return BB;
2241}
2242
// Lower an atomicrmw operation to: disable interrupts, do the operation, and
// restore interrupts. This works because all AVR microcontrollers are
// single-core.
2245MachineBasicBlock *AVRTargetLowering::insertAtomicArithmeticOp(
2246 MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, int Width) const {
2247 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
2248 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2249 MachineBasicBlock::iterator I(MI);
2250 DebugLoc dl = MI.getDebugLoc();
2251
2252 // Example instruction sequence, for an atomic 8-bit add:
2253 // ldi r25, 5
2254 // in r0, SREG
2255 // cli
2256 // ld r24, X
2257 // add r25, r24
2258 // st X, r25
2259 // out SREG, r0
2260
2261 const TargetRegisterClass *RC =
2262 (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;
2263 unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
2264 unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
2265
2266 // Disable interrupts.
2267 BuildMI(BB&: *BB, I, MIMD: dl, MCID: TII.get(Opcode: AVR::INRdA), DestReg: Subtarget.getTmpRegister())
2268 .addImm(Val: Subtarget.getIORegSREG());
2269 BuildMI(BB&: *BB, I, MIMD: dl, MCID: TII.get(Opcode: AVR::BCLRs)).addImm(Val: 7);
2270
2271 // Load the original value.
2272 BuildMI(BB&: *BB, I, MIMD: dl, MCID: TII.get(Opcode: LoadOpcode), DestReg: MI.getOperand(i: 0).getReg())
2273 .add(MO: MI.getOperand(i: 1));
2274
2275 // Do the arithmetic operation.
2276 Register Result = MRI.createVirtualRegister(RegClass: RC);
2277 BuildMI(BB&: *BB, I, MIMD: dl, MCID: TII.get(Opcode), DestReg: Result)
2278 .addReg(RegNo: MI.getOperand(i: 0).getReg())
2279 .add(MO: MI.getOperand(i: 2));
2280
2281 // Store the result.
2282 BuildMI(BB&: *BB, I, MIMD: dl, MCID: TII.get(Opcode: StoreOpcode))
2283 .add(MO: MI.getOperand(i: 1))
2284 .addReg(RegNo: Result);
2285
2286 // Restore interrupts.
2287 BuildMI(BB&: *BB, I, MIMD: dl, MCID: TII.get(Opcode: AVR::OUTARr))
2288 .addImm(Val: Subtarget.getIORegSREG())
2289 .addReg(RegNo: Subtarget.getTmpRegister());
2290
2291 // Remove the pseudo instruction.
2292 MI.eraseFromParent();
2293 return BB;
2294}
2295
2296MachineBasicBlock *
2297AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
2298 MachineBasicBlock *MBB) const {
2299 int Opc = MI.getOpcode();
2300 const AVRSubtarget &STI = MBB->getParent()->getSubtarget<AVRSubtarget>();
2301
  // Pseudo shift instructions with a non-constant shift amount are expanded
  // into a loop.
2304 switch (Opc) {
2305 case AVR::Lsl8:
2306 case AVR::Lsl16:
2307 case AVR::Lsr8:
2308 case AVR::Lsr16:
2309 case AVR::Rol8:
2310 case AVR::Rol16:
2311 case AVR::Ror8:
2312 case AVR::Ror16:
2313 case AVR::Asr8:
2314 case AVR::Asr16:
2315 return insertShift(MI, BB: MBB, Tiny: STI.hasTinyEncoding());
2316 case AVR::Lsl32:
2317 case AVR::Lsr32:
2318 case AVR::Asr32:
2319 return insertWideShift(MI, BB: MBB);
2320 case AVR::MULRdRr:
2321 case AVR::MULSRdRr:
2322 return insertMul(MI, BB: MBB);
2323 case AVR::CopyZero:
2324 return insertCopyZero(MI, BB: MBB);
2325 case AVR::AtomicLoadAdd8:
2326 return insertAtomicArithmeticOp(MI, BB: MBB, Opcode: AVR::ADDRdRr, Width: 8);
2327 case AVR::AtomicLoadAdd16:
2328 return insertAtomicArithmeticOp(MI, BB: MBB, Opcode: AVR::ADDWRdRr, Width: 16);
2329 case AVR::AtomicLoadSub8:
2330 return insertAtomicArithmeticOp(MI, BB: MBB, Opcode: AVR::SUBRdRr, Width: 8);
2331 case AVR::AtomicLoadSub16:
2332 return insertAtomicArithmeticOp(MI, BB: MBB, Opcode: AVR::SUBWRdRr, Width: 16);
2333 case AVR::AtomicLoadAnd8:
2334 return insertAtomicArithmeticOp(MI, BB: MBB, Opcode: AVR::ANDRdRr, Width: 8);
2335 case AVR::AtomicLoadAnd16:
2336 return insertAtomicArithmeticOp(MI, BB: MBB, Opcode: AVR::ANDWRdRr, Width: 16);
2337 case AVR::AtomicLoadOr8:
2338 return insertAtomicArithmeticOp(MI, BB: MBB, Opcode: AVR::ORRdRr, Width: 8);
2339 case AVR::AtomicLoadOr16:
2340 return insertAtomicArithmeticOp(MI, BB: MBB, Opcode: AVR::ORWRdRr, Width: 16);
2341 case AVR::AtomicLoadXor8:
2342 return insertAtomicArithmeticOp(MI, BB: MBB, Opcode: AVR::EORRdRr, Width: 8);
2343 case AVR::AtomicLoadXor16:
2344 return insertAtomicArithmeticOp(MI, BB: MBB, Opcode: AVR::EORWRdRr, Width: 16);
2345 }
2346
2347 assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
2348 "Unexpected instr type to insert");
2349
2350 const AVRInstrInfo &TII = (const AVRInstrInfo &)*MI.getParent()
2351 ->getParent()
2352 ->getSubtarget()
2353 .getInstrInfo();
2354 DebugLoc dl = MI.getDebugLoc();
2355
2356 // To "insert" a SELECT instruction, we insert the diamond
2357 // control-flow pattern. The incoming instruction knows the
2358 // destination vreg to set, the condition code register to branch
2359 // on, the true/false values to select between, and a branch opcode
2360 // to use.
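  //
  // A sketch of the resulting control flow (block names are illustrative):
  //
  //   MBB:      brCC trueMBB
  //             rjmp falseMBB
  //   falseMBB: rjmp trueMBB
  //   trueMBB:  %dst = PHI [%trueVal, MBB], [%falseVal, falseMBB]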
2361
2362 MachineFunction *MF = MBB->getParent();
2363 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
2364 MachineBasicBlock *FallThrough = MBB->getFallThrough();
2365
  // If the current basic block falls through to another basic block, we must
  // insert an unconditional branch to the fallthrough destination, since we
  // are about to insert new basic blocks at the prior fallthrough point.
2369 if (FallThrough != nullptr) {
2370 BuildMI(BB: MBB, MIMD: dl, MCID: TII.get(Opcode: AVR::RJMPk)).addMBB(MBB: FallThrough);
2371 }
2372
2373 MachineBasicBlock *trueMBB = MF->CreateMachineBasicBlock(BB: LLVM_BB);
2374 MachineBasicBlock *falseMBB = MF->CreateMachineBasicBlock(BB: LLVM_BB);
2375
  MachineFunction::iterator I = std::next(x: MBB->getIterator());
2381 MF->insert(MBBI: I, MBB: trueMBB);
2382 MF->insert(MBBI: I, MBB: falseMBB);
2383
2384 // Set the call frame size on entry to the new basic blocks.
2385 unsigned CallFrameSize = TII.getCallFrameSizeAt(MI);
2386 trueMBB->setCallFrameSize(CallFrameSize);
2387 falseMBB->setCallFrameSize(CallFrameSize);
2388
2389 // Transfer remaining instructions and all successors of the current
2390 // block to the block which will contain the Phi node for the
2391 // select.
2392 trueMBB->splice(Where: trueMBB->begin(), Other: MBB,
2393 From: std::next(x: MachineBasicBlock::iterator(MI)), To: MBB->end());
2394 trueMBB->transferSuccessorsAndUpdatePHIs(FromMBB: MBB);
2395
2396 AVRCC::CondCodes CC = (AVRCC::CondCodes)MI.getOperand(i: 3).getImm();
2397 BuildMI(BB: MBB, MIMD: dl, MCID: TII.getBrCond(CC)).addMBB(MBB: trueMBB);
2398 BuildMI(BB: MBB, MIMD: dl, MCID: TII.get(Opcode: AVR::RJMPk)).addMBB(MBB: falseMBB);
2399 MBB->addSuccessor(Succ: falseMBB);
2400 MBB->addSuccessor(Succ: trueMBB);
2401
2402 // Unconditionally flow back to the true block
2403 BuildMI(BB: falseMBB, MIMD: dl, MCID: TII.get(Opcode: AVR::RJMPk)).addMBB(MBB: trueMBB);
2404 falseMBB->addSuccessor(Succ: trueMBB);
2405
2406 // Set up the Phi node to determine where we came from
2407 BuildMI(BB&: *trueMBB, I: trueMBB->begin(), MIMD: dl, MCID: TII.get(Opcode: AVR::PHI),
2408 DestReg: MI.getOperand(i: 0).getReg())
2409 .addReg(RegNo: MI.getOperand(i: 1).getReg())
2410 .addMBB(MBB)
2411 .addReg(RegNo: MI.getOperand(i: 2).getReg())
2412 .addMBB(MBB: falseMBB);
2413
2414 MI.eraseFromParent(); // The pseudo instruction is gone now.
2415 return trueMBB;
2416}
2417
2418//===----------------------------------------------------------------------===//
2419// Inline Asm Support
2420//===----------------------------------------------------------------------===//
2421
2422AVRTargetLowering::ConstraintType
2423AVRTargetLowering::getConstraintType(StringRef Constraint) const {
2424 if (Constraint.size() == 1) {
2425 // See http://www.nongnu.org/avr-libc/user-manual/inline_asm.html
2426 switch (Constraint[0]) {
2427 default:
2428 break;
2429 case 'a': // Simple upper registers
2430 case 'b': // Base pointer registers pairs
2431 case 'd': // Upper register
2432 case 'l': // Lower registers
2433 case 'e': // Pointer register pairs
2434 case 'q': // Stack pointer register
2435 case 'r': // Any register
2436 case 'w': // Special upper register pairs
2437 return C_RegisterClass;
2438 case 't': // Temporary register
2439 case 'x':
2440 case 'X': // Pointer register pair X
2441 case 'y':
2442 case 'Y': // Pointer register pair Y
2443 case 'z':
2444 case 'Z': // Pointer register pair Z
2445 return C_Register;
2446 case 'Q': // A memory address based on Y or Z pointer with displacement.
2447 return C_Memory;
2448 case 'G': // Floating point constant
2449 case 'I': // 6-bit positive integer constant
2450 case 'J': // 6-bit negative integer constant
2451 case 'K': // Integer constant (Range: 2)
2452 case 'L': // Integer constant (Range: 0)
2453 case 'M': // 8-bit integer constant
2454 case 'N': // Integer constant (Range: -1)
2455 case 'O': // Integer constant (Range: 8, 16, 24)
2456 case 'P': // Integer constant (Range: 1)
    case 'R': // Integer constant (Range: -6 to 5)
2458 return C_Immediate;
2459 }
2460 }
2461
2462 return TargetLowering::getConstraintType(Constraint);
2463}
2464
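// As a sketch of what the 'Q' constraint accepts (illustrative user code, not
// part of this file):
//   asm("ldd %0, %1" : "=r"(v) : "Q"(obj.field));
// i.e. a memory operand addressed as Y or Z plus a small constant
// displacement, matching the ldd/std addressing mode.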
2465InlineAsm::ConstraintCode
2466AVRTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Not sure if this is actually the right thing to do, but we have to do
  // *something* [agnat]
2469 switch (ConstraintCode[0]) {
2470 case 'Q':
2471 return InlineAsm::ConstraintCode::Q;
2472 }
2473 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
2474}
2475
2476AVRTargetLowering::ConstraintWeight
2477AVRTargetLowering::getSingleConstraintMatchWeight(
2478 AsmOperandInfo &info, const char *constraint) const {
2479 ConstraintWeight weight = CW_Invalid;
2480 Value *CallOperandVal = info.CallOperandVal;
2481
2482 // If we don't have a value, we can't do a match,
2483 // but allow it at the lowest weight.
2484 // (this behaviour has been copied from the ARM backend)
2485 if (!CallOperandVal) {
2486 return CW_Default;
2487 }
2488
2489 // Look at the constraint type.
2490 switch (*constraint) {
2491 default:
2492 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
2493 break;
2494 case 'd':
2495 case 'r':
2496 case 'l':
2497 weight = CW_Register;
2498 break;
2499 case 'a':
2500 case 'b':
2501 case 'e':
2502 case 'q':
2503 case 't':
2504 case 'w':
2505 case 'x':
2506 case 'X':
2507 case 'y':
2508 case 'Y':
2509 case 'z':
2510 case 'Z':
2511 weight = CW_SpecificReg;
2512 break;
2513 case 'G':
2514 if (const ConstantFP *C = dyn_cast<ConstantFP>(Val: CallOperandVal)) {
2515 if (C->isZero()) {
2516 weight = CW_Constant;
2517 }
2518 }
2519 break;
2520 case 'I':
2521 if (const ConstantInt *C = dyn_cast<ConstantInt>(Val: CallOperandVal)) {
2522 if (isUInt<6>(x: C->getZExtValue())) {
2523 weight = CW_Constant;
2524 }
2525 }
2526 break;
2527 case 'J':
2528 if (const ConstantInt *C = dyn_cast<ConstantInt>(Val: CallOperandVal)) {
2529 if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
2530 weight = CW_Constant;
2531 }
2532 }
2533 break;
2534 case 'K':
2535 if (const ConstantInt *C = dyn_cast<ConstantInt>(Val: CallOperandVal)) {
2536 if (C->getZExtValue() == 2) {
2537 weight = CW_Constant;
2538 }
2539 }
2540 break;
2541 case 'L':
2542 if (const ConstantInt *C = dyn_cast<ConstantInt>(Val: CallOperandVal)) {
2543 if (C->getZExtValue() == 0) {
2544 weight = CW_Constant;
2545 }
2546 }
2547 break;
2548 case 'M':
2549 if (const ConstantInt *C = dyn_cast<ConstantInt>(Val: CallOperandVal)) {
2550 if (isUInt<8>(x: C->getZExtValue())) {
2551 weight = CW_Constant;
2552 }
2553 }
2554 break;
2555 case 'N':
2556 if (const ConstantInt *C = dyn_cast<ConstantInt>(Val: CallOperandVal)) {
2557 if (C->getSExtValue() == -1) {
2558 weight = CW_Constant;
2559 }
2560 }
2561 break;
2562 case 'O':
2563 if (const ConstantInt *C = dyn_cast<ConstantInt>(Val: CallOperandVal)) {
2564 if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
2565 (C->getZExtValue() == 24)) {
2566 weight = CW_Constant;
2567 }
2568 }
2569 break;
2570 case 'P':
2571 if (const ConstantInt *C = dyn_cast<ConstantInt>(Val: CallOperandVal)) {
2572 if (C->getZExtValue() == 1) {
2573 weight = CW_Constant;
2574 }
2575 }
2576 break;
2577 case 'R':
2578 if (const ConstantInt *C = dyn_cast<ConstantInt>(Val: CallOperandVal)) {
2579 if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
2580 weight = CW_Constant;
2581 }
2582 }
2583 break;
2584 case 'Q':
2585 weight = CW_Memory;
2586 break;
2587 }
2588
2589 return weight;
2590}
2591
2592std::pair<unsigned, const TargetRegisterClass *>
2593AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
2594 StringRef Constraint,
2595 MVT VT) const {
2596 if (Constraint.size() == 1) {
2597 switch (Constraint[0]) {
2598 case 'a': // Simple upper registers r16..r23.
2599 if (VT == MVT::i8)
2600 return std::make_pair(x: 0U, y: &AVR::LD8loRegClass);
2601 else if (VT == MVT::i16)
2602 return std::make_pair(x: 0U, y: &AVR::DREGSLD8loRegClass);
2603 break;
2604 case 'b': // Base pointer registers: y, z.
2605 if (VT == MVT::i8 || VT == MVT::i16)
2606 return std::make_pair(x: 0U, y: &AVR::PTRDISPREGSRegClass);
2607 break;
2608 case 'd': // Upper registers r16..r31.
2609 if (VT == MVT::i8)
2610 return std::make_pair(x: 0U, y: &AVR::LD8RegClass);
2611 else if (VT == MVT::i16)
2612 return std::make_pair(x: 0U, y: &AVR::DLDREGSRegClass);
2613 break;
2614 case 'l': // Lower registers r0..r15.
2615 if (VT == MVT::i8)
2616 return std::make_pair(x: 0U, y: &AVR::GPR8loRegClass);
2617 else if (VT == MVT::i16)
2618 return std::make_pair(x: 0U, y: &AVR::DREGSloRegClass);
2619 break;
2620 case 'e': // Pointer register pairs: x, y, z.
2621 if (VT == MVT::i8 || VT == MVT::i16)
2622 return std::make_pair(x: 0U, y: &AVR::PTRREGSRegClass);
2623 break;
2624 case 'q': // Stack pointer register: SPH:SPL.
2625 return std::make_pair(x: 0U, y: &AVR::GPRSPRegClass);
2626 case 'r': // Any register: r0..r31.
2627 if (VT == MVT::i8)
2628 return std::make_pair(x: 0U, y: &AVR::GPR8RegClass);
2629 else if (VT == MVT::i16)
2630 return std::make_pair(x: 0U, y: &AVR::DREGSRegClass);
2631 break;
2632 case 't': // Temporary register: r0.
2633 if (VT == MVT::i8)
2634 return std::make_pair(x: unsigned(Subtarget.getTmpRegister()),
2635 y: &AVR::GPR8RegClass);
2636 break;
2637 case 'w': // Special upper register pairs: r24, r26, r28, r30.
2638 if (VT == MVT::i8 || VT == MVT::i16)
2639 return std::make_pair(x: 0U, y: &AVR::IWREGSRegClass);
2640 break;
2641 case 'x': // Pointer register pair X: r27:r26.
2642 case 'X':
2643 if (VT == MVT::i8 || VT == MVT::i16)
2644 return std::make_pair(x: unsigned(AVR::R27R26), y: &AVR::PTRREGSRegClass);
2645 break;
2646 case 'y': // Pointer register pair Y: r29:r28.
2647 case 'Y':
2648 if (VT == MVT::i8 || VT == MVT::i16)
2649 return std::make_pair(x: unsigned(AVR::R29R28), y: &AVR::PTRREGSRegClass);
2650 break;
2651 case 'z': // Pointer register pair Z: r31:r30.
2652 case 'Z':
2653 if (VT == MVT::i8 || VT == MVT::i16)
2654 return std::make_pair(x: unsigned(AVR::R31R30), y: &AVR::PTRREGSRegClass);
2655 break;
2656 default:
2657 break;
2658 }
2659 }
2660
2661 return TargetLowering::getRegForInlineAsmConstraint(
2662 TRI: Subtarget.getRegisterInfo(), Constraint, VT);
2663}
2664
2665void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2666 StringRef Constraint,
2667 std::vector<SDValue> &Ops,
2668 SelectionDAG &DAG) const {
2669 SDValue Result;
2670 SDLoc DL(Op);
2671 EVT Ty = Op.getValueType();
2672
  // Currently, only constraints of length 1 are supported.
2674 if (Constraint.size() != 1) {
2675 return;
2676 }
2677
2678 char ConstraintLetter = Constraint[0];
2679 switch (ConstraintLetter) {
2680 default:
2681 break;
2682 // Deal with integers first:
2683 case 'I':
2684 case 'J':
2685 case 'K':
2686 case 'L':
2687 case 'M':
2688 case 'N':
2689 case 'O':
2690 case 'P':
2691 case 'R': {
2692 const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op);
2693 if (!C) {
2694 return;
2695 }
2696
2697 int64_t CVal64 = C->getSExtValue();
2698 uint64_t CUVal64 = C->getZExtValue();
2699 switch (ConstraintLetter) {
2700 case 'I': // 0..63
2701 if (!isUInt<6>(x: CUVal64))
2702 return;
2703 Result = DAG.getTargetConstant(Val: CUVal64, DL, VT: Ty);
2704 break;
2705 case 'J': // -63..0
2706 if (CVal64 < -63 || CVal64 > 0)
2707 return;
2708 Result = DAG.getTargetConstant(Val: CVal64, DL, VT: Ty);
2709 break;
2710 case 'K': // 2
2711 if (CUVal64 != 2)
2712 return;
2713 Result = DAG.getTargetConstant(Val: CUVal64, DL, VT: Ty);
2714 break;
2715 case 'L': // 0
2716 if (CUVal64 != 0)
2717 return;
2718 Result = DAG.getTargetConstant(Val: CUVal64, DL, VT: Ty);
2719 break;
2720 case 'M': // 0..255
2721 if (!isUInt<8>(x: CUVal64))
2722 return;
2723 // i8 type may be printed as a negative number,
2724 // e.g. 254 would be printed as -2,
2725 // so we force it to i16 at least.
2726 if (Ty.getSimpleVT() == MVT::i8) {
2727 Ty = MVT::i16;
2728 }
2729 Result = DAG.getTargetConstant(Val: CUVal64, DL, VT: Ty);
2730 break;
2731 case 'N': // -1
2732 if (CVal64 != -1)
2733 return;
2734 Result = DAG.getTargetConstant(Val: CVal64, DL, VT: Ty);
2735 break;
2736 case 'O': // 8, 16, 24
2737 if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
2738 return;
2739 Result = DAG.getTargetConstant(Val: CUVal64, DL, VT: Ty);
2740 break;
2741 case 'P': // 1
2742 if (CUVal64 != 1)
2743 return;
2744 Result = DAG.getTargetConstant(Val: CUVal64, DL, VT: Ty);
2745 break;
2746 case 'R': // -6..5
2747 if (CVal64 < -6 || CVal64 > 5)
2748 return;
2749 Result = DAG.getTargetConstant(Val: CVal64, DL, VT: Ty);
2750 break;
2751 }
2752
2753 break;
2754 }
2755 case 'G':
2756 const ConstantFPSDNode *FC = dyn_cast<ConstantFPSDNode>(Val&: Op);
2757 if (!FC || !FC->isZero())
2758 return;
2759 // Soften float to i8 0
2760 Result = DAG.getTargetConstant(Val: 0, DL, VT: MVT::i8);
2761 break;
2762 }
2763
2764 if (Result.getNode()) {
2765 Ops.push_back(x: Result);
2766 return;
2767 }
2768
2769 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
2770}
2771
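// Resolve a named register for the llvm.read_register / llvm.write_register
// intrinsics. As an illustrative use (not from this file), the IR
//   %sp = call i16 @llvm.read_register.i16(metadata !0)
//   !0 = !{!"sp"}
// reads the stack pointer by the name handled below.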
2772Register AVRTargetLowering::getRegisterByName(const char *RegName, LLT VT,
2773 const MachineFunction &MF) const {
2774 Register Reg;
2775
2776 if (VT == LLT::scalar(SizeInBits: 8)) {
2777 Reg = StringSwitch<unsigned>(RegName)
2778 .Case(S: "r0", Value: AVR::R0)
2779 .Case(S: "r1", Value: AVR::R1)
2780 .Default(Value: 0);
2781 } else {
2782 Reg = StringSwitch<unsigned>(RegName)
2783 .Case(S: "r0", Value: AVR::R1R0)
2784 .Case(S: "sp", Value: AVR::SP)
2785 .Default(Value: 0);
2786 }
2787
2788 if (Reg)
2789 return Reg;
2790
2791 report_fatal_error(
2792 reason: Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
2793}
2794
2795} // end of namespace llvm
2796