//===-- MSP430ISelLowering.cpp - MSP430 DAG Lowering Implementation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MSP430TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "MSP430ISelLowering.h"
#include "MSP430.h"
#include "MSP430MachineFunctionInfo.h"
#include "MSP430Subtarget.h"
#include "MSP430TargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "msp430-lower"

static cl::opt<bool> MSP430NoLegalImmediate(
    "msp430-no-legal-immediate", cl::Hidden,
    cl::desc("Enable non legal immediates (for testing purposes only)"),
    cl::init(false));

MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM,
                                           const MSP430Subtarget &STI)
    : TargetLowering(TM) {

  // Set up the register classes.
  addRegisterClass(MVT::i8, &MSP430::GR8RegClass);
  addRegisterClass(MVT::i16, &MSP430::GR16RegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  // Provide all sorts of operation actions.
  setStackPointerRegisterToSaveRestore(MSP430::SP);
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // We have post-incremented loads / stores.
  setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);

  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
  }

  // We don't have any truncstores.
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setOperationAction(ISD::SRA, MVT::i8, Custom);
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i16, Custom);
  setOperationAction(ISD::SHL, MVT::i16, Custom);
  setOperationAction(ISD::SRL, MVT::i16, Custom);
  setOperationAction(ISD::ROTL, MVT::i8, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Expand);
  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);
  setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i16, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i16, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Custom);
  setOperationAction(ISD::BR_CC, MVT::i16, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i8, Expand);
  setOperationAction(ISD::SELECT, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

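  // MSP430 has no bit-counting instructions, so leading/trailing-zero counts
  // and population count are left to the generic legalizer to expand.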
  setOperationAction(ISD::CTTZ, MVT::i8, Expand);
  setOperationAction(ISD::CTTZ, MVT::i16, Expand);
  setOperationAction(ISD::CTLZ, MVT::i8, Expand);
  setOperationAction(ISD::CTLZ, MVT::i16, Expand);
  setOperationAction(ISD::CTPOP, MVT::i8, Expand);
  setOperationAction(ISD::CTPOP, MVT::i16, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i8, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i8, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i8, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // FIXME: Implement efficiently multiplication by a constant
  setOperationAction(ISD::MUL, MVT::i8, Promote);
  setOperationAction(ISD::MULHS, MVT::i8, Promote);
  setOperationAction(ISD::MULHU, MVT::i8, Promote);
  setOperationAction(ISD::SMUL_LOHI, MVT::i8, Promote);
  setOperationAction(ISD::UMUL_LOHI, MVT::i8, Promote);
  setOperationAction(ISD::MUL, MVT::i16, LibCall);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);

  setOperationAction(ISD::UDIV, MVT::i8, Promote);
  setOperationAction(ISD::UDIVREM, MVT::i8, Promote);
  setOperationAction(ISD::UREM, MVT::i8, Promote);
  setOperationAction(ISD::SDIV, MVT::i8, Promote);
  setOperationAction(ISD::SDIVREM, MVT::i8, Promote);
  setOperationAction(ISD::SREM, MVT::i8, Promote);
  setOperationAction(ISD::UDIV, MVT::i16, LibCall);
  setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, LibCall);
  setOperationAction(ISD::SDIV, MVT::i16, LibCall);
  setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i16, LibCall);

  // varargs support
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i16, Custom);

  // EABI Libcalls - EABI Section 6.2
  const struct {
    const RTLIB::Libcall Op;
    const char * const Name;
    const ISD::CondCode Cond;
  } LibraryCalls[] = {
    // Floating point conversions - EABI Table 6
    { RTLIB::FPROUND_F64_F32, "__mspabi_cvtdf", ISD::SETCC_INVALID },
    { RTLIB::FPEXT_F32_F64, "__mspabi_cvtfd", ISD::SETCC_INVALID },
    // The following is NOT implemented in libgcc
    //{ RTLIB::FPTOSINT_F64_I16, "__mspabi_fixdi", ISD::SETCC_INVALID },
    { RTLIB::FPTOSINT_F64_I32, "__mspabi_fixdli", ISD::SETCC_INVALID },
    { RTLIB::FPTOSINT_F64_I64, "__mspabi_fixdlli", ISD::SETCC_INVALID },
    // The following is NOT implemented in libgcc
    //{ RTLIB::FPTOUINT_F64_I16, "__mspabi_fixdu", ISD::SETCC_INVALID },
    { RTLIB::FPTOUINT_F64_I32, "__mspabi_fixdul", ISD::SETCC_INVALID },
    { RTLIB::FPTOUINT_F64_I64, "__mspabi_fixdull", ISD::SETCC_INVALID },
    // The following is NOT implemented in libgcc
    //{ RTLIB::FPTOSINT_F32_I16, "__mspabi_fixfi", ISD::SETCC_INVALID },
    { RTLIB::FPTOSINT_F32_I32, "__mspabi_fixfli", ISD::SETCC_INVALID },
    { RTLIB::FPTOSINT_F32_I64, "__mspabi_fixflli", ISD::SETCC_INVALID },
    // The following is NOT implemented in libgcc
    //{ RTLIB::FPTOUINT_F32_I16, "__mspabi_fixfu", ISD::SETCC_INVALID },
    { RTLIB::FPTOUINT_F32_I32, "__mspabi_fixful", ISD::SETCC_INVALID },
    { RTLIB::FPTOUINT_F32_I64, "__mspabi_fixfull", ISD::SETCC_INVALID },
    // TODO The following IS implemented in libgcc
    //{ RTLIB::SINTTOFP_I16_F64, "__mspabi_fltid", ISD::SETCC_INVALID },
    { RTLIB::SINTTOFP_I32_F64, "__mspabi_fltlid", ISD::SETCC_INVALID },
    // TODO The following IS implemented in libgcc but is not in the EABI
    { RTLIB::SINTTOFP_I64_F64, "__mspabi_fltllid", ISD::SETCC_INVALID },
    // TODO The following IS implemented in libgcc
    //{ RTLIB::UINTTOFP_I16_F64, "__mspabi_fltud", ISD::SETCC_INVALID },
    { RTLIB::UINTTOFP_I32_F64, "__mspabi_fltuld", ISD::SETCC_INVALID },
    // The following IS implemented in libgcc but is not in the EABI
    { RTLIB::UINTTOFP_I64_F64, "__mspabi_fltulld", ISD::SETCC_INVALID },
    // TODO The following IS implemented in libgcc
    //{ RTLIB::SINTTOFP_I16_F32, "__mspabi_fltif", ISD::SETCC_INVALID },
    { RTLIB::SINTTOFP_I32_F32, "__mspabi_fltlif", ISD::SETCC_INVALID },
    // TODO The following IS implemented in libgcc but is not in the EABI
    { RTLIB::SINTTOFP_I64_F32, "__mspabi_fltllif", ISD::SETCC_INVALID },
    // TODO The following IS implemented in libgcc
    //{ RTLIB::UINTTOFP_I16_F32, "__mspabi_fltuf", ISD::SETCC_INVALID },
    { RTLIB::UINTTOFP_I32_F32, "__mspabi_fltulf", ISD::SETCC_INVALID },
    // The following IS implemented in libgcc but is not in the EABI
    { RTLIB::UINTTOFP_I64_F32, "__mspabi_fltullf", ISD::SETCC_INVALID },

    // Floating point comparisons - EABI Table 7
    { RTLIB::OEQ_F64, "__mspabi_cmpd", ISD::SETEQ },
    { RTLIB::UNE_F64, "__mspabi_cmpd", ISD::SETNE },
    { RTLIB::OGE_F64, "__mspabi_cmpd", ISD::SETGE },
    { RTLIB::OLT_F64, "__mspabi_cmpd", ISD::SETLT },
    { RTLIB::OLE_F64, "__mspabi_cmpd", ISD::SETLE },
    { RTLIB::OGT_F64, "__mspabi_cmpd", ISD::SETGT },
    { RTLIB::OEQ_F32, "__mspabi_cmpf", ISD::SETEQ },
    { RTLIB::UNE_F32, "__mspabi_cmpf", ISD::SETNE },
    { RTLIB::OGE_F32, "__mspabi_cmpf", ISD::SETGE },
    { RTLIB::OLT_F32, "__mspabi_cmpf", ISD::SETLT },
    { RTLIB::OLE_F32, "__mspabi_cmpf", ISD::SETLE },
    { RTLIB::OGT_F32, "__mspabi_cmpf", ISD::SETGT },

    // Floating point arithmetic - EABI Table 8
    { RTLIB::ADD_F64, "__mspabi_addd", ISD::SETCC_INVALID },
    { RTLIB::ADD_F32, "__mspabi_addf", ISD::SETCC_INVALID },
    { RTLIB::DIV_F64, "__mspabi_divd", ISD::SETCC_INVALID },
    { RTLIB::DIV_F32, "__mspabi_divf", ISD::SETCC_INVALID },
    { RTLIB::MUL_F64, "__mspabi_mpyd", ISD::SETCC_INVALID },
    { RTLIB::MUL_F32, "__mspabi_mpyf", ISD::SETCC_INVALID },
    { RTLIB::SUB_F64, "__mspabi_subd", ISD::SETCC_INVALID },
    { RTLIB::SUB_F32, "__mspabi_subf", ISD::SETCC_INVALID },
    // The following are NOT implemented in libgcc
    // { RTLIB::NEG_F64, "__mspabi_negd", ISD::SETCC_INVALID },
    // { RTLIB::NEG_F32, "__mspabi_negf", ISD::SETCC_INVALID },

    // Universal Integer Operations - EABI Table 9
    { RTLIB::SDIV_I16, "__mspabi_divi", ISD::SETCC_INVALID },
    { RTLIB::SDIV_I32, "__mspabi_divli", ISD::SETCC_INVALID },
    { RTLIB::SDIV_I64, "__mspabi_divlli", ISD::SETCC_INVALID },
    { RTLIB::UDIV_I16, "__mspabi_divu", ISD::SETCC_INVALID },
    { RTLIB::UDIV_I32, "__mspabi_divul", ISD::SETCC_INVALID },
    { RTLIB::UDIV_I64, "__mspabi_divull", ISD::SETCC_INVALID },
    { RTLIB::SREM_I16, "__mspabi_remi", ISD::SETCC_INVALID },
    { RTLIB::SREM_I32, "__mspabi_remli", ISD::SETCC_INVALID },
    { RTLIB::SREM_I64, "__mspabi_remlli", ISD::SETCC_INVALID },
    { RTLIB::UREM_I16, "__mspabi_remu", ISD::SETCC_INVALID },
    { RTLIB::UREM_I32, "__mspabi_remul", ISD::SETCC_INVALID },
    { RTLIB::UREM_I64, "__mspabi_remull", ISD::SETCC_INVALID },

    // Bitwise Operations - EABI Table 10
    // TODO: __mspabi_[srli/srai/slli] ARE implemented in libgcc
    { RTLIB::SRL_I32, "__mspabi_srll", ISD::SETCC_INVALID },
    { RTLIB::SRA_I32, "__mspabi_sral", ISD::SETCC_INVALID },
    { RTLIB::SHL_I32, "__mspabi_slll", ISD::SETCC_INVALID },
    // __mspabi_[srlll/srall/sllll/rlli/rlll] are NOT implemented in libgcc

  };

  for (const auto &LC : LibraryCalls) {
    setLibcallName(LC.Op, LC.Name);
    if (LC.Cond != ISD::SETCC_INVALID)
      setCmpLibcallCC(LC.Op, LC.Cond);
  }

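  // Select the EABI multiply helper variant that matches the available
  // hardware multiplier (none, 16-bit, 32-bit, or F5-series), so MUL libcalls
  // resolve to the appropriate __mspabi_mpy* routine.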
  if (STI.hasHWMult16()) {
    const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
    } LibraryCalls[] = {
      // Integer Multiply - EABI Table 9
      { RTLIB::MUL_I16, "__mspabi_mpyi_hw" },
      { RTLIB::MUL_I32, "__mspabi_mpyl_hw" },
      { RTLIB::MUL_I64, "__mspabi_mpyll_hw" },
      // TODO The __mspabi_mpysl*_hw functions ARE implemented in libgcc
      // TODO The __mspabi_mpyul*_hw functions ARE implemented in libgcc
    };
    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
    }
  } else if (STI.hasHWMult32()) {
    const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
    } LibraryCalls[] = {
      // Integer Multiply - EABI Table 9
      { RTLIB::MUL_I16, "__mspabi_mpyi_hw" },
      { RTLIB::MUL_I32, "__mspabi_mpyl_hw32" },
      { RTLIB::MUL_I64, "__mspabi_mpyll_hw32" },
      // TODO The __mspabi_mpysl*_hw32 functions ARE implemented in libgcc
      // TODO The __mspabi_mpyul*_hw32 functions ARE implemented in libgcc
    };
    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
    }
  } else if (STI.hasHWMultF5()) {
    const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
    } LibraryCalls[] = {
      // Integer Multiply - EABI Table 9
      { RTLIB::MUL_I16, "__mspabi_mpyi_f5hw" },
      { RTLIB::MUL_I32, "__mspabi_mpyl_f5hw" },
      { RTLIB::MUL_I64, "__mspabi_mpyll_f5hw" },
      // TODO The __mspabi_mpysl*_f5hw functions ARE implemented in libgcc
      // TODO The __mspabi_mpyul*_f5hw functions ARE implemented in libgcc
    };
    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
    }
  } else { // NoHWMult
    const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
    } LibraryCalls[] = {
      // Integer Multiply - EABI Table 9
      { RTLIB::MUL_I16, "__mspabi_mpyi" },
      { RTLIB::MUL_I32, "__mspabi_mpyl" },
      { RTLIB::MUL_I64, "__mspabi_mpyll" },
      // The __mspabi_mpysl* functions are NOT implemented in libgcc
      // The __mspabi_mpyul* functions are NOT implemented in libgcc
    };
    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
    }
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::MSP430_BUILTIN);
  }

  // Several of the runtime library functions use a special calling conv
  setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::MSP430_BUILTIN);
  setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::MSP430_BUILTIN);
  // TODO: __mspabi_srall, __mspabi_srlll, __mspabi_sllll

  setMinFunctionAlignment(Align(2));
  setPrefFunctionAlignment(Align(2));
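  // No native atomic operations: a maximum supported atomic width of 0 makes
  // the AtomicExpand pass lower all atomics to __atomic_* library calls.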
  setMaxAtomicSizeInBitsSupported(0);
}

SDValue MSP430TargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::SHL: // FALLTHROUGH
  case ISD::SRL:
  case ISD::SRA:            return LowerShifts(Op, DAG);
  case ISD::GlobalAddress:  return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:   return LowerBlockAddress(Op, DAG);
  case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
  case ISD::SETCC:          return LowerSETCC(Op, DAG);
  case ISD::BR_CC:          return LowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:      return LowerSELECT_CC(Op, DAG);
  case ISD::SIGN_EXTEND:    return LowerSIGN_EXTEND(Op, DAG);
  case ISD::RETURNADDR:     return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:      return LowerFRAMEADDR(Op, DAG);
  case ISD::VASTART:        return LowerVASTART(Op, DAG);
  case ISD::JumpTable:      return LowerJumpTable(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

// Define which transforms into shifts are not profitable: only amounts of
// 1, 2, 8 or 9 are cheap on MSP430 (single-bit rotates, or a byte swap for
// 8 and 9), so any other shift amount should avoid the transform.
bool MSP430TargetLowering::shouldAvoidTransformToShift(EVT VT,
                                                       unsigned Amount) const {
  return !(Amount == 8 || Amount == 9 || Amount <= 2);
}

// Implemented to verify test case assertions in
// tests/codegen/msp430/shift-amount-threshold-b.ll
bool MSP430TargetLowering::isLegalICmpImmediate(int64_t Immed) const {
  if (MSP430NoLegalImmediate)
    return Immed >= -32 && Immed < 32;
  return TargetLowering::isLegalICmpImmediate(Immed);
}

//===----------------------------------------------------------------------===//
//                       MSP430 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
TargetLowering::ConstraintType
MSP430TargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
MSP430TargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r': // GENERAL_REGS
      if (VT == MVT::i8)
        return std::make_pair(0U, &MSP430::GR8RegClass);

      return std::make_pair(0U, &MSP430::GR16RegClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "MSP430GenCallingConv.inc"

/// For each argument in a function store the number of pieces it is composed
/// of.
template<typename ArgT>
static void ParseFunctionArgs(const SmallVectorImpl<ArgT> &Args,
                              SmallVectorImpl<unsigned> &Out) {
  unsigned CurrentArgIndex;

  if (Args.empty())
    return;

  CurrentArgIndex = Args[0].OrigArgIndex;
  Out.push_back(0);

  for (auto &Arg : Args) {
    if (CurrentArgIndex == Arg.OrigArgIndex) {
      Out.back() += 1;
    } else {
      Out.push_back(1);
      CurrentArgIndex = Arg.OrigArgIndex;
    }
  }
}

static void AnalyzeVarArgs(CCState &State,
                           const SmallVectorImpl<ISD::OutputArg> &Outs) {
  State.AnalyzeCallOperands(Outs, CC_MSP430_AssignStack);
}

static void AnalyzeVarArgs(CCState &State,
                           const SmallVectorImpl<ISD::InputArg> &Ins) {
  State.AnalyzeFormalArguments(Ins, CC_MSP430_AssignStack);
}

/// Analyze incoming and outgoing function arguments. We need custom C++ code
/// to handle special constraints in the ABI like reversing the order of the
/// pieces of split arguments. In addition, all pieces of a certain argument
/// have to be passed either using registers or the stack but never mixing both.
template<typename ArgT>
static void AnalyzeArguments(CCState &State,
                             SmallVectorImpl<CCValAssign> &ArgLocs,
                             const SmallVectorImpl<ArgT> &Args) {
  static const MCPhysReg CRegList[] = {
    MSP430::R12, MSP430::R13, MSP430::R14, MSP430::R15
  };
  static const unsigned CNbRegs = std::size(CRegList);
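  // The MSP430_BUILTIN convention is used for EABI helper routines that take
  // two 64-bit operands; it passes them entirely in R8-R15 instead of the
  // usual R12-R15 argument registers.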
  static const MCPhysReg BuiltinRegList[] = {
    MSP430::R8, MSP430::R9, MSP430::R10, MSP430::R11,
    MSP430::R12, MSP430::R13, MSP430::R14, MSP430::R15
  };
  static const unsigned BuiltinNbRegs = std::size(BuiltinRegList);

  ArrayRef<MCPhysReg> RegList;
  unsigned NbRegs;

  bool Builtin = (State.getCallingConv() == CallingConv::MSP430_BUILTIN);
  if (Builtin) {
    RegList = BuiltinRegList;
    NbRegs = BuiltinNbRegs;
  } else {
    RegList = CRegList;
    NbRegs = CNbRegs;
  }

  if (State.isVarArg()) {
    AnalyzeVarArgs(State, Args);
    return;
  }

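  // Group the flattened argument pieces back by their original IR argument so
  // that all pieces of one argument are assigned to registers or to the stack
  // as a unit.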
  SmallVector<unsigned, 4> ArgsParts;
  ParseFunctionArgs(Args, ArgsParts);

  if (Builtin) {
    assert(ArgsParts.size() == 2 &&
           "Builtin calling convention requires two arguments");
  }

  unsigned RegsLeft = NbRegs;
  bool UsedStack = false;
  unsigned ValNo = 0;

  for (unsigned i = 0, e = ArgsParts.size(); i != e; i++) {
    MVT ArgVT = Args[ValNo].VT;
    ISD::ArgFlagsTy ArgFlags = Args[ValNo].Flags;
    MVT LocVT = ArgVT;
    CCValAssign::LocInfo LocInfo = CCValAssign::Full;

    // Promote i8 to i16
    if (LocVT == MVT::i8) {
      LocVT = MVT::i16;
      if (ArgFlags.isSExt())
        LocInfo = CCValAssign::SExt;
      else if (ArgFlags.isZExt())
        LocInfo = CCValAssign::ZExt;
      else
        LocInfo = CCValAssign::AExt;
    }

    // Handle byval arguments
    if (ArgFlags.isByVal()) {
      State.HandleByVal(ValNo++, ArgVT, LocVT, LocInfo, 2, Align(2), ArgFlags);
      continue;
    }

    unsigned Parts = ArgsParts[i];

    if (Builtin) {
      assert(Parts == 4 &&
             "Builtin calling convention requires 64-bit arguments");
    }

    if (!UsedStack && Parts == 2 && RegsLeft == 1) {
      // Special case for 32-bit register split, see EABI section 3.3.3
      unsigned Reg = State.AllocateReg(RegList);
      State.addLoc(CCValAssign::getReg(ValNo++, ArgVT, Reg, LocVT, LocInfo));
      RegsLeft -= 1;

      UsedStack = true;
      CC_MSP430_AssignStack(ValNo++, ArgVT, LocVT, LocInfo, ArgFlags, State);
    } else if (Parts <= RegsLeft) {
      for (unsigned j = 0; j < Parts; j++) {
        unsigned Reg = State.AllocateReg(RegList);
        State.addLoc(CCValAssign::getReg(ValNo++, ArgVT, Reg, LocVT, LocInfo));
        RegsLeft--;
      }
    } else {
      UsedStack = true;
      for (unsigned j = 0; j < Parts; j++)
        CC_MSP430_AssignStack(ValNo++, ArgVT, LocVT, LocInfo, ArgFlags, State);
    }
  }
}

static void AnalyzeRetResult(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) {
  State.AnalyzeCallResult(Ins, RetCC_MSP430);
}

static void AnalyzeRetResult(CCState &State,
                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
  State.AnalyzeReturn(Outs, RetCC_MSP430);
}

template<typename ArgT>
static void AnalyzeReturnValues(CCState &State,
                                SmallVectorImpl<CCValAssign> &RVLocs,
                                const SmallVectorImpl<ArgT> &Args) {
  AnalyzeRetResult(State, Args);
}

SDValue MSP430TargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    return LowerCCCArguments(Chain, CallConv, isVarArg, Ins, dl, DAG, InVals);
  case CallingConv::MSP430_INTR:
    if (Ins.empty())
      return Chain;
    report_fatal_error("ISRs cannot have arguments");
  }
}

SDValue
MSP430TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // MSP430 target does not yet support tail call optimization.
  isTailCall = false;

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::MSP430_BUILTIN:
  case CallingConv::Fast:
  case CallingConv::C:
    return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                          Outs, OutVals, Ins, dl, DAG, InVals);
  case CallingConv::MSP430_INTR:
    report_fatal_error("ISRs cannot be called directly");
  }
}

/// LowerCCCArguments - transform physical registers into virtual registers and
/// generate load operations for arguments placed on the stack.
// FIXME: struct return stuff
SDValue MSP430TargetLowering::LowerCCCArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  MSP430MachineFunctionInfo *FuncInfo = MF.getInfo<MSP430MachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  AnalyzeArguments(CCInfo, ArgLocs, Ins);

  // Create frame index for the start of the first vararg value
  if (isVarArg) {
    unsigned Offset = CCInfo.getStackSize();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, Offset, true));
  }

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT << "\n";
#endif
          llvm_unreachable(nullptr);
        }
      case MVT::i16:
        Register VReg = RegInfo.createVirtualRegister(&MSP430::GR16RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);

        // If this is an 8-bit value, it is really passed promoted to 16
        // bits. Insert an assert[sz]ext to capture this, then truncate to the
        // right size.
        if (VA.getLocInfo() == CCValAssign::SExt)
          ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));
        else if (VA.getLocInfo() == CCValAssign::ZExt)
          ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));

        if (VA.getLocInfo() != CCValAssign::Full)
          ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);

        InVals.push_back(ArgValue);
      }
    } else {
      // Only arguments passed on the stack should make it here.
      assert(VA.isMemLoc());

      SDValue InVal;
      ISD::ArgFlagsTy Flags = Ins[i].Flags;

      if (Flags.isByVal()) {
        MVT PtrVT = VA.getLocVT();
        int FI = MFI.CreateFixedObject(Flags.getByValSize(),
                                       VA.getLocMemOffset(), true);
        InVal = DAG.getFrameIndex(FI, PtrVT);
      } else {
        // Load the argument to a virtual register
        unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
        if (ObjSize > 2) {
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << VA.getLocVT() << "\n";
        }
        // Create the frame index object for this incoming parameter...
        int FI = MFI.CreateFixedObject(ObjSize, VA.getLocMemOffset(), true);

        // Create the SelectionDAG nodes corresponding to a load
        // from this parameter.
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i16);
        InVal = DAG.getLoad(
            VA.getLocVT(), dl, Chain, FIN,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
      }

      InVals.push_back(InVal);
    }
  }

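  // Remember the virtual register that holds the incoming sret pointer;
  // LowerReturn copies it back into R12 when the function returns.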
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    if (Ins[i].Flags.isSRet()) {
      Register Reg = FuncInfo->getSRetReturnReg();
      if (!Reg) {
        Reg = MF.getRegInfo().createVirtualRegister(
            getRegClassFor(MVT::i16));
        FuncInfo->setSRetReturnReg(Reg);
      }
      SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
    }
  }

  return Chain;
}

bool
MSP430TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                     MachineFunction &MF,
                                     bool IsVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_MSP430);
}

SDValue
MSP430TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                  bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  const SmallVectorImpl<SDValue> &OutVals,
                                  const SDLoc &dl, SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // ISRs cannot return any value.
  if (CallConv == CallingConv::MSP430_INTR && !Outs.empty())
    report_fatal_error("ISRs cannot return any value");

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  AnalyzeReturnValues(CCInfo, RVLocs, Outs);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Glue);

    // Guarantee that all emitted copies are stuck together,
    // avoiding something bad.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  if (MF.getFunction().hasStructRetAttr()) {
    MSP430MachineFunctionInfo *FuncInfo = MF.getInfo<MSP430MachineFunctionInfo>();
    Register Reg = FuncInfo->getSRetReturnReg();

    if (!Reg)
      llvm_unreachable("sret virtual register not created in entry block");

    MVT PtrVT = getFrameIndexTy(DAG.getDataLayout());
    SDValue Val =
        DAG.getCopyFromReg(Chain, dl, Reg, PtrVT);
    unsigned R12 = MSP430::R12;

    Chain = DAG.getCopyToReg(Chain, dl, R12, Val, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(R12, PtrVT));
  }

  unsigned Opc = (CallConv == CallingConv::MSP430_INTR ?
                  MSP430ISD::RETI_GLUE : MSP430ISD::RET_GLUE);

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(Opc, dl, MVT::Other, RetOps);
}

/// LowerCCCCallTo - function arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
SDValue MSP430TargetLowering::LowerCCCCallTo(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  AnalyzeArguments(CCInfo, ArgLocs, Outs);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getStackSize();
  MVT PtrVT = getFrameIndexTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;
  SDValue StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed on register must be kept at RegsToPass
    // vector
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, dl, MSP430::SP, PtrVT);

      SDValue PtrOff =
          DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                      DAG.getIntPtrConstant(VA.getLocMemOffset(), dl));

      SDValue MemOp;
      ISD::ArgFlagsTy Flags = Outs[i].Flags;

      if (Flags.isByVal()) {
        SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i16);
        MemOp = DAG.getMemcpy(Chain, dl, PtrOff, Arg, SizeNode,
                              Flags.getNonZeroByValAlign(),
                              /*isVolatile*/ false,
                              /*AlwaysInline=*/true,
                              /*CI=*/nullptr, std::nullopt,
                              MachinePointerInfo(), MachinePointerInfo());
      } else {
        MemOp = DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
      }

      MemOpChains.push_back(MemOp);
    }
  }

  // Transform all store nodes into one single node because all store nodes are
  // independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers. The InGlue
  // is necessary since all emitted instructions must be stuck together.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i16);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i16);

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  Chain = DAG.getNode(MSP430ISD::CALL, dl, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, dl);
  InGlue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl,
                         DAG, InVals);
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
SDValue MSP430TargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  AnalyzeReturnValues(CCInfo, RVLocs, Ins);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InGlue).getValue(1);
    InGlue = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

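// Lower constant-amount shifts into sequences of MSP430 rotate nodes.
// Non-constant shift amounts are returned unchanged here; they are matched to
// the Shl/Sra/Srl pseudo instructions and expanded into loops by
// EmitShiftInstr.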
SDValue MSP430TargetLowering::LowerShifts(SDValue Op,
                                          SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  SDNode* N = Op.getNode();
  EVT VT = Op.getValueType();
  SDLoc dl(N);

  // Expand non-constant shifts to loops:
  if (!isa<ConstantSDNode>(N->getOperand(1)))
    return Op;

  uint64_t ShiftAmount = N->getConstantOperandVal(1);

  // Expand the stuff into sequence of shifts.
  SDValue Victim = N->getOperand(0);

  if (ShiftAmount >= 8) {
    assert(VT == MVT::i16 && "Can not shift i8 by 8 and more");
    switch(Opc) {
    default:
      llvm_unreachable("Unknown shift");
    case ISD::SHL:
      // foo << (8 + N) => swpb(zext(foo)) << N
      Victim = DAG.getZeroExtendInReg(Victim, dl, MVT::i8);
      Victim = DAG.getNode(ISD::BSWAP, dl, VT, Victim);
      break;
    case ISD::SRA:
    case ISD::SRL:
      // foo >> (8 + N) => sxt(swpb(foo)) >> N
      Victim = DAG.getNode(ISD::BSWAP, dl, VT, Victim);
      Victim = (Opc == ISD::SRA)
                   ? DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Victim,
                                 DAG.getValueType(MVT::i8))
                   : DAG.getZeroExtendInReg(Victim, dl, MVT::i8);
      break;
    }
    ShiftAmount -= 8;
  }

  if (Opc == ISD::SRL && ShiftAmount) {
    // Emit a special goodness here:
    // srl A, 1 => clrc; rrc A
    Victim = DAG.getNode(MSP430ISD::RRCL, dl, VT, Victim);
    ShiftAmount -= 1;
  }

  while (ShiftAmount--)
    Victim = DAG.getNode((Opc == ISD::SHL ? MSP430ISD::RLA : MSP430ISD::RRA),
                         dl, VT, Victim);

  return Victim;
}

SDValue MSP430TargetLowering::LowerGlobalAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  EVT PtrVT = Op.getValueType();

  // Create the TargetGlobalAddress node, folding in the constant offset.
  SDValue Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op), PtrVT, Offset);
  return DAG.getNode(MSP430ISD::Wrapper, SDLoc(Op), PtrVT, Result);
}

SDValue MSP430TargetLowering::LowerExternalSymbol(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  EVT PtrVT = Op.getValueType();
  SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT);

  return DAG.getNode(MSP430ISD::Wrapper, dl, PtrVT, Result);
}

SDValue MSP430TargetLowering::LowerBlockAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc dl(Op);
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  EVT PtrVT = Op.getValueType();
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);

  return DAG.getNode(MSP430ISD::Wrapper, dl, PtrVT, Result);
}

static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
                       ISD::CondCode CC, const SDLoc &dl, SelectionDAG &DAG) {
  // FIXME: Handle bittests someday
  assert(!LHS.getValueType().isFloatingPoint() && "We don't handle FP yet");

  // FIXME: Handle jump negative someday
  MSP430CC::CondCodes TCC = MSP430CC::COND_INVALID;
  switch (CC) {
  default: llvm_unreachable("Invalid integer condition!");
  case ISD::SETEQ:
    TCC = MSP430CC::COND_E;  // aka COND_Z
    // Minor optimization: if LHS is a constant, swap operands, then the
    // constant can be folded into comparison.
    if (LHS.getOpcode() == ISD::Constant)
      std::swap(LHS, RHS);
    break;
  case ISD::SETNE:
    TCC = MSP430CC::COND_NE; // aka COND_NZ
    // Minor optimization: if LHS is a constant, swap operands, then the
    // constant can be folded into comparison.
    if (LHS.getOpcode() == ISD::Constant)
      std::swap(LHS, RHS);
    break;
  case ISD::SETULE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ISD::SETUGE:
    // Turn lhs u>= rhs with lhs constant into rhs u< lhs+1, this allows us to
    // fold constant into instruction.
    if (const ConstantSDNode * C = dyn_cast<ConstantSDNode>(LHS)) {
      LHS = RHS;
      RHS = DAG.getConstant(C->getSExtValue() + 1, dl, C->getValueType(0));
      TCC = MSP430CC::COND_LO;
      break;
    }
    TCC = MSP430CC::COND_HS; // aka COND_C
    break;
  case ISD::SETUGT:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ISD::SETULT:
    // Turn lhs u< rhs with lhs constant into rhs u>= lhs+1, this allows us to
    // fold constant into instruction.
    if (const ConstantSDNode * C = dyn_cast<ConstantSDNode>(LHS)) {
      LHS = RHS;
      RHS = DAG.getConstant(C->getSExtValue() + 1, dl, C->getValueType(0));
      TCC = MSP430CC::COND_HS;
      break;
    }
    TCC = MSP430CC::COND_LO; // aka COND_NC
    break;
  case ISD::SETLE:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ISD::SETGE:
    // Turn lhs >= rhs with lhs constant into rhs < lhs+1, this allows us to
    // fold constant into instruction.
    if (const ConstantSDNode * C = dyn_cast<ConstantSDNode>(LHS)) {
      LHS = RHS;
      RHS = DAG.getConstant(C->getSExtValue() + 1, dl, C->getValueType(0));
      TCC = MSP430CC::COND_L;
      break;
    }
    TCC = MSP430CC::COND_GE;
    break;
  case ISD::SETGT:
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ISD::SETLT:
    // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to
    // fold constant into instruction.
    if (const ConstantSDNode * C = dyn_cast<ConstantSDNode>(LHS)) {
      LHS = RHS;
      RHS = DAG.getConstant(C->getSExtValue() + 1, dl, C->getValueType(0));
      TCC = MSP430CC::COND_GE;
      break;
    }
    TCC = MSP430CC::COND_L;
    break;
  }

  TargetCC = DAG.getConstant(TCC, dl, MVT::i8);
  return DAG.getNode(MSP430ISD::CMP, dl, MVT::Glue, LHS, RHS);
}


SDValue MSP430TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl (Op);

  SDValue TargetCC;
  SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG);

  return DAG.getNode(MSP430ISD::BR_CC, dl, Op.getValueType(),
                     Chain, Dest, TargetCC, Flag);
}

SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDLoc dl (Op);

  // If we are doing an AND and testing against zero, then the CMP
  // will not be generated. The AND (or BIT) will generate the condition codes,
  // but they are different from CMP.
  // FIXME: since we're doing a post-processing, use a pseudoinstr here, so
  // lowering & isel wouldn't diverge.
  bool andCC = isNullConstant(RHS) && LHS.hasOneUse() &&
               (LHS.getOpcode() == ISD::AND ||
                (LHS.getOpcode() == ISD::TRUNCATE &&
                 LHS.getOperand(0).getOpcode() == ISD::AND));
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDValue TargetCC;
  SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG);

  // Get the condition codes directly from the status register, if it's easy.
  // Otherwise a branch will be generated. Note that the AND and BIT
  // instructions generate different flags than CMP, the carry bit can be used
  // for NE/EQ.
  bool Invert = false;
  bool Shift = false;
  bool Convert = true;
  switch (TargetCC->getAsZExtVal()) {
  default:
    Convert = false;
    break;
  case MSP430CC::COND_HS:
    // Res = SR & 1, no processing is required
    break;
  case MSP430CC::COND_LO:
    // Res = ~(SR & 1)
    Invert = true;
    break;
  case MSP430CC::COND_NE:
    if (andCC) {
      // C = ~Z, thus Res = SR & 1, no processing is required
    } else {
      // Res = ~((SR >> 1) & 1)
      Shift = true;
      Invert = true;
    }
    break;
  case MSP430CC::COND_E:
    Shift = true;
    // C = ~Z for AND instruction, thus we can put Res = ~(SR & 1), however,
    // Res = (SR >> 1) & 1 is 1 word shorter.
    break;
  }
  EVT VT = Op.getValueType();
  SDValue One = DAG.getConstant(1, dl, VT);
  if (Convert) {
    SDValue SR = DAG.getCopyFromReg(DAG.getEntryNode(), dl, MSP430::SR,
                                    MVT::i16, Flag);
    if (Shift)
      // FIXME: somewhere this is turned into a SRL, lower it MSP specific?
      SR = DAG.getNode(ISD::SRA, dl, MVT::i16, SR, One);
    SR = DAG.getNode(ISD::AND, dl, MVT::i16, SR, One);
    if (Invert)
      SR = DAG.getNode(ISD::XOR, dl, MVT::i16, SR, One);
    return SR;
  } else {
    SDValue Zero = DAG.getConstant(0, dl, VT);
    SDValue Ops[] = {One, Zero, TargetCC, Flag};
    return DAG.getNode(MSP430ISD::SELECT_CC, dl, Op.getValueType(), Ops);
  }
}

SDValue MSP430TargetLowering::LowerSELECT_CC(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc dl (Op);

  SDValue TargetCC;
  SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG);

  SDValue Ops[] = {TrueV, FalseV, TargetCC, Flag};

  return DAG.getNode(MSP430ISD::SELECT_CC, dl, Op.getValueType(), Ops);
}

SDValue MSP430TargetLowering::LowerSIGN_EXTEND(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue Val = Op.getOperand(0);
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  assert(VT == MVT::i16 && "Only support i16 for now!");

  return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT,
                     DAG.getNode(ISD::ANY_EXTEND, dl, VT, Val),
                     DAG.getValueType(Val.getValueType()));
}

SDValue
MSP430TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MSP430MachineFunctionInfo *FuncInfo = MF.getInfo<MSP430MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();
  MVT PtrVT = getFrameIndexTy(MF.getDataLayout());

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    uint64_t SlotSize = PtrVT.getStoreSize();
    ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize, -SlotSize,
                                                          true);
    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, PtrVT);
}

SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = Op.getConstantOperandVal(0);
  SDLoc dl(Op);
  EVT PtrVT = Op.getValueType();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(PtrVT.getStoreSize(), dl, MVT::i16);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address.
  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue MSP430TargetLowering::LowerFRAMEADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op); // FIXME probably not meaningful
  unsigned Depth = Op.getConstantOperandVal(0);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                         MSP430::R4, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

SDValue MSP430TargetLowering::LowerVASTART(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MSP430MachineFunctionInfo *FuncInfo = MF.getInfo<MSP430MachineFunctionInfo>();

  SDValue Ptr = Op.getOperand(1);
  EVT PtrVT = Ptr.getValueType();

  // Frame index of first vararg argument
  SDValue FrameIndex =
      DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Create a store of the frame index to the location operand
  return DAG.getStore(Op.getOperand(0), SDLoc(Op), FrameIndex, Ptr,
                      MachinePointerInfo(SV));
}

SDValue MSP430TargetLowering::LowerJumpTable(SDValue Op,
                                             SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  EVT PtrVT = Op.getValueType();
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  return DAG.getNode(MSP430ISD::Wrapper, SDLoc(JT), PtrVT, Result);
}

/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool MSP430TargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                      SDValue &Base,
                                                      SDValue &Offset,
                                                      ISD::MemIndexedMode &AM,
                                                      SelectionDAG &DAG) const {

  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  EVT VT = LD->getMemoryVT();
  if (VT != MVT::i8 && VT != MVT::i16)
    return false;

  if (Op->getOpcode() != ISD::ADD)
    return false;

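  // Post-increment addressing (@Rn+) bumps the pointer by the access size, so
  // only an add of exactly 1 (byte access) or 2 (word access) can be folded.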
1350 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Op->getOperand(Num: 1))) {
1351 uint64_t RHSC = RHS->getZExtValue();
1352 if ((VT == MVT::i16 && RHSC != 2) ||
1353 (VT == MVT::i8 && RHSC != 1))
1354 return false;
1355
1356 Base = Op->getOperand(Num: 0);
1357 Offset = DAG.getConstant(Val: RHSC, DL: SDLoc(N), VT);
1358 AM = ISD::POST_INC;
1359 return true;
1360 }
1361
1362 return false;
1363}
1364
1365
1366const char *MSP430TargetLowering::getTargetNodeName(unsigned Opcode) const {
1367 switch ((MSP430ISD::NodeType)Opcode) {
1368 case MSP430ISD::FIRST_NUMBER: break;
1369 case MSP430ISD::RET_GLUE: return "MSP430ISD::RET_GLUE";
1370 case MSP430ISD::RETI_GLUE: return "MSP430ISD::RETI_GLUE";
1371 case MSP430ISD::RRA: return "MSP430ISD::RRA";
1372 case MSP430ISD::RLA: return "MSP430ISD::RLA";
1373 case MSP430ISD::RRC: return "MSP430ISD::RRC";
1374 case MSP430ISD::RRCL: return "MSP430ISD::RRCL";
1375 case MSP430ISD::CALL: return "MSP430ISD::CALL";
1376 case MSP430ISD::Wrapper: return "MSP430ISD::Wrapper";
1377 case MSP430ISD::BR_CC: return "MSP430ISD::BR_CC";
1378 case MSP430ISD::CMP: return "MSP430ISD::CMP";
1379 case MSP430ISD::SETCC: return "MSP430ISD::SETCC";
1380 case MSP430ISD::SELECT_CC: return "MSP430ISD::SELECT_CC";
1381 case MSP430ISD::DADD: return "MSP430ISD::DADD";
1382 }
1383 return nullptr;
1384}
1385
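// Truncating a wider integer type to a narrower one is free: the narrow value
// is just the low part of the same register (e.g. the low byte of a GR16).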
1386bool MSP430TargetLowering::isTruncateFree(Type *Ty1,
1387 Type *Ty2) const {
1388 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
1389 return false;
1390
1391 return (Ty1->getPrimitiveSizeInBits().getFixedValue() >
1392 Ty2->getPrimitiveSizeInBits().getFixedValue());
1393}
1394
1395bool MSP430TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
1396 if (!VT1.isInteger() || !VT2.isInteger())
1397 return false;
1398
1399 return (VT1.getFixedSizeInBits() > VT2.getFixedSizeInBits());
1400}
1401
1402bool MSP430TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
1403 // MSP430 implicitly zero-extends 8-bit results in 16-bit registers, but
 // reporting the extension as free is currently disabled ('false &&' below).
1404 return false && Ty1->isIntegerTy(Bitwidth: 8) && Ty2->isIntegerTy(Bitwidth: 16);
1405}
1406
1407bool MSP430TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
1408 // MSP430 implicitly zero-extends 8-bit results in 16-bit registers, but
 // reporting the extension as free is currently disabled ('false &&' below).
1409 return false && VT1 == MVT::i8 && VT2 == MVT::i16;
1410}
1411
1412//===----------------------------------------------------------------------===//
1413// Other Lowering Code
1414//===----------------------------------------------------------------------===//
1415
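// MSP430 has no instruction that shifts by a variable (or multi-bit) amount,
// so the Shl/Sra/Srl pseudo-instructions are expanded here into a loop that
// shifts one bit per iteration (ADD for left shifts, RRA/RRC for right
// shifts) and decrements the count until it reaches zero; a shift by N thus
// runs the loop body N times.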
1416MachineBasicBlock *
1417MSP430TargetLowering::EmitShiftInstr(MachineInstr &MI,
1418 MachineBasicBlock *BB) const {
1419 MachineFunction *F = BB->getParent();
1420 MachineRegisterInfo &RI = F->getRegInfo();
1421 DebugLoc dl = MI.getDebugLoc();
1422 const TargetInstrInfo &TII = *F->getSubtarget().getInstrInfo();
1423
1424 unsigned Opc;
1425 bool ClearCarry = false;
1426 const TargetRegisterClass *RC;
1427 switch (MI.getOpcode()) {
1428 default: llvm_unreachable("Invalid shift opcode!");
1429 case MSP430::Shl8:
1430 Opc = MSP430::ADD8rr;
1431 RC = &MSP430::GR8RegClass;
1432 break;
1433 case MSP430::Shl16:
1434 Opc = MSP430::ADD16rr;
1435 RC = &MSP430::GR16RegClass;
1436 break;
1437 case MSP430::Sra8:
1438 Opc = MSP430::RRA8r;
1439 RC = &MSP430::GR8RegClass;
1440 break;
1441 case MSP430::Sra16:
1442 Opc = MSP430::RRA16r;
1443 RC = &MSP430::GR16RegClass;
1444 break;
1445 case MSP430::Srl8:
1446 ClearCarry = true;
1447 Opc = MSP430::RRC8r;
1448 RC = &MSP430::GR8RegClass;
1449 break;
1450 case MSP430::Srl16:
1451 ClearCarry = true;
1452 Opc = MSP430::RRC16r;
1453 RC = &MSP430::GR16RegClass;
1454 break;
1455 case MSP430::Rrcl8:
1456 case MSP430::Rrcl16: {
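 // Rotate right through carry with the carry flag cleared first (BIC #1, SR),
 // so a zero bit is shifted into the MSB.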
1457 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: MSP430::BIC16rc), DestReg: MSP430::SR)
1458 .addReg(RegNo: MSP430::SR).addImm(Val: 1);
1459 Register SrcReg = MI.getOperand(i: 1).getReg();
1460 Register DstReg = MI.getOperand(i: 0).getReg();
1461 unsigned RrcOpc = MI.getOpcode() == MSP430::Rrcl16
1462 ? MSP430::RRC16r : MSP430::RRC8r;
1463 BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII.get(Opcode: RrcOpc), DestReg: DstReg)
1464 .addReg(RegNo: SrcReg);
1465 MI.eraseFromParent(); // The pseudo instruction is gone now.
1466 return BB;
1467 }
1468 }
1469
1470 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1471 MachineFunction::iterator I = ++BB->getIterator();
1472
1473 // Create the loop block and the block for the code after the shift.
1474 MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
1475 MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
1476
1477 F->insert(MBBI: I, MBB: LoopBB);
1478 F->insert(MBBI: I, MBB: RemBB);
1479
1480 // Update machine-CFG edges by transferring all successors of the current
1481 // block to the block containing the instructions after the shift.
1482 RemBB->splice(Where: RemBB->begin(), Other: BB, From: std::next(x: MachineBasicBlock::iterator(MI)),
1483 To: BB->end());
1484 RemBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB);
1485
1486 // Add edges BB => LoopBB => RemBB, BB => RemBB, LoopBB => LoopBB
1487 BB->addSuccessor(Succ: LoopBB);
1488 BB->addSuccessor(Succ: RemBB);
1489 LoopBB->addSuccessor(Succ: RemBB);
1490 LoopBB->addSuccessor(Succ: LoopBB);
1491
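 // Virtual registers used by the loop: ShiftReg/ShiftReg2 hold the value
 // before and after each one-bit shift, ShiftAmtReg/ShiftAmtReg2 hold the
 // remaining shift count.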
1492 Register ShiftAmtReg = RI.createVirtualRegister(RegClass: &MSP430::GR8RegClass);
1493 Register ShiftAmtReg2 = RI.createVirtualRegister(RegClass: &MSP430::GR8RegClass);
1494 Register ShiftReg = RI.createVirtualRegister(RegClass: RC);
1495 Register ShiftReg2 = RI.createVirtualRegister(RegClass: RC);
1496 Register ShiftAmtSrcReg = MI.getOperand(i: 2).getReg();
1497 Register SrcReg = MI.getOperand(i: 1).getReg();
1498 Register DstReg = MI.getOperand(i: 0).getReg();
1499
1500 // BB:
1501 // cmp 0, N
1502 // je RemBB ; nothing to do when the shift amount is zero
1503 BuildMI(BB, MIMD: dl, MCID: TII.get(Opcode: MSP430::CMP8ri))
1504 .addReg(RegNo: ShiftAmtSrcReg).addImm(Val: 0);
1505 BuildMI(BB, MIMD: dl, MCID: TII.get(Opcode: MSP430::JCC))
1506 .addMBB(MBB: RemBB)
1507 .addImm(Val: MSP430CC::COND_E);
1508
1509 // LoopBB:
1510 // ShiftReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
1511 // ShiftAmtReg = phi [%ShiftAmtSrcReg, BB], [%ShiftAmtReg2, LoopBB]
1512 // ShiftReg2 = shift ShiftReg
1513 // ShiftAmtReg2 = ShiftAmtReg - 1;
1514 BuildMI(BB: LoopBB, MIMD: dl, MCID: TII.get(Opcode: MSP430::PHI), DestReg: ShiftReg)
1515 .addReg(RegNo: SrcReg).addMBB(MBB: BB)
1516 .addReg(RegNo: ShiftReg2).addMBB(MBB: LoopBB);
1517 BuildMI(BB: LoopBB, MIMD: dl, MCID: TII.get(Opcode: MSP430::PHI), DestReg: ShiftAmtReg)
1518 .addReg(RegNo: ShiftAmtSrcReg).addMBB(MBB: BB)
1519 .addReg(RegNo: ShiftAmtReg2).addMBB(MBB: LoopBB);
1520 if (ClearCarry)
1521 BuildMI(BB: LoopBB, MIMD: dl, MCID: TII.get(Opcode: MSP430::BIC16rc), DestReg: MSP430::SR)
1522 .addReg(RegNo: MSP430::SR).addImm(Val: 1);
1523 if (Opc == MSP430::ADD8rr || Opc == MSP430::ADD16rr)
1524 BuildMI(BB: LoopBB, MIMD: dl, MCID: TII.get(Opcode: Opc), DestReg: ShiftReg2)
1525 .addReg(RegNo: ShiftReg)
1526 .addReg(RegNo: ShiftReg);
1527 else
1528 BuildMI(BB: LoopBB, MIMD: dl, MCID: TII.get(Opcode: Opc), DestReg: ShiftReg2)
1529 .addReg(RegNo: ShiftReg);
1530 BuildMI(BB: LoopBB, MIMD: dl, MCID: TII.get(Opcode: MSP430::SUB8ri), DestReg: ShiftAmtReg2)
1531 .addReg(RegNo: ShiftAmtReg).addImm(Val: 1);
1532 BuildMI(BB: LoopBB, MIMD: dl, MCID: TII.get(Opcode: MSP430::JCC))
1533 .addMBB(MBB: LoopBB)
1534 .addImm(Val: MSP430CC::COND_NE);
1535
1536 // RemBB:
1537 // DstReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
1538 BuildMI(BB&: *RemBB, I: RemBB->begin(), MIMD: dl, MCID: TII.get(Opcode: MSP430::PHI), DestReg: DstReg)
1539 .addReg(RegNo: SrcReg).addMBB(MBB: BB)
1540 .addReg(RegNo: ShiftReg2).addMBB(MBB: LoopBB);
1541
1542 MI.eraseFromParent(); // The pseudo instruction is gone now.
1543 return RemBB;
1544}
1545
1546MachineBasicBlock *
1547MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1548 MachineBasicBlock *BB) const {
1549 unsigned Opc = MI.getOpcode();
1550
1551 if (Opc == MSP430::Shl8 || Opc == MSP430::Shl16 ||
1552 Opc == MSP430::Sra8 || Opc == MSP430::Sra16 ||
1553 Opc == MSP430::Srl8 || Opc == MSP430::Srl16 ||
1554 Opc == MSP430::Rrcl8 || Opc == MSP430::Rrcl16)
1555 return EmitShiftInstr(MI, BB);
1556
1557 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
1558 DebugLoc dl = MI.getDebugLoc();
1559
1560 assert((Opc == MSP430::Select16 || Opc == MSP430::Select8) &&
1561 "Unexpected instr type to insert");
1562
1563 // To "insert" a SELECT instruction, we actually have to insert the diamond
1564 // control-flow pattern. The incoming instruction knows the destination vreg
1565 // to set, the condition code register to branch on, the true/false values to
1566 // select between, and a branch opcode to use.
1567 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1568 MachineFunction::iterator I = ++BB->getIterator();
1569
1570 // thisMBB:
1571 // ...
1572 // TrueVal = ...
1573 // cmpTY ccX, r1, r2
1574 // jCC copy1MBB
1575 // fallthrough --> copy0MBB
1576 MachineBasicBlock *thisMBB = BB;
1577 MachineFunction *F = BB->getParent();
1578 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
1579 MachineBasicBlock *copy1MBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
1580 F->insert(MBBI: I, MBB: copy0MBB);
1581 F->insert(MBBI: I, MBB: copy1MBB);
1582 // Update machine-CFG edges by transferring all successors of the current
1583 // block to the new block which will contain the Phi node for the select.
1584 copy1MBB->splice(Where: copy1MBB->begin(), Other: BB,
1585 From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end());
1586 copy1MBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB);
1587 // Next, add the true and fallthrough blocks as its successors.
1588 BB->addSuccessor(Succ: copy0MBB);
1589 BB->addSuccessor(Succ: copy1MBB);
1590
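 // Branch to copy1MBB on the condition code carried in operand 3 of the
 // Select pseudo; when the branch is taken the PHI below selects the
 // TrueValue coming from this block, otherwise control falls through to
 // copy0MBB, which supplies the FalseValue.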
1591 BuildMI(BB, MIMD: dl, MCID: TII.get(Opcode: MSP430::JCC))
1592 .addMBB(MBB: copy1MBB)
1593 .addImm(Val: MI.getOperand(i: 3).getImm());
1594
1595 // copy0MBB:
1596 // %FalseValue = ...
1597 // # fallthrough to copy1MBB
1598 BB = copy0MBB;
1599
1600 // Update machine-CFG edges
1601 BB->addSuccessor(Succ: copy1MBB);
1602
1603 // copy1MBB:
1604 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1605 // ...
1606 BB = copy1MBB;
1607 BuildMI(BB&: *BB, I: BB->begin(), MIMD: dl, MCID: TII.get(Opcode: MSP430::PHI), DestReg: MI.getOperand(i: 0).getReg())
1608 .addReg(RegNo: MI.getOperand(i: 2).getReg())
1609 .addMBB(MBB: copy0MBB)
1610 .addReg(RegNo: MI.getOperand(i: 1).getReg())
1611 .addMBB(MBB: thisMBB);
1612
1613 MI.eraseFromParent(); // The pseudo instruction is gone now.
1614 return BB;
1615}
1616