//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static int getInstSeqCost(RISCVMatInt::InstSeq &Res, bool HasRVC) {
  if (!HasRVC)
    return Res.size();

  int Cost = 0;
  for (auto Instr : Res) {
    // Assume instructions that aren't listed aren't compressible.
    bool Compressed = false;
    switch (Instr.getOpcode()) {
    case RISCV::SLLI:
    case RISCV::SRLI:
      Compressed = true;
      break;
    case RISCV::ADDI:
    case RISCV::ADDIW:
    case RISCV::LUI:
      Compressed = isInt<6>(Instr.getImm());
      break;
    }
    // Two RVC instructions take the same space as one RVI instruction, but
    // can take longer to execute than the single RVI instruction. Thus, we
    // consider that two RVC instructions are slightly more costly than one
    // RVI instruction. For longer sequences of RVC instructions the space
    // savings can be worth it, though. The costs below try to model that.
    if (!Compressed)
      Cost += 100; // Baseline cost of one RVI instruction: 100%.
    else
      Cost += 70; // 70% cost of baseline.
  }
  return Cost;
}

// Recursively generate a sequence for materializing an integer.
static void generateInstSeqImpl(int64_t Val, const MCSubtargetInfo &STI,
                                RISCVMatInt::InstSeq &Res) {
  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);

  // Use BSETI for a single bit that can't be expressed by a single LUI or ADDI.
  if (STI.hasFeature(RISCV::FeatureStdExtZbs) && isPowerOf2_64(Val) &&
      (!isInt<32>(Val) || Val == 0x800)) {
    Res.emplace_back(RISCV::BSETI, Log2_64(Val));
    return;
  }

  if (isInt<32>(Val)) {
    // Depending on the active bits in the immediate value v, the following
    // instruction sequences are emitted:
    //
    // v == 0                        : ADDI
    // v[0,12) != 0 && v[12,32) == 0 : ADDI
    // v[0,12) == 0 && v[12,32) != 0 : LUI
    // v[0,32) != 0                  : LUI+ADDI(W)
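    //
    // For example, v = 0x12345678 gives Hi20 = 0x12345 and Lo12 = 0x678
    // (LUI 0x12345 followed by ADDI(W) 0x678), while v = 0xFFF rounds up to
    // Hi20 = 1 with Lo12 = -1 (LUI 1 followed by ADDI(W) -1).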
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = SignExtend64<12>(Val);

    if (Hi20)
      Res.emplace_back(RISCV::LUI, Hi20);

    if (Lo12 || Hi20 == 0) {
      unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
      Res.emplace_back(AddiOpc, Lo12);
    }
    return;
  }

  assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");

  // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
  // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
  // while the following ADDI instructions contribute up to 12 bits each.
  //
  // At first glance, implementing this seems to be possible by simply emitting
  // the most significant 32 bits (LUI+ADDIW) followed by as many left shifts
  // (SLLI) and immediate additions (ADDI) as needed. However, because ADDI
  // performs a sign-extended addition, doing it like that would only be
  // possible when at most 11 bits of the ADDI instructions are used. Using
  // all 12 bits of the ADDI instructions, as done by GAS, actually requires
  // that the constant is processed starting with the least significant bit.
  //
  // In the following, constants are processed from LSB to MSB but instruction
  // emission is performed from MSB to LSB by recursively calling
  // generateInstSeqImpl. In each recursion, first the lowest 12 bits are
  // removed from the constant and the optimal shift amount, which can be
  // greater than 12 bits if the constant is sparse, is determined. Then, the
  // shifted remaining constant is processed recursively and gets emitted as
  // soon as it fits into 32 bits. The emission of the shifts and additions is
  // subsequently performed when the recursion returns.
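  //
  // For example (ignoring the Zb* extensions), 0x0000000100000001 is handled
  // as follows: Lo12 = 1 is peeled off, the remainder 0x100000000 is shifted
  // right by 32 to give 1, which fits into 32 bits and is emitted as ADDI 1;
  // unwinding the recursion then appends SLLI 32 and ADDI 1.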

  int64_t Lo12 = SignExtend64<12>(Val);
  Val = (uint64_t)Val - (uint64_t)Lo12;

  int ShiftAmount = 0;
  bool Unsigned = false;

  // Val might now be valid for LUI without needing a shift.
  if (!isInt<32>(Val)) {
    ShiftAmount = llvm::countr_zero((uint64_t)Val);
    Val >>= ShiftAmount;

    // If the remaining bits don't fit in 12 bits, we might be able to reduce
    // the shift amount in order to use LUI which will zero the lower 12 bits.
    if (ShiftAmount > 12 && !isInt<12>(Val)) {
      if (isInt<32>((uint64_t)Val << 12)) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI.
        ShiftAmount -= 12;
        Val = (uint64_t)Val << 12;
      } else if (isUInt<32>((uint64_t)Val << 12) &&
                 STI.hasFeature(RISCV::FeatureStdExtZba)) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI, then shift left with SLLI.UW to clear the upper 32 set bits.
        ShiftAmount -= 12;
        Val = ((uint64_t)Val << 12) | (0xffffffffull << 32);
        Unsigned = true;
      }
    }

    // Try to use SLLI_UW for Val when it is uint32 but not int32.
    if (isUInt<32>((uint64_t)Val) && !isInt<32>((uint64_t)Val) &&
        STI.hasFeature(RISCV::FeatureStdExtZba)) {
      // Use LUI+ADDI or LUI to compose, then clear the upper 32 bits with
      // SLLI_UW.
      Val = ((uint64_t)Val) | (0xffffffffull << 32);
      Unsigned = true;
    }
  }

  generateInstSeqImpl(Val, STI, Res);

  // Skip shift if we were able to use LUI directly.
  if (ShiftAmount) {
    unsigned Opc = Unsigned ? RISCV::SLLI_UW : RISCV::SLLI;
    Res.emplace_back(Opc, ShiftAmount);
  }

  if (Lo12)
    Res.emplace_back(RISCV::ADDI, Lo12);
}

static unsigned extractRotateInfo(int64_t Val) {
  // for case: 0b111..1..xxxxxx1..1..
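  // E.g., 0xFFFFFFFFFFFFF0FF (52 leading ones, 8 trailing ones) rotated left
  // by 64 - 8 = 56 becomes 0xFFFFFFFFFFFFFFF0 (-16), which fits in a simm12,
  // so it can be materialized as ADDI -16 followed by a rotate right by 56.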
  unsigned LeadingOnes = llvm::countl_one((uint64_t)Val);
  unsigned TrailingOnes = llvm::countr_one((uint64_t)Val);
  if (TrailingOnes > 0 && TrailingOnes < 64 &&
      (LeadingOnes + TrailingOnes) > (64 - 12))
    return 64 - TrailingOnes;

  // for case: 0bxxx1..1..1...xxx
  unsigned UpperTrailingOnes = llvm::countr_one(Hi_32(Val));
  unsigned LowerLeadingOnes = llvm::countl_one(Lo_32(Val));
  if (UpperTrailingOnes < 32 &&
      (UpperTrailingOnes + LowerLeadingOnes) > (64 - 12))
    return 32 - UpperTrailingOnes;

  return 0;
}

static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI,
                                        RISCVMatInt::InstSeq &Res) {
  assert(Val > 0 && "Expected positive val");

  unsigned LeadingZeros = llvm::countl_zero((uint64_t)Val);
  uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
  // Fill in the bits that will be shifted out with 1s. An example where this
  // helps is trailing one masks with 32 or more ones. This will generate
  // ADDI -1 and an SRLI.
  ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);
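  // E.g., Val = 0xFFFFFFFFF (36 trailing ones) has 28 leading zeros; shifting
  // left by 28 and filling the vacated bits with ones yields -1, so the
  // materialization becomes ADDI -1 followed by SRLI 28.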

  RISCVMatInt::InstSeq TmpSeq;
  generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

  // Keep the new sequence if it is an improvement or the original is empty.
  if ((TmpSeq.size() + 1) < Res.size() ||
      (Res.empty() && TmpSeq.size() < 8)) {
    TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);
    Res = TmpSeq;
  }

  // Some cases can benefit from filling the lower bits with zeros instead.
  ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
  TmpSeq.clear();
  generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

  // Keep the new sequence if it is an improvement or the original is empty.
  if ((TmpSeq.size() + 1) < Res.size() ||
      (Res.empty() && TmpSeq.size() < 8)) {
    TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);
    Res = TmpSeq;
  }

  // If we have exactly 32 leading zeros and Zba, we can try using zext.w at
  // the end of the sequence.
  if (LeadingZeros == 32 && STI.hasFeature(RISCV::FeatureStdExtZba)) {
    // Try replacing upper bits with 1.
    uint64_t LeadingOnesVal = Val | maskLeadingOnes<uint64_t>(LeadingZeros);
    TmpSeq.clear();
    generateInstSeqImpl(LeadingOnesVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size() ||
        (Res.empty() && TmpSeq.size() < 8)) {
      TmpSeq.emplace_back(RISCV::ADD_UW, 0);
      Res = TmpSeq;
    }
  }
}

namespace llvm::RISCVMatInt {
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI) {
  RISCVMatInt::InstSeq Res;
  generateInstSeqImpl(Val, STI, Res);

  // If the low 12 bits are non-zero, the first expansion may end with an ADDI
  // or ADDIW. If there are trailing zeros, try generating a sign extended
  // constant with no trailing zeros and use a final SLLI to restore them.
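  // For example, 0xF80 would otherwise be LUI 1 + ADDI(W) -128; shifting out
  // its 7 trailing zeros leaves 31, which fits in a simm6, so C.LI 31 +
  // C.SLLI 7 is preferred (unless LUI+ADDI fusion is enabled).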
  if ((Val & 0xfff) != 0 && (Val & 1) == 0 && Res.size() >= 2) {
    unsigned TrailingZeros = llvm::countr_zero((uint64_t)Val);
    int64_t ShiftedVal = Val >> TrailingZeros;
    // If we can use C.LI+C.SLLI instead of LUI+ADDI(W) prefer that since
    // it's more compressible. But only if LUI+ADDI(W) isn't fusable.
    // NOTE: We don't check for C extension to minimize differences in
    // generated code.
    bool IsShiftedCompressible =
        isInt<6>(ShiftedVal) && !STI.hasFeature(RISCV::TuneLUIADDIFusion);
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size() || IsShiftedCompressible) {
      TmpSeq.emplace_back(RISCV::SLLI, TrailingZeros);
      Res = TmpSeq;
    }
  }

  // If we have a 1 or 2 instruction sequence this is the best we can do. This
  // will always be true for RV32 and will often be true for RV64.
  if (Res.size() <= 2)
    return Res;

  assert(STI.hasFeature(RISCV::Feature64Bit) &&
         "Expected RV32 to only need 2 instructions");

  // If the lower 13 bits are something like 0x17ff, try to add 1 to change the
  // lower 13 bits to 0x1800. We can restore this with an ADDI of -1 at the end
  // of the sequence. Call generateInstSeqImpl on the new constant which may
  // subtract 0xfffffffffffff800 to create another ADDI. This will leave a
  // constant with more than 12 trailing zeros for the next recursive step.
  if ((Val & 0xfff) != 0 && (Val & 0x1800) == 0x1000) {
    int64_t Imm12 = -(0x800 - (Val & 0xfff));
    int64_t AdjustedVal = Val - Imm12;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(AdjustedVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size()) {
      TmpSeq.emplace_back(RISCV::ADDI, Imm12);
      Res = TmpSeq;
    }
  }

  // If the constant is positive we might be able to generate a shifted constant
  // with no leading zeros and use a final SRLI to restore them.
  if (Val > 0 && Res.size() > 2) {
    generateInstSeqLeadingZeros(Val, STI, Res);
  }

  // If the constant is negative, try inverting it and using our trailing zero
  // optimizations. Use an XORI to invert the final value.
  if (Val < 0 && Res.size() > 3) {
    uint64_t InvertedVal = ~(uint64_t)Val;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqLeadingZeros(InvertedVal, STI, TmpSeq);

    // Keep it if we found a sequence that is smaller after inverting.
    if (!TmpSeq.empty() && (TmpSeq.size() + 1) < Res.size()) {
      TmpSeq.emplace_back(RISCV::XORI, -1);
      Res = TmpSeq;
    }
  }

  // If the Low and High halves are the same, use pack. The pack instruction
  // packs the XLEN/2-bit lower halves of rs1 and rs2 into rd, with rs1 in the
  // lower half and rs2 in the upper half.
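  // For example, with Zbkb, 0x1234567812345678 (both 32-bit halves equal to
  // 0x12345678) can become LUI 0x12345 + ADDIW 0x678 + PACK instead of a much
  // longer shift-based sequence.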
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbkb)) {
    int64_t LoVal = SignExtend64<32>(Val);
    int64_t HiVal = SignExtend64<32>(Val >> 32);
    if (LoVal == HiVal) {
      RISCVMatInt::InstSeq TmpSeq;
      generateInstSeqImpl(LoVal, STI, TmpSeq);
      if ((TmpSeq.size() + 1) < Res.size()) {
        TmpSeq.emplace_back(RISCV::PACK, 0);
        Res = TmpSeq;
      }
    }
  }

  // Perform optimization with BSETI in the Zbs extension.
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbs)) {
    // Create a simm32 value for LUI+ADDIW by forcing the upper 33 bits to
    // zero. Xor that with original value to get which bits should be set by
    // BSETI.
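    // For example, 0x8000000000001234 can be built as LUI 1 + ADDIW 0x234 +
    // BSETI 63 rather than a longer shift-based sequence.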
    uint64_t Lo = Val & 0x7fffffff;
    uint64_t Hi = Val ^ Lo;
    assert(Hi != 0);
    RISCVMatInt::InstSeq TmpSeq;

    if (Lo != 0)
      generateInstSeqImpl(Lo, STI, TmpSeq);

    if (TmpSeq.size() + llvm::popcount(Hi) < Res.size()) {
      do {
        TmpSeq.emplace_back(RISCV::BSETI, llvm::countr_zero(Hi));
        Hi &= (Hi - 1); // Clear lowest set bit.
      } while (Hi != 0);
      Res = TmpSeq;
    }
  }

  // Perform optimization with BCLRI in the Zbs extension.
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbs)) {
    // Create a simm32 value for LUI+ADDIW by forcing the upper 33 bits to one.
    // Xor that with original value to get which bits should be cleared by
    // BCLRI.
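    // For example, ~(1ULL << 38), i.e. all ones except bit 38, can be built
    // as ADDI -1 followed by BCLRI 38.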
    uint64_t Lo = Val | 0xffffffff80000000;
    uint64_t Hi = Val ^ Lo;
    assert(Hi != 0);

    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(Lo, STI, TmpSeq);

    if (TmpSeq.size() + llvm::popcount(Hi) < Res.size()) {
      do {
        TmpSeq.emplace_back(RISCV::BCLRI, llvm::countr_zero(Hi));
        Hi &= (Hi - 1); // Clear lowest set bit.
      } while (Hi != 0);
      Res = TmpSeq;
    }
  }

  // Perform optimization with SH*ADD in the Zba extension.
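  // SHxADD rd, rs1, rs2 computes (rs1 << x) + rs2, so SH1ADD, SH2ADD and
  // SH3ADD with both source operands equal multiply a register by 3, 5 and 9
  // respectively. A constant divisible by one of these factors can therefore
  // be built from the smaller quotient plus one extra instruction.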
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZba)) {
    int64_t Div = 0;
    unsigned Opc = 0;
    RISCVMatInt::InstSeq TmpSeq;
    // Select the opcode and divisor.
    if ((Val % 3) == 0 && isInt<32>(Val / 3)) {
      Div = 3;
      Opc = RISCV::SH1ADD;
    } else if ((Val % 5) == 0 && isInt<32>(Val / 5)) {
      Div = 5;
      Opc = RISCV::SH2ADD;
    } else if ((Val % 9) == 0 && isInt<32>(Val / 9)) {
      Div = 9;
      Opc = RISCV::SH3ADD;
    }
    // Build the new instruction sequence.
    if (Div > 0) {
      generateInstSeqImpl(Val / Div, STI, TmpSeq);
      if ((TmpSeq.size() + 1) < Res.size()) {
        TmpSeq.emplace_back(Opc, 0);
        Res = TmpSeq;
      }
    } else {
      // Try to use LUI+SH*ADD+ADDI.
      int64_t Hi52 = ((uint64_t)Val + 0x800ull) & ~0xfffull;
      int64_t Lo12 = SignExtend64<12>(Val);
      Div = 0;
      if (isInt<32>(Hi52 / 3) && (Hi52 % 3) == 0) {
        Div = 3;
        Opc = RISCV::SH1ADD;
      } else if (isInt<32>(Hi52 / 5) && (Hi52 % 5) == 0) {
        Div = 5;
        Opc = RISCV::SH2ADD;
      } else if (isInt<32>(Hi52 / 9) && (Hi52 % 9) == 0) {
        Div = 9;
        Opc = RISCV::SH3ADD;
      }
      // Build the new instruction sequence.
      if (Div > 0) {
        // A Val with zero Lo12 (i.e. Val equal to Hi52) should already have
        // been turned into LUI+SH*ADD by the previous optimization.
        assert(Lo12 != 0 &&
               "unexpected instruction sequence for immediate materialisation");
        assert(TmpSeq.empty() && "Expected empty TmpSeq");
        generateInstSeqImpl(Hi52 / Div, STI, TmpSeq);
        if ((TmpSeq.size() + 2) < Res.size()) {
          TmpSeq.emplace_back(Opc, 0);
          TmpSeq.emplace_back(RISCV::ADDI, Lo12);
          Res = TmpSeq;
        }
      }
    }
  }

  // Perform optimization with rori in the Zbb and th.srri in the XTheadBb
  // extension.
  if (Res.size() > 2 && (STI.hasFeature(RISCV::FeatureStdExtZbb) ||
                         STI.hasFeature(RISCV::FeatureVendorXTHeadBb))) {
    if (unsigned Rotate = extractRotateInfo(Val)) {
      RISCVMatInt::InstSeq TmpSeq;
      uint64_t NegImm12 = llvm::rotl<uint64_t>(Val, Rotate);
      assert(isInt<12>(NegImm12));
      TmpSeq.emplace_back(RISCV::ADDI, NegImm12);
      TmpSeq.emplace_back(STI.hasFeature(RISCV::FeatureStdExtZbb)
                              ? RISCV::RORI
                              : RISCV::TH_SRRI,
                          Rotate);
      Res = TmpSeq;
    }
  }
  return Res;
}

void generateMCInstSeq(int64_t Val, const MCSubtargetInfo &STI,
                       MCRegister DestReg, SmallVectorImpl<MCInst> &Insts) {
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);

  MCRegister SrcReg = RISCV::X0;
  for (RISCVMatInt::Inst &Inst : Seq) {
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addImm(Inst.getImm()));
      break;
    case RISCVMatInt::RegX0:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addReg(SrcReg)
                          .addReg(RISCV::X0));
      break;
    case RISCVMatInt::RegReg:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addReg(SrcReg)
                          .addReg(SrcReg));
      break;
    case RISCVMatInt::RegImm:
      Insts.push_back(MCInstBuilder(Inst.getOpcode())
                          .addReg(DestReg)
                          .addReg(SrcReg)
                          .addImm(Inst.getImm()));
      break;
    }

    // Only the first instruction has X0 as its source.
    SrcReg = DestReg;
  }
}

InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI,
                              unsigned &ShiftAmt, unsigned &AddOpc) {
  int64_t LoVal = SignExtend64<32>(Val);
  if (LoVal == 0)
    return RISCVMatInt::InstSeq();

  // Subtract the LoVal to emulate the effect of the final ADD.
  uint64_t Tmp = (uint64_t)Val - (uint64_t)LoVal;
  assert(Tmp != 0);

  // Use trailing zero counts to figure how far we need to shift LoVal to line
  // up with the remaining constant.
  // TODO: This algorithm assumes all non-zero bits in the low 32 bits of the
  // final constant come from LoVal.
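  // For example, for Val = 0x0000123400001234 the low value 0x1234 lines up
  // with the upper copy after a left shift by 32, so only 0x1234 has to be
  // materialized; ShiftAmt is set to 32 and AddOpc to ADD so the caller can
  // recombine the two halves (e.g. with SLLI + ADD).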
  unsigned TzLo = llvm::countr_zero((uint64_t)LoVal);
  unsigned TzHi = llvm::countr_zero(Tmp);
  assert(TzLo < 32 && TzHi >= 32);
  ShiftAmt = TzHi - TzLo;
  AddOpc = RISCV::ADD;

  if (Tmp == ((uint64_t)LoVal << ShiftAmt))
    return RISCVMatInt::generateInstSeq(LoVal, STI);

  // If we have Zba, we can use (ADD_UW X, (SLLI X, 32)).
  if (STI.hasFeature(RISCV::FeatureStdExtZba) && Lo_32(Val) == Hi_32(Val)) {
    ShiftAmt = 32;
    AddOpc = RISCV::ADD_UW;
    return RISCVMatInt::generateInstSeq(LoVal, STI);
  }

  return RISCVMatInt::InstSeq();
}

int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI,
                  bool CompressionCost, bool FreeZeroes) {
  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
  bool HasRVC = CompressionCost && (STI.hasFeature(RISCV::FeatureStdExtC) ||
                                    STI.hasFeature(RISCV::FeatureStdExtZca));
  int PlatRegSize = IsRV64 ? 64 : 32;

  // Split the constant into platform register sized chunks, and calculate cost
  // of each chunk.
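  // For example, a 128-bit constant on RV64 is costed as two independent
  // 64-bit materializations (all-zero chunks are free when FreeZeroes is set).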
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
    APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
    if (FreeZeroes && Chunk.getSExtValue() == 0)
      continue;
    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), STI);
    Cost += getInstSeqCost(MatSeq, HasRVC);
  }
  return std::max(FreeZeroes ? 0 : 1, Cost);
}

OpndKind Inst::getOpndKind() const {
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::LUI:
    return RISCVMatInt::Imm;
  case RISCV::ADD_UW:
    return RISCVMatInt::RegX0;
  case RISCV::SH1ADD:
  case RISCV::SH2ADD:
  case RISCV::SH3ADD:
  case RISCV::PACK:
    return RISCVMatInt::RegReg;
  case RISCV::ADDI:
  case RISCV::ADDIW:
  case RISCV::XORI:
  case RISCV::SLLI:
  case RISCV::SRLI:
  case RISCV::SLLI_UW:
  case RISCV::RORI:
  case RISCV::BSETI:
  case RISCV::BCLRI:
  case RISCV::TH_SRRI:
    return RISCVMatInt::RegImm;
  }
}

} // namespace llvm::RISCVMatInt