//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

14#include "WebAssemblyISelLowering.h"
15#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16#include "Utils/WebAssemblyTypeUtilities.h"
17#include "WebAssemblyMachineFunctionInfo.h"
18#include "WebAssemblySubtarget.h"
19#include "WebAssemblyTargetMachine.h"
20#include "WebAssemblyUtilities.h"
21#include "llvm/CodeGen/CallingConvLower.h"
22#include "llvm/CodeGen/MachineFrameInfo.h"
23#include "llvm/CodeGen/MachineInstrBuilder.h"
24#include "llvm/CodeGen/MachineJumpTableInfo.h"
25#include "llvm/CodeGen/MachineModuleInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/SDPatternMatch.h"
28#include "llvm/CodeGen/SelectionDAG.h"
29#include "llvm/CodeGen/SelectionDAGNodes.h"
30#include "llvm/IR/DiagnosticInfo.h"
31#include "llvm/IR/DiagnosticPrinter.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/IntrinsicInst.h"
34#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/IntrinsicsWebAssembly.h"
36#include "llvm/Support/ErrorHandling.h"
37#include "llvm/Support/KnownBits.h"
38#include "llvm/Support/MathExtras.h"
39#include "llvm/Target/TargetOptions.h"
40using namespace llvm;
41
42#define DEBUG_TYPE "wasm-lower"
43
44WebAssemblyTargetLowering::WebAssemblyTargetLowering(
45 const TargetMachine &TM, const WebAssemblySubtarget &STI)
46 : TargetLowering(TM, STI), Subtarget(&STI) {
47 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
48
  // Set the maximum number of loads allowed when expanding memcmp inline.
50 MaxLoadsPerMemcmp = 8;
51 MaxLoadsPerMemcmpOptSize = 4;
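  // For illustration (an assumption about a typical expansion, not tied to a
  // specific test): with a budget of 8 loads, a memcmp of 16 bytes can be
  // expanded into two pairs of i64 loads and compares instead of a libcall.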
52
53 // Booleans always contain 0 or 1.
54 setBooleanContents(ZeroOrOneBooleanContent);
55 // Except in SIMD vectors
56 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
57 // We don't know the microarchitecture here, so just reduce register pressure.
58 setSchedulingPreference(Sched::RegPressure);
59 // Tell ISel that we have a stack pointer.
60 setStackPointerRegisterToSaveRestore(
61 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
62 // Set up the register classes.
63 addRegisterClass(VT: MVT::i32, RC: &WebAssembly::I32RegClass);
64 addRegisterClass(VT: MVT::i64, RC: &WebAssembly::I64RegClass);
65 addRegisterClass(VT: MVT::f32, RC: &WebAssembly::F32RegClass);
66 addRegisterClass(VT: MVT::f64, RC: &WebAssembly::F64RegClass);
67 if (Subtarget->hasSIMD128()) {
68 addRegisterClass(VT: MVT::v16i8, RC: &WebAssembly::V128RegClass);
69 addRegisterClass(VT: MVT::v8i16, RC: &WebAssembly::V128RegClass);
70 addRegisterClass(VT: MVT::v4i32, RC: &WebAssembly::V128RegClass);
71 addRegisterClass(VT: MVT::v4f32, RC: &WebAssembly::V128RegClass);
72 addRegisterClass(VT: MVT::v2i64, RC: &WebAssembly::V128RegClass);
73 addRegisterClass(VT: MVT::v2f64, RC: &WebAssembly::V128RegClass);
74 }
75 if (Subtarget->hasFP16()) {
76 addRegisterClass(VT: MVT::v8f16, RC: &WebAssembly::V128RegClass);
77 }
78 if (Subtarget->hasReferenceTypes()) {
79 addRegisterClass(VT: MVT::externref, RC: &WebAssembly::EXTERNREFRegClass);
80 addRegisterClass(VT: MVT::funcref, RC: &WebAssembly::FUNCREFRegClass);
81 if (Subtarget->hasExceptionHandling()) {
82 addRegisterClass(VT: MVT::exnref, RC: &WebAssembly::EXNREFRegClass);
83 }
84 }
85 // Compute derived properties from the register classes.
86 computeRegisterProperties(TRI: Subtarget->getRegisterInfo());
87
88 // Transform loads and stores to pointers in address space 1 to loads and
89 // stores to WebAssembly global variables, outside linear memory.
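  // For example, an IR load through an addrspace(1) pointer to a wasm global
  // becomes a global.get of that global rather than a load from linear memory.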
90 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
91 setOperationAction(Op: ISD::LOAD, VT: T, Action: Custom);
92 setOperationAction(Op: ISD::STORE, VT: T, Action: Custom);
93 }
94 if (Subtarget->hasSIMD128()) {
95 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
96 MVT::v2f64}) {
97 setOperationAction(Op: ISD::LOAD, VT: T, Action: Custom);
98 setOperationAction(Op: ISD::STORE, VT: T, Action: Custom);
99 }
100 }
101 if (Subtarget->hasFP16()) {
102 setOperationAction(Op: ISD::LOAD, VT: MVT::v8f16, Action: Custom);
103 setOperationAction(Op: ISD::STORE, VT: MVT::v8f16, Action: Custom);
104 }
105 if (Subtarget->hasReferenceTypes()) {
    // We need custom load and store lowering for externref, funcref, and
    // MVT::Other. The MVT::Other here represents tables of reference types.
108 for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
109 setOperationAction(Op: ISD::LOAD, VT: T, Action: Custom);
110 setOperationAction(Op: ISD::STORE, VT: T, Action: Custom);
111 }
112 }
113
114 setOperationAction(Op: ISD::GlobalAddress, VT: MVTPtr, Action: Custom);
115 setOperationAction(Op: ISD::GlobalTLSAddress, VT: MVTPtr, Action: Custom);
116 setOperationAction(Op: ISD::ExternalSymbol, VT: MVTPtr, Action: Custom);
117 setOperationAction(Op: ISD::JumpTable, VT: MVTPtr, Action: Custom);
118 setOperationAction(Op: ISD::BlockAddress, VT: MVTPtr, Action: Custom);
119 setOperationAction(Op: ISD::BRIND, VT: MVT::Other, Action: Custom);
120 setOperationAction(Op: ISD::CLEAR_CACHE, VT: MVT::Other, Action: Custom);
121
122 // Take the default expansion for va_arg, va_copy, and va_end. There is no
123 // default action for va_start, so we do that custom.
124 setOperationAction(Op: ISD::VASTART, VT: MVT::Other, Action: Custom);
125 setOperationAction(Op: ISD::VAARG, VT: MVT::Other, Action: Expand);
126 setOperationAction(Op: ISD::VACOPY, VT: MVT::Other, Action: Expand);
127 setOperationAction(Op: ISD::VAEND, VT: MVT::Other, Action: Expand);
128
129 for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64, MVT::v8f16}) {
130 if (!Subtarget->hasFP16() && T == MVT::v8f16) {
131 continue;
132 }
133 // Don't expand the floating-point types to constant pools.
134 setOperationAction(Op: ISD::ConstantFP, VT: T, Action: Legal);
135 // Expand floating-point comparisons.
136 for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
137 ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
138 setCondCodeAction(CCs: CC, VT: T, Action: Expand);
139 // Expand floating-point library function operators.
140 for (auto Op : {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FMA})
141 setOperationAction(Op, VT: T, Action: Expand);
    // Expand vector FREM, but use a libcall rather than an expansion for
    // scalars.
143 if (MVT(T).isVector())
144 setOperationAction(Op: ISD::FREM, VT: T, Action: Expand);
145 else
146 setOperationAction(Op: ISD::FREM, VT: T, Action: LibCall);
    // Mark as legal the floating-point library function operators that are
    // supported directly and would otherwise default to expand.
149 for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
150 ISD::FRINT, ISD::FROUNDEVEN})
151 setOperationAction(Op, VT: T, Action: Legal);
152 // Support minimum and maximum, which otherwise default to expand.
153 setOperationAction(Op: ISD::FMINIMUM, VT: T, Action: Legal);
154 setOperationAction(Op: ISD::FMAXIMUM, VT: T, Action: Legal);
155 // When experimental v8f16 support is enabled these instructions don't need
156 // to be expanded.
157 if (T != MVT::v8f16) {
158 setOperationAction(Op: ISD::FP16_TO_FP, VT: T, Action: Expand);
159 setOperationAction(Op: ISD::FP_TO_FP16, VT: T, Action: Expand);
160 }
161 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: T, MemVT: MVT::f16, Action: Expand);
162 setTruncStoreAction(ValVT: T, MemVT: MVT::f16, Action: Expand);
163 }
164
165 // Expand unavailable integer operations.
166 for (auto Op :
167 {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
168 ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
169 ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
170 for (auto T : {MVT::i32, MVT::i64})
171 setOperationAction(Op, VT: T, Action: Expand);
172 if (Subtarget->hasSIMD128())
173 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
174 setOperationAction(Op, VT: T, Action: Expand);
175 }
176
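  // With wide-arithmetic, for example, a 128-bit add can be selected as a
  // single i64.add128 instruction instead of an add/carry sequence, so these
  // operations get custom lowering.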
177 if (Subtarget->hasWideArithmetic()) {
178 setOperationAction(Op: ISD::ADD, VT: MVT::i128, Action: Custom);
179 setOperationAction(Op: ISD::SUB, VT: MVT::i128, Action: Custom);
180 setOperationAction(Op: ISD::SMUL_LOHI, VT: MVT::i64, Action: Custom);
181 setOperationAction(Op: ISD::UMUL_LOHI, VT: MVT::i64, Action: Custom);
182 setOperationAction(Op: ISD::UADDO, VT: MVT::i64, Action: Custom);
183 }
184
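  // With nontrapping-fptoint, the saturating fptosi/fptoui nodes map onto
  // instructions such as i32.trunc_sat_f64_s, so they are custom-lowered
  // rather than expanded.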
185 if (Subtarget->hasNontrappingFPToInt())
186 for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
187 for (auto T : {MVT::i32, MVT::i64})
188 setOperationAction(Op, VT: T, Action: Custom);
189
190 if (Subtarget->hasRelaxedSIMD()) {
191 setOperationAction(
192 Ops: {ISD::FMINNUM, ISD::FMINIMUMNUM, ISD::FMAXNUM, ISD::FMAXIMUMNUM},
193 VTs: {MVT::v4f32, MVT::v2f64}, Action: Custom);
194 }
195 // SIMD-specific configuration
196 if (Subtarget->hasSIMD128()) {
197
198 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
199
    // Combine wide-vector muls whose inputs are extends into extmul_half.
201 setTargetDAGCombine(ISD::MUL);
202
203 // Combine vector mask reductions into alltrue/anytrue
204 setTargetDAGCombine(ISD::SETCC);
205
    // Convert vector-to-integer bitcasts into bitmask operations
207 setTargetDAGCombine(ISD::BITCAST);
208
209 // Hoist bitcasts out of shuffles
210 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
211
212 // Combine extends of extract_subvectors into widening ops
213 setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND});
214
    // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
    // conversion ops
217 setTargetDAGCombine({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_EXTEND,
218 ISD::EXTRACT_SUBVECTOR});
219
220 // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
221 // into conversion ops
222 setTargetDAGCombine({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
223 ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_ROUND,
224 ISD::CONCAT_VECTORS});
225
226 setTargetDAGCombine(ISD::TRUNCATE);
227
228 // Support saturating add/sub for i8x16 and i16x8
229 for (auto Op : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
230 for (auto T : {MVT::v16i8, MVT::v8i16})
231 setOperationAction(Op, VT: T, Action: Legal);
232
233 // Support integer abs
234 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
235 setOperationAction(Op: ISD::ABS, VT: T, Action: Legal);
236
237 // Custom lower BUILD_VECTORs to minimize number of replace_lanes
238 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
239 MVT::v2f64})
240 setOperationAction(Op: ISD::BUILD_VECTOR, VT: T, Action: Custom);
241
242 if (Subtarget->hasFP16())
243 setOperationAction(Op: ISD::BUILD_VECTOR, VT: MVT::f16, Action: Custom);
244
245 // We have custom shuffle lowering to expose the shuffle mask
246 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
247 MVT::v2f64})
248 setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT: T, Action: Custom);
249
250 if (Subtarget->hasFP16())
251 setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT: MVT::v8f16, Action: Custom);
252
253 // Support splatting
254 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
255 MVT::v2f64})
256 setOperationAction(Op: ISD::SPLAT_VECTOR, VT: T, Action: Legal);
257
258 setOperationAction(Ops: ISD::AVGCEILU, VTs: {MVT::v8i16, MVT::v16i8}, Action: Legal);
259
260 // Custom lowering since wasm shifts must have a scalar shift amount
261 for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
262 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
263 setOperationAction(Op, VT: T, Action: Custom);
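    // (Illustrative note: a v4i32 shl by a splat amount can become i32x4.shl
    // with the splatted value as its scalar operand, while non-splat shift
    // amounts have to be handled lane by lane.)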
264
265 // Custom lower lane accesses to expand out variable indices
266 for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
267 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
268 MVT::v2f64})
269 setOperationAction(Op, VT: T, Action: Custom);
270
271 // There is no i8x16.mul instruction
272 setOperationAction(Op: ISD::MUL, VT: MVT::v16i8, Action: Expand);
273
274 // There is no vector conditional select instruction
275 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
276 MVT::v2f64})
277 setOperationAction(Op: ISD::SELECT_CC, VT: T, Action: Expand);
278
279 // Expand integer operations supported for scalars but not SIMD
280 for (auto Op :
281 {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
282 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
283 setOperationAction(Op, VT: T, Action: Expand);
284
285 // But we do have integer min and max operations
286 for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
287 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
288 setOperationAction(Op, VT: T, Action: Legal);
289
290 // And we have popcnt for i8x16. It can be used to expand ctlz/cttz.
291 setOperationAction(Op: ISD::CTPOP, VT: MVT::v16i8, Action: Legal);
292 setOperationAction(Op: ISD::CTLZ, VT: MVT::v16i8, Action: Expand);
293 setOperationAction(Op: ISD::CTTZ, VT: MVT::v16i8, Action: Expand);
294
295 // Custom lower bit counting operations for other types to scalarize them.
296 for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP})
297 for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
298 setOperationAction(Op, VT: T, Action: Custom);
299
300 // Expand float operations supported for scalars but not SIMD
301 for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
302 ISD::FEXP, ISD::FEXP2, ISD::FEXP10})
303 for (auto T : {MVT::v4f32, MVT::v2f64})
304 setOperationAction(Op, VT: T, Action: Expand);
305
306 // Unsigned comparison operations are unavailable for i64x2 vectors.
307 for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
308 setCondCodeAction(CCs: CC, VT: MVT::v2i64, Action: Custom);
309
310 // 64x2 conversions are not in the spec
311 for (auto Op :
312 {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
313 for (auto T : {MVT::v2i64, MVT::v2f64})
314 setOperationAction(Op, VT: T, Action: Expand);
315
    // But saturating fp_to_int conversions are
317 for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}) {
318 setOperationAction(Op, VT: MVT::v4i32, Action: Custom);
319 if (Subtarget->hasFP16()) {
320 setOperationAction(Op, VT: MVT::v8i16, Action: Custom);
321 }
322 }
323
324 // Support vector extending
325 for (auto T : MVT::integer_fixedlen_vector_valuetypes()) {
326 setOperationAction(Op: ISD::ANY_EXTEND_VECTOR_INREG, VT: T, Action: Custom);
327 setOperationAction(Op: ISD::SIGN_EXTEND_VECTOR_INREG, VT: T, Action: Custom);
328 setOperationAction(Op: ISD::ZERO_EXTEND_VECTOR_INREG, VT: T, Action: Custom);
329 }
330
331 if (Subtarget->hasFP16()) {
332 setOperationAction(Op: ISD::FMA, VT: MVT::v8f16, Action: Legal);
333 }
334
335 if (Subtarget->hasRelaxedSIMD()) {
336 setOperationAction(Op: ISD::FMULADD, VT: MVT::v4f32, Action: Legal);
337 setOperationAction(Op: ISD::FMULADD, VT: MVT::v2f64, Action: Legal);
338 }
339
340 // Partial MLA reductions.
341 for (auto Op : {ISD::PARTIAL_REDUCE_SMLA, ISD::PARTIAL_REDUCE_UMLA}) {
342 setPartialReduceMLAAction(Opc: Op, AccVT: MVT::v4i32, InputVT: MVT::v16i8, Action: Legal);
343 setPartialReduceMLAAction(Opc: Op, AccVT: MVT::v4i32, InputVT: MVT::v8i16, Action: Legal);
344 }
345 }
346
347 // As a special case, these operators use the type to mean the type to
348 // sign-extend from.
349 setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i1, Action: Expand);
350 if (!Subtarget->hasSignExt()) {
351 // Sign extends are legal only when extending a vector extract
352 auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
353 for (auto T : {MVT::i8, MVT::i16, MVT::i32})
354 setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: T, Action);
355 }
356 for (auto T : MVT::integer_fixedlen_vector_valuetypes())
357 setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: T, Action: Expand);
358
359 // Dynamic stack allocation: use the default expansion.
360 setOperationAction(Op: ISD::STACKSAVE, VT: MVT::Other, Action: Expand);
361 setOperationAction(Op: ISD::STACKRESTORE, VT: MVT::Other, Action: Expand);
362 setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVTPtr, Action: Expand);
363
364 setOperationAction(Op: ISD::FrameIndex, VT: MVT::i32, Action: Custom);
365 setOperationAction(Op: ISD::FrameIndex, VT: MVT::i64, Action: Custom);
366 setOperationAction(Op: ISD::CopyToReg, VT: MVT::Other, Action: Custom);
367
368 // Expand these forms; we pattern-match the forms that we can handle in isel.
369 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
370 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
371 setOperationAction(Op, VT: T, Action: Expand);
372
373 // We have custom switch handling.
374 setOperationAction(Op: ISD::BR_JT, VT: MVT::Other, Action: Custom);
375
376 // WebAssembly doesn't have:
377 // - Floating-point extending loads.
378 // - Floating-point truncating stores.
379 // - i1 extending loads.
380 // - truncating SIMD stores and most extending loads
381 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::f64, MemVT: MVT::f32, Action: Expand);
382 setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f32, Action: Expand);
383 for (auto T : MVT::integer_valuetypes())
384 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
385 setLoadExtAction(ExtType: Ext, ValVT: T, MemVT: MVT::i1, Action: Promote);
386 if (Subtarget->hasSIMD128()) {
387 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
388 MVT::v2f64}) {
389 for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
390 if (MVT(T) != MemT) {
391 setTruncStoreAction(ValVT: T, MemVT: MemT, Action: Expand);
392 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
393 setLoadExtAction(ExtType: Ext, ValVT: T, MemVT: MemT, Action: Expand);
394 }
395 }
396 }
397 // But some vector extending loads are legal
398 for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
399 setLoadExtAction(ExtType: Ext, ValVT: MVT::v8i16, MemVT: MVT::v8i8, Action: Legal);
400 setLoadExtAction(ExtType: Ext, ValVT: MVT::v4i32, MemVT: MVT::v4i16, Action: Legal);
401 setLoadExtAction(ExtType: Ext, ValVT: MVT::v2i64, MemVT: MVT::v2i32, Action: Legal);
402 }
403 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: MVT::v2f64, MemVT: MVT::v2f32, Action: Legal);
404 }
405
406 // Don't do anything clever with build_pairs
407 setOperationAction(Op: ISD::BUILD_PAIR, VT: MVT::i64, Action: Expand);
408
409 // Trap lowers to wasm unreachable
410 setOperationAction(Op: ISD::TRAP, VT: MVT::Other, Action: Legal);
411 setOperationAction(Op: ISD::DEBUGTRAP, VT: MVT::Other, Action: Legal);
412
413 // Exception handling intrinsics
414 setOperationAction(Op: ISD::INTRINSIC_WO_CHAIN, VT: MVT::Other, Action: Custom);
415 setOperationAction(Op: ISD::INTRINSIC_W_CHAIN, VT: MVT::Other, Action: Custom);
416 setOperationAction(Op: ISD::INTRINSIC_VOID, VT: MVT::Other, Action: Custom);
417
418 setMaxAtomicSizeInBitsSupported(64);
419
420 // Always convert switches to br_tables unless there is only one case, which
421 // is equivalent to a simple branch. This reduces code size for wasm, and we
422 // defer possible jump table optimizations to the VM.
423 setMinimumJumpTableEntries(2);
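  // For example, even a two-case switch over adjacent values will typically be
  // emitted as a single br_table here, leaving any further optimization of the
  // dispatch to the engine.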
424}
425
426MVT WebAssemblyTargetLowering::getPointerTy(const DataLayout &DL,
427 uint32_t AS) const {
428 if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
429 return MVT::externref;
430 if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
431 return MVT::funcref;
432 return TargetLowering::getPointerTy(DL, AS);
433}
434
435MVT WebAssemblyTargetLowering::getPointerMemTy(const DataLayout &DL,
436 uint32_t AS) const {
437 if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
438 return MVT::externref;
439 if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
440 return MVT::funcref;
441 return TargetLowering::getPointerMemTy(DL, AS);
442}
443
444TargetLowering::AtomicExpansionKind
445WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(
446 const AtomicRMWInst *AI) const {
447 // We have wasm instructions for these
448 switch (AI->getOperation()) {
449 case AtomicRMWInst::Add:
450 case AtomicRMWInst::Sub:
451 case AtomicRMWInst::And:
452 case AtomicRMWInst::Or:
453 case AtomicRMWInst::Xor:
454 case AtomicRMWInst::Xchg:
455 return AtomicExpansionKind::None;
456 default:
457 break;
458 }
459 return AtomicExpansionKind::CmpXChg;
460}
461
462bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
463 // Implementation copied from X86TargetLowering.
464 unsigned Opc = VecOp.getOpcode();
465
466 // Assume target opcodes can't be scalarized.
467 // TODO - do we have any exceptions?
468 if (Opc >= ISD::BUILTIN_OP_END || !isBinOp(Opcode: Opc))
469 return false;
470
471 // If the vector op is not supported, try to convert to scalar.
472 EVT VecVT = VecOp.getValueType();
473 if (!isOperationLegalOrCustomOrPromote(Op: Opc, VT: VecVT))
474 return true;
475
476 // If the vector op is supported, but the scalar op is not, the transform may
477 // not be worthwhile.
478 EVT ScalarVT = VecVT.getScalarType();
479 return isOperationLegalOrCustomOrPromote(Op: Opc, VT: ScalarVT);
480}
481
482FastISel *WebAssemblyTargetLowering::createFastISel(
483 FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo,
484 const LibcallLoweringInfo *LibcallLowering) const {
485 return WebAssembly::createFastISel(funcInfo&: FuncInfo, libInfo: LibInfo, libcallLowering: LibcallLowering);
486}
487
488MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
489 EVT VT) const {
490 unsigned BitWidth = NextPowerOf2(A: VT.getSizeInBits() - 1);
491 if (BitWidth > 1 && BitWidth < 8)
492 BitWidth = 8;
493
494 if (BitWidth > 64) {
495 // The shift will be lowered to a libcall, and compiler-rt libcalls expect
496 // the count to be an i32.
497 BitWidth = 32;
498 assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
499 "32-bit shift counts ought to be enough for anyone");
500 }
501
502 MVT Result = MVT::getIntegerVT(BitWidth);
503 assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
504 "Unable to represent scalar shift amount type");
505 return Result;
506}
507
508// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
509// undefined result on invalid/overflow, to the WebAssembly opcode, which
510// traps on invalid/overflow.
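//
// As an illustrative sketch (signed f32 -> i32 case), the emitted code forms a
// CFG diamond roughly like:
//
//   cmp = f32.abs(x) < 0x1p31     ;; is |x| within the range of i32?
//   br_if TrueMBB, (i32.eqz cmp)  ;; out of range: skip the trapping convert
// FalseMBB:
//   f = i32.trunc_f32_s(x)        ;; now known not to trap
//   br DoneMBB
// TrueMBB:
//   s = INT32_MIN                 ;; substitute value (0 in the unsigned case)
// DoneMBB:
//   result = phi [f, FalseMBB], [s, TrueMBB]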
511static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
512 MachineBasicBlock *BB,
513 const TargetInstrInfo &TII,
514 bool IsUnsigned, bool Int64,
515 bool Float64, unsigned LoweredOpcode) {
516 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
517
518 Register OutReg = MI.getOperand(i: 0).getReg();
519 Register InReg = MI.getOperand(i: 1).getReg();
520
521 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
522 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
523 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
524 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
525 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
526 unsigned Eqz = WebAssembly::EQZ_I32;
527 unsigned And = WebAssembly::AND_I32;
528 int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
529 int64_t Substitute = IsUnsigned ? 0 : Limit;
530 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
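  // E.g. for an unsigned f64 -> i32 conversion, Limit is INT32_MIN, so CmpVal
  // is 2^32; the conversion is performed only when 0.0 <= x < 2^32, and
  // Substitute (0) is used otherwise.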
531 auto &Context = BB->getParent()->getFunction().getContext();
532 Type *Ty = Float64 ? Type::getDoubleTy(C&: Context) : Type::getFloatTy(C&: Context);
533
534 const BasicBlock *LLVMBB = BB->getBasicBlock();
535 MachineFunction *F = BB->getParent();
536 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(BB: LLVMBB);
537 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(BB: LLVMBB);
538 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(BB: LLVMBB);
539
540 MachineFunction::iterator It = ++BB->getIterator();
541 F->insert(MBBI: It, MBB: FalseMBB);
542 F->insert(MBBI: It, MBB: TrueMBB);
543 F->insert(MBBI: It, MBB: DoneMBB);
544
545 // Transfer the remainder of BB and its successor edges to DoneMBB.
546 DoneMBB->splice(Where: DoneMBB->begin(), Other: BB, From: std::next(x: MI.getIterator()), To: BB->end());
547 DoneMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB);
548
549 BB->addSuccessor(Succ: TrueMBB);
550 BB->addSuccessor(Succ: FalseMBB);
551 TrueMBB->addSuccessor(Succ: DoneMBB);
552 FalseMBB->addSuccessor(Succ: DoneMBB);
553
554 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
555 Tmp0 = MRI.createVirtualRegister(RegClass: MRI.getRegClass(Reg: InReg));
556 Tmp1 = MRI.createVirtualRegister(RegClass: MRI.getRegClass(Reg: InReg));
557 CmpReg = MRI.createVirtualRegister(RegClass: &WebAssembly::I32RegClass);
558 EqzReg = MRI.createVirtualRegister(RegClass: &WebAssembly::I32RegClass);
559 FalseReg = MRI.createVirtualRegister(RegClass: MRI.getRegClass(Reg: OutReg));
560 TrueReg = MRI.createVirtualRegister(RegClass: MRI.getRegClass(Reg: OutReg));
561
562 MI.eraseFromParent();
563 // For signed numbers, we can do a single comparison to determine whether
564 // fabs(x) is within range.
565 if (IsUnsigned) {
566 Tmp0 = InReg;
567 } else {
568 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: Abs), DestReg: Tmp0).addReg(RegNo: InReg);
569 }
570 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: FConst), DestReg: Tmp1)
571 .addFPImm(Val: cast<ConstantFP>(Val: ConstantFP::get(Ty, V: CmpVal)));
572 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: LT), DestReg: CmpReg).addReg(RegNo: Tmp0).addReg(RegNo: Tmp1);
573
574 // For unsigned numbers, we have to do a separate comparison with zero.
575 if (IsUnsigned) {
576 Tmp1 = MRI.createVirtualRegister(RegClass: MRI.getRegClass(Reg: InReg));
577 Register SecondCmpReg =
578 MRI.createVirtualRegister(RegClass: &WebAssembly::I32RegClass);
579 Register AndReg = MRI.createVirtualRegister(RegClass: &WebAssembly::I32RegClass);
580 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: FConst), DestReg: Tmp1)
581 .addFPImm(Val: cast<ConstantFP>(Val: ConstantFP::get(Ty, V: 0.0)));
582 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: GE), DestReg: SecondCmpReg).addReg(RegNo: Tmp0).addReg(RegNo: Tmp1);
583 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: And), DestReg: AndReg).addReg(RegNo: CmpReg).addReg(RegNo: SecondCmpReg);
584 CmpReg = AndReg;
585 }
586
587 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: Eqz), DestReg: EqzReg).addReg(RegNo: CmpReg);
588
589 // Create the CFG diamond to select between doing the conversion or using
590 // the substitute value.
591 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: WebAssembly::BR_IF)).addMBB(MBB: TrueMBB).addReg(RegNo: EqzReg);
592 BuildMI(BB: FalseMBB, MIMD: DL, MCID: TII.get(Opcode: LoweredOpcode), DestReg: FalseReg).addReg(RegNo: InReg);
593 BuildMI(BB: FalseMBB, MIMD: DL, MCID: TII.get(Opcode: WebAssembly::BR)).addMBB(MBB: DoneMBB);
594 BuildMI(BB: TrueMBB, MIMD: DL, MCID: TII.get(Opcode: IConst), DestReg: TrueReg).addImm(Val: Substitute);
595 BuildMI(BB&: *DoneMBB, I: DoneMBB->begin(), MIMD: DL, MCID: TII.get(Opcode: TargetOpcode::PHI), DestReg: OutReg)
596 .addReg(RegNo: FalseReg)
597 .addMBB(MBB: FalseMBB)
598 .addReg(RegNo: TrueReg)
599 .addMBB(MBB: TrueMBB);
600
601 return DoneMBB;
602}
603
// Lower a `MEMCPY` instruction into a CFG triangle around a `MEMORY_COPY`
// instruction to handle the zero-length case.
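//
// An illustrative shape of the result (the memset lowering below mirrors it):
//
//   eqz = (len == 0)
//   br_if DoneMBB, eqz            ;; skip the copy when the length is zero
// TrueMBB:
//   memory.copy dstmem, srcmem, dst, src, len
// DoneMBB:
//   ...rest of the original block...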
606static MachineBasicBlock *LowerMemcpy(MachineInstr &MI, DebugLoc DL,
607 MachineBasicBlock *BB,
608 const TargetInstrInfo &TII, bool Int64) {
609 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
610
611 MachineOperand DstMem = MI.getOperand(i: 0);
612 MachineOperand SrcMem = MI.getOperand(i: 1);
613 MachineOperand Dst = MI.getOperand(i: 2);
614 MachineOperand Src = MI.getOperand(i: 3);
615 MachineOperand Len = MI.getOperand(i: 4);
616
617 // If the length is a constant, we don't actually need the check.
618 if (MachineInstr *Def = MRI.getVRegDef(Reg: Len.getReg())) {
619 if (Def->getOpcode() == WebAssembly::CONST_I32 ||
620 Def->getOpcode() == WebAssembly::CONST_I64) {
621 if (Def->getOperand(i: 1).getImm() == 0) {
622 // A zero-length memcpy is a no-op.
623 MI.eraseFromParent();
624 return BB;
625 }
626 // A non-zero-length memcpy doesn't need a zero check.
627 unsigned MemoryCopy =
628 Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;
629 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: MemoryCopy))
630 .add(MO: DstMem)
631 .add(MO: SrcMem)
632 .add(MO: Dst)
633 .add(MO: Src)
634 .add(MO: Len);
635 MI.eraseFromParent();
636 return BB;
637 }
638 }
639
640 // We're going to add an extra use to `Len` to test if it's zero; that
641 // use shouldn't be a kill, even if the original use is.
642 MachineOperand NoKillLen = Len;
643 NoKillLen.setIsKill(false);
644
645 // Decide on which `MachineInstr` opcode we're going to use.
646 unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
647 unsigned MemoryCopy =
648 Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;
649
  // Create two new basic blocks; one for the new `memory.copy` that we can
  // branch over, and one for the rest of the instructions after the original
  // `memory.copy`.
653 const BasicBlock *LLVMBB = BB->getBasicBlock();
654 MachineFunction *F = BB->getParent();
655 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(BB: LLVMBB);
656 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(BB: LLVMBB);
657
658 MachineFunction::iterator It = ++BB->getIterator();
659 F->insert(MBBI: It, MBB: TrueMBB);
660 F->insert(MBBI: It, MBB: DoneMBB);
661
662 // Transfer the remainder of BB and its successor edges to DoneMBB.
663 DoneMBB->splice(Where: DoneMBB->begin(), Other: BB, From: std::next(x: MI.getIterator()), To: BB->end());
664 DoneMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB);
665
666 // Connect the CFG edges.
667 BB->addSuccessor(Succ: TrueMBB);
668 BB->addSuccessor(Succ: DoneMBB);
669 TrueMBB->addSuccessor(Succ: DoneMBB);
670
671 // Create a virtual register for the `Eqz` result.
672 unsigned EqzReg;
673 EqzReg = MRI.createVirtualRegister(RegClass: &WebAssembly::I32RegClass);
674
675 // Erase the original `memory.copy`.
676 MI.eraseFromParent();
677
678 // Test if `Len` is zero.
679 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: Eqz), DestReg: EqzReg).add(MO: NoKillLen);
680
681 // Insert a new `memory.copy`.
682 BuildMI(BB: TrueMBB, MIMD: DL, MCID: TII.get(Opcode: MemoryCopy))
683 .add(MO: DstMem)
684 .add(MO: SrcMem)
685 .add(MO: Dst)
686 .add(MO: Src)
687 .add(MO: Len);
688
689 // Create the CFG triangle.
690 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: WebAssembly::BR_IF)).addMBB(MBB: DoneMBB).addReg(RegNo: EqzReg);
691 BuildMI(BB: TrueMBB, MIMD: DL, MCID: TII.get(Opcode: WebAssembly::BR)).addMBB(MBB: DoneMBB);
692
693 return DoneMBB;
694}
695
// Lower a `MEMSET` instruction into a CFG triangle around a `MEMORY_FILL`
// instruction to handle the zero-length case.
698static MachineBasicBlock *LowerMemset(MachineInstr &MI, DebugLoc DL,
699 MachineBasicBlock *BB,
700 const TargetInstrInfo &TII, bool Int64) {
701 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
702
703 MachineOperand Mem = MI.getOperand(i: 0);
704 MachineOperand Dst = MI.getOperand(i: 1);
705 MachineOperand Val = MI.getOperand(i: 2);
706 MachineOperand Len = MI.getOperand(i: 3);
707
708 // If the length is a constant, we don't actually need the check.
709 if (MachineInstr *Def = MRI.getVRegDef(Reg: Len.getReg())) {
710 if (Def->getOpcode() == WebAssembly::CONST_I32 ||
711 Def->getOpcode() == WebAssembly::CONST_I64) {
712 if (Def->getOperand(i: 1).getImm() == 0) {
713 // A zero-length memset is a no-op.
714 MI.eraseFromParent();
715 return BB;
716 }
717 // A non-zero-length memset doesn't need a zero check.
718 unsigned MemoryFill =
719 Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;
720 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: MemoryFill))
721 .add(MO: Mem)
722 .add(MO: Dst)
723 .add(MO: Val)
724 .add(MO: Len);
725 MI.eraseFromParent();
726 return BB;
727 }
728 }
729
730 // We're going to add an extra use to `Len` to test if it's zero; that
731 // use shouldn't be a kill, even if the original use is.
732 MachineOperand NoKillLen = Len;
733 NoKillLen.setIsKill(false);
734
735 // Decide on which `MachineInstr` opcode we're going to use.
736 unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
737 unsigned MemoryFill =
738 Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;
739
740 // Create two new basic blocks; one for the new `memory.fill` that we can
741 // branch over, and one for the rest of the instructions after the original
742 // `memory.fill`.
743 const BasicBlock *LLVMBB = BB->getBasicBlock();
744 MachineFunction *F = BB->getParent();
745 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(BB: LLVMBB);
746 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(BB: LLVMBB);
747
748 MachineFunction::iterator It = ++BB->getIterator();
749 F->insert(MBBI: It, MBB: TrueMBB);
750 F->insert(MBBI: It, MBB: DoneMBB);
751
752 // Transfer the remainder of BB and its successor edges to DoneMBB.
753 DoneMBB->splice(Where: DoneMBB->begin(), Other: BB, From: std::next(x: MI.getIterator()), To: BB->end());
754 DoneMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB);
755
756 // Connect the CFG edges.
757 BB->addSuccessor(Succ: TrueMBB);
758 BB->addSuccessor(Succ: DoneMBB);
759 TrueMBB->addSuccessor(Succ: DoneMBB);
760
761 // Create a virtual register for the `Eqz` result.
762 unsigned EqzReg;
763 EqzReg = MRI.createVirtualRegister(RegClass: &WebAssembly::I32RegClass);
764
765 // Erase the original `memory.fill`.
766 MI.eraseFromParent();
767
768 // Test if `Len` is zero.
769 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: Eqz), DestReg: EqzReg).add(MO: NoKillLen);
770
  // Insert a new `memory.fill`.
772 BuildMI(BB: TrueMBB, MIMD: DL, MCID: TII.get(Opcode: MemoryFill)).add(MO: Mem).add(MO: Dst).add(MO: Val).add(MO: Len);
773
774 // Create the CFG triangle.
775 BuildMI(BB, MIMD: DL, MCID: TII.get(Opcode: WebAssembly::BR_IF)).addMBB(MBB: DoneMBB).addReg(RegNo: EqzReg);
776 BuildMI(BB: TrueMBB, MIMD: DL, MCID: TII.get(Opcode: WebAssembly::BR)).addMBB(MBB: DoneMBB);
777
778 return DoneMBB;
779}
780
781static MachineBasicBlock *
782LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
783 const WebAssemblySubtarget *Subtarget,
784 const TargetInstrInfo &TII) {
785 MachineInstr &CallParams = *CallResults.getPrevNode();
786 assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
787 assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
788 CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);
789
790 bool IsIndirect =
791 CallParams.getOperand(i: 0).isReg() || CallParams.getOperand(i: 0).isFI();
792 bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
793
794 bool IsFuncrefCall = false;
795 if (IsIndirect && CallParams.getOperand(i: 0).isReg()) {
796 Register Reg = CallParams.getOperand(i: 0).getReg();
797 const MachineFunction *MF = BB->getParent();
798 const MachineRegisterInfo &MRI = MF->getRegInfo();
799 const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
800 IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
801 assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
802 }
803
804 unsigned CallOp;
805 if (IsIndirect && IsRetCall) {
806 CallOp = WebAssembly::RET_CALL_INDIRECT;
807 } else if (IsIndirect) {
808 CallOp = WebAssembly::CALL_INDIRECT;
809 } else if (IsRetCall) {
810 CallOp = WebAssembly::RET_CALL;
811 } else {
812 CallOp = WebAssembly::CALL;
813 }
814
815 MachineFunction &MF = *BB->getParent();
816 const MCInstrDesc &MCID = TII.get(Opcode: CallOp);
817 MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
818
819 // Move the function pointer to the end of the arguments for indirect calls
820 if (IsIndirect) {
821 auto FnPtr = CallParams.getOperand(i: 0);
822 CallParams.removeOperand(OpNo: 0);
823
    // For funcrefs, call_indirect is done through __funcref_call_table and the
    // funcref is always installed in slot 0 of the table. Therefore, instead
    // of adding the function pointer at the end of the params list, a zero
    // (the index into __funcref_call_table) is added.
829 if (IsFuncrefCall) {
830 Register RegZero =
831 MF.getRegInfo().createVirtualRegister(RegClass: &WebAssembly::I32RegClass);
832 MachineInstrBuilder MIBC0 =
833 BuildMI(MF, MIMD: DL, MCID: TII.get(Opcode: WebAssembly::CONST_I32), DestReg: RegZero).addImm(Val: 0);
834
835 BB->insert(I: CallResults.getIterator(), M: MIBC0);
836 MachineInstrBuilder(MF, CallParams).addReg(RegNo: RegZero);
837 } else
838 CallParams.addOperand(Op: FnPtr);
839 }
840
841 for (auto Def : CallResults.defs())
842 MIB.add(MO: Def);
843
844 if (IsIndirect) {
845 // Placeholder for the type index.
846 // This gets replaced with the correct value in WebAssemblyMCInstLower.cpp
847 MIB.addImm(Val: 0);
848 // The table into which this call_indirect indexes.
849 MCSymbolWasm *Table = IsFuncrefCall
850 ? WebAssembly::getOrCreateFuncrefCallTableSymbol(
851 Ctx&: MF.getContext(), Subtarget)
852 : WebAssembly::getOrCreateFunctionTableSymbol(
853 Ctx&: MF.getContext(), Subtarget);
854 if (Subtarget->hasCallIndirectOverlong()) {
855 MIB.addSym(Sym: Table);
856 } else {
857 // For the MVP there is at most one table whose number is 0, but we can't
858 // write a table symbol or issue relocations. Instead we just ensure the
859 // table is live and write a zero.
860 Table->setNoStrip();
861 MIB.addImm(Val: 0);
862 }
863 }
864
865 for (auto Use : CallParams.uses())
866 MIB.add(MO: Use);
867
868 BB->insert(I: CallResults.getIterator(), M: MIB);
869 CallParams.eraseFromParent();
870 CallResults.eraseFromParent();
871
872 // If this is a funcref call, to avoid hidden GC roots, we need to clear the
873 // table slot with ref.null upon call_indirect return.
874 //
875 // This generates the following code, which comes right after a call_indirect
876 // of a funcref:
877 //
878 // i32.const 0
879 // ref.null func
880 // table.set __funcref_call_table
881 if (IsIndirect && IsFuncrefCall) {
882 MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
883 Ctx&: MF.getContext(), Subtarget);
884 Register RegZero =
885 MF.getRegInfo().createVirtualRegister(RegClass: &WebAssembly::I32RegClass);
886 MachineInstr *Const0 =
887 BuildMI(MF, MIMD: DL, MCID: TII.get(Opcode: WebAssembly::CONST_I32), DestReg: RegZero).addImm(Val: 0);
888 BB->insertAfter(I: MIB.getInstr()->getIterator(), MI: Const0);
889
890 Register RegFuncref =
891 MF.getRegInfo().createVirtualRegister(RegClass: &WebAssembly::FUNCREFRegClass);
892 MachineInstr *RefNull =
893 BuildMI(MF, MIMD: DL, MCID: TII.get(Opcode: WebAssembly::REF_NULL_FUNCREF), DestReg: RegFuncref);
894 BB->insertAfter(I: Const0->getIterator(), MI: RefNull);
895
896 MachineInstr *TableSet =
897 BuildMI(MF, MIMD: DL, MCID: TII.get(Opcode: WebAssembly::TABLE_SET_FUNCREF))
898 .addSym(Sym: Table)
899 .addReg(RegNo: RegZero)
900 .addReg(RegNo: RegFuncref);
901 BB->insertAfter(I: RefNull->getIterator(), MI: TableSet);
902 }
903
904 return BB;
905}
906
907MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
908 MachineInstr &MI, MachineBasicBlock *BB) const {
909 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
910 DebugLoc DL = MI.getDebugLoc();
911
912 switch (MI.getOpcode()) {
913 default:
914 llvm_unreachable("Unexpected instr type to insert");
915 case WebAssembly::FP_TO_SINT_I32_F32:
916 return LowerFPToInt(MI, DL, BB, TII, IsUnsigned: false, Int64: false, Float64: false,
917 LoweredOpcode: WebAssembly::I32_TRUNC_S_F32);
918 case WebAssembly::FP_TO_UINT_I32_F32:
919 return LowerFPToInt(MI, DL, BB, TII, IsUnsigned: true, Int64: false, Float64: false,
920 LoweredOpcode: WebAssembly::I32_TRUNC_U_F32);
921 case WebAssembly::FP_TO_SINT_I64_F32:
922 return LowerFPToInt(MI, DL, BB, TII, IsUnsigned: false, Int64: true, Float64: false,
923 LoweredOpcode: WebAssembly::I64_TRUNC_S_F32);
924 case WebAssembly::FP_TO_UINT_I64_F32:
925 return LowerFPToInt(MI, DL, BB, TII, IsUnsigned: true, Int64: true, Float64: false,
926 LoweredOpcode: WebAssembly::I64_TRUNC_U_F32);
927 case WebAssembly::FP_TO_SINT_I32_F64:
928 return LowerFPToInt(MI, DL, BB, TII, IsUnsigned: false, Int64: false, Float64: true,
929 LoweredOpcode: WebAssembly::I32_TRUNC_S_F64);
930 case WebAssembly::FP_TO_UINT_I32_F64:
931 return LowerFPToInt(MI, DL, BB, TII, IsUnsigned: true, Int64: false, Float64: true,
932 LoweredOpcode: WebAssembly::I32_TRUNC_U_F64);
933 case WebAssembly::FP_TO_SINT_I64_F64:
934 return LowerFPToInt(MI, DL, BB, TII, IsUnsigned: false, Int64: true, Float64: true,
935 LoweredOpcode: WebAssembly::I64_TRUNC_S_F64);
936 case WebAssembly::FP_TO_UINT_I64_F64:
937 return LowerFPToInt(MI, DL, BB, TII, IsUnsigned: true, Int64: true, Float64: true,
938 LoweredOpcode: WebAssembly::I64_TRUNC_U_F64);
939 case WebAssembly::MEMCPY_A32:
940 return LowerMemcpy(MI, DL, BB, TII, Int64: false);
941 case WebAssembly::MEMCPY_A64:
942 return LowerMemcpy(MI, DL, BB, TII, Int64: true);
943 case WebAssembly::MEMSET_A32:
944 return LowerMemset(MI, DL, BB, TII, Int64: false);
945 case WebAssembly::MEMSET_A64:
946 return LowerMemset(MI, DL, BB, TII, Int64: true);
947 case WebAssembly::CALL_RESULTS:
948 case WebAssembly::RET_CALL_RESULTS:
949 return LowerCallResults(CallResults&: MI, DL, BB, Subtarget, TII);
950 }
951}
952
953std::pair<unsigned, const TargetRegisterClass *>
954WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
955 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
956 // First, see if this is a constraint that directly corresponds to a
957 // WebAssembly register class.
958 if (Constraint.size() == 1) {
959 switch (Constraint[0]) {
960 case 'r':
961 assert(VT != MVT::iPTR && "Pointer MVT not expected here");
962 if (Subtarget->hasSIMD128() && VT.isVector()) {
963 if (VT.getSizeInBits() == 128)
964 return std::make_pair(x: 0U, y: &WebAssembly::V128RegClass);
965 }
966 if (VT.isInteger() && !VT.isVector()) {
967 if (VT.getSizeInBits() <= 32)
968 return std::make_pair(x: 0U, y: &WebAssembly::I32RegClass);
969 if (VT.getSizeInBits() <= 64)
970 return std::make_pair(x: 0U, y: &WebAssembly::I64RegClass);
971 }
972 if (VT.isFloatingPoint() && !VT.isVector()) {
973 switch (VT.getSizeInBits()) {
974 case 32:
975 return std::make_pair(x: 0U, y: &WebAssembly::F32RegClass);
976 case 64:
977 return std::make_pair(x: 0U, y: &WebAssembly::F64RegClass);
978 default:
979 break;
980 }
981 }
982 break;
983 default:
984 break;
985 }
986 }
987
988 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
989}
990
991bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
992 // Assume ctz is a relatively cheap operation.
993 return true;
994}
995
996bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
997 // Assume clz is a relatively cheap operation.
998 return true;
999}
1000
1001bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1002 const AddrMode &AM,
1003 Type *Ty, unsigned AS,
1004 Instruction *I) const {
1005 // WebAssembly offsets are added as unsigned without wrapping. The
1006 // isLegalAddressingMode gives us no way to determine if wrapping could be
1007 // happening, so we approximate this by accepting only non-negative offsets.
1008 if (AM.BaseOffs < 0)
1009 return false;
1010
1011 // WebAssembly has no scale register operands.
1012 if (AM.Scale != 0)
1013 return false;
1014
1015 // Everything else is legal.
1016 return true;
1017}
1018
1019bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
1020 EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
1021 MachineMemOperand::Flags /*Flags*/, unsigned *Fast) const {
1022 // WebAssembly supports unaligned accesses, though it should be declared
1023 // with the p2align attribute on loads and stores which do so, and there
1024 // may be a performance impact. We tell LLVM they're "fast" because
1025 // for the kinds of things that LLVM uses this for (merging adjacent stores
1026 // of constants, etc.), WebAssembly implementations will either want the
1027 // unaligned access or they'll split anyway.
1028 if (Fast)
1029 *Fast = 1;
1030 return true;
1031}
1032
1033bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
1034 AttributeList Attr) const {
1035 // The current thinking is that wasm engines will perform this optimization,
1036 // so we can save on code size.
1037 return true;
1038}
1039
1040bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
1041 EVT ExtT = ExtVal.getValueType();
1042 EVT MemT = cast<LoadSDNode>(Val: ExtVal->getOperand(Num: 0))->getValueType(ResNo: 0);
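  // These match the combinations handled by the SIMD extending loads,
  // e.g. v128.load8x8_s/u for v8i8 -> v8i16.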
1043 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
1044 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
1045 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
1046}
1047
1048bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
1049 const GlobalAddressSDNode *GA) const {
1050 // Wasm doesn't support function addresses with offsets
1051 const GlobalValue *GV = GA->getGlobal();
1052 return isa<Function>(Val: GV) ? false : TargetLowering::isOffsetFoldingLegal(GA);
1053}
1054
1055EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
1056 LLVMContext &C,
1057 EVT VT) const {
1058 if (VT.isVector())
1059 return VT.changeVectorElementTypeToInteger();
1060
1061 // So far, all branch instructions in Wasm take an I32 condition.
1062 // The default TargetLowering::getSetCCResultType returns the pointer size,
1063 // which would be useful to reduce instruction counts when testing
1064 // against 64-bit pointers/values if at some point Wasm supports that.
1065 return EVT::getIntegerVT(Context&: C, BitWidth: 32);
1066}
1067
1068void WebAssemblyTargetLowering::getTgtMemIntrinsic(
1069 SmallVectorImpl<IntrinsicInfo> &Infos, const CallBase &I,
1070 MachineFunction &MF, unsigned Intrinsic) const {
1071 IntrinsicInfo Info;
1072 switch (Intrinsic) {
1073 case Intrinsic::wasm_memory_atomic_notify:
1074 Info.opc = ISD::INTRINSIC_W_CHAIN;
1075 Info.memVT = MVT::i32;
1076 Info.ptrVal = I.getArgOperand(i: 0);
1077 Info.offset = 0;
1078 Info.align = Align(4);
    // The atomic.notify instruction does not really load from the memory
    // specified by this argument, but a MachineMemOperand must be either a
    // load or a store, so we mark this as a load.
1082 // FIXME Volatile isn't really correct, but currently all LLVM atomic
1083 // instructions are treated as volatiles in the backend, so we should be
1084 // consistent. The same applies for wasm_atomic_wait intrinsics too.
1085 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
1086 Infos.push_back(Elt: Info);
1087 return;
1088 case Intrinsic::wasm_memory_atomic_wait32:
1089 Info.opc = ISD::INTRINSIC_W_CHAIN;
1090 Info.memVT = MVT::i32;
1091 Info.ptrVal = I.getArgOperand(i: 0);
1092 Info.offset = 0;
1093 Info.align = Align(4);
1094 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
1095 Infos.push_back(Elt: Info);
1096 return;
1097 case Intrinsic::wasm_memory_atomic_wait64:
1098 Info.opc = ISD::INTRINSIC_W_CHAIN;
1099 Info.memVT = MVT::i64;
1100 Info.ptrVal = I.getArgOperand(i: 0);
1101 Info.offset = 0;
1102 Info.align = Align(8);
1103 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
1104 Infos.push_back(Elt: Info);
1105 return;
1106 case Intrinsic::wasm_loadf16_f32:
1107 Info.opc = ISD::INTRINSIC_W_CHAIN;
1108 Info.memVT = MVT::f16;
1109 Info.ptrVal = I.getArgOperand(i: 0);
1110 Info.offset = 0;
1111 Info.align = Align(2);
1112 Info.flags = MachineMemOperand::MOLoad;
1113 Infos.push_back(Elt: Info);
1114 return;
1115 case Intrinsic::wasm_storef16_f32:
1116 Info.opc = ISD::INTRINSIC_VOID;
1117 Info.memVT = MVT::f16;
1118 Info.ptrVal = I.getArgOperand(i: 1);
1119 Info.offset = 0;
1120 Info.align = Align(2);
1121 Info.flags = MachineMemOperand::MOStore;
1122 Infos.push_back(Elt: Info);
1123 return;
1124 default:
1125 return;
1126 }
1127}
1128
1129void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
1130 const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
1131 const SelectionDAG &DAG, unsigned Depth) const {
1132 switch (Op.getOpcode()) {
1133 default:
1134 break;
1135 case ISD::INTRINSIC_WO_CHAIN: {
1136 unsigned IntNo = Op.getConstantOperandVal(i: 0);
1137 switch (IntNo) {
1138 default:
1139 break;
1140 case Intrinsic::wasm_bitmask: {
1141 unsigned BitWidth = Known.getBitWidth();
1142 EVT VT = Op.getOperand(i: 1).getSimpleValueType();
1143 unsigned PossibleBits = VT.getVectorNumElements();
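      // E.g. i8x16.bitmask produces at most a 16-bit mask, so the upper 16
      // bits of its i32 result are known to be zero.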
1144 APInt ZeroMask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - PossibleBits);
1145 Known.Zero |= ZeroMask;
1146 break;
1147 }
1148 }
1149 break;
1150 }
1151 case WebAssemblyISD::EXTEND_LOW_U:
1152 case WebAssemblyISD::EXTEND_HIGH_U: {
    // We know the high half of each destination vector element will be zero.
1154 SDValue SrcOp = Op.getOperand(i: 0);
1155 EVT VT = SrcOp.getSimpleValueType();
1156 unsigned BitWidth = Known.getBitWidth();
1157 if (VT == MVT::v8i8 || VT == MVT::v16i8) {
1158 assert(BitWidth >= 8 && "Unexpected width!");
1159 APInt Mask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - 8);
1160 Known.Zero |= Mask;
1161 } else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
1162 assert(BitWidth >= 16 && "Unexpected width!");
1163 APInt Mask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - 16);
1164 Known.Zero |= Mask;
1165 } else if (VT == MVT::v2i32 || VT == MVT::v4i32) {
1166 assert(BitWidth >= 32 && "Unexpected width!");
1167 APInt Mask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - 32);
1168 Known.Zero |= Mask;
1169 }
1170 break;
1171 }
  // For 128-bit addition, if the upper halves of both operands are zero, then
  // the upper half of the result can only hold the carry, so all of its bits
  // except the lowest are known to be zero.
1175 case WebAssemblyISD::I64_ADD128:
1176 if (Op.getResNo() == 1) {
1177 SDValue LHS_HI = Op.getOperand(i: 1);
1178 SDValue RHS_HI = Op.getOperand(i: 3);
1179 if (isNullConstant(V: LHS_HI) && isNullConstant(V: RHS_HI))
1180 Known.Zero.setBitsFrom(1);
1181 }
1182 break;
1183 }
1184}
1185
1186TargetLoweringBase::LegalizeTypeAction
1187WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
1188 if (VT.isFixedLengthVector()) {
1189 MVT EltVT = VT.getVectorElementType();
1190 // We have legal vector types with these lane types, so widening the
1191 // vector would let us use some of the lanes directly without having to
1192 // extend or truncate values.
1193 if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
1194 EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
1195 return TypeWidenVector;
1196 }
1197
1198 return TargetLoweringBase::getPreferredVectorAction(VT);
1199}
1200
1201bool WebAssemblyTargetLowering::isFMAFasterThanFMulAndFAdd(
1202 const MachineFunction &MF, EVT VT) const {
1203 if (!Subtarget->hasFP16() || !VT.isVector())
1204 return false;
1205
1206 EVT ScalarVT = VT.getScalarType();
1207 if (!ScalarVT.isSimple())
1208 return false;
1209
1210 return ScalarVT.getSimpleVT().SimpleTy == MVT::f16;
1211}
1212
1213bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
1214 SDValue Op, const TargetLoweringOpt &TLO) const {
  // The ISel process runs DAGCombiner after legalization; this step is called
  // the SelectionDAG optimization phase. This post-legalization combining
  // process runs DAGCombiner on each node, and if there was a change to be
  // made, re-runs legalization on it and its user nodes to make sure
  // everything is in a legalized state.
  //
  // The legalization calls lowering routines, and we do our custom lowering
  // for build_vectors (LowerBUILD_VECTOR), which converts undef vector
  // elements into zeros. But there is a set of routines in DAGCombiner that
  // turns unused (= not demanded) nodes into undef, among which
  // SimplifyDemandedVectorElts turns unused vector elements into undefs. But
  // this routine does not work with our custom LowerBUILD_VECTOR, which turns
  // undefs into zeros. This combination can result in an infinite loop, in
  // which undefs are converted to zeros in legalization and back to undefs in
  // combining.
  //
  // So after the DAG is legalized, we prevent SimplifyDemandedVectorElts from
  // running for build_vectors.
1232 if (Op.getOpcode() == ISD::BUILD_VECTOR && TLO.LegalOps && TLO.LegalTys)
1233 return false;
1234 return true;
1235}
1236
1237//===----------------------------------------------------------------------===//
1238// WebAssembly Lowering private implementation.
1239//===----------------------------------------------------------------------===//
1240
1241//===----------------------------------------------------------------------===//
1242// Lowering Code
1243//===----------------------------------------------------------------------===//
1244
1245static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
1246 MachineFunction &MF = DAG.getMachineFunction();
1247 DAG.getContext()->diagnose(
1248 DI: DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
1249}
1250
1251// Test whether the given calling convention is supported.
1252static bool callingConvSupported(CallingConv::ID CallConv) {
1253 // We currently support the language-independent target-independent
1254 // conventions. We don't yet have a way to annotate calls with properties like
1255 // "cold", and we don't have any call-clobbered registers, so these are mostly
1256 // all handled the same.
1257 return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
1258 CallConv == CallingConv::Cold ||
1259 CallConv == CallingConv::PreserveMost ||
1260 CallConv == CallingConv::PreserveAll ||
1261 CallConv == CallingConv::CXX_FAST_TLS ||
1262 CallConv == CallingConv::WASM_EmscriptenInvoke ||
1263 CallConv == CallingConv::Swift;
1264}
1265
1266SDValue
1267WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
1268 SmallVectorImpl<SDValue> &InVals) const {
1269 SelectionDAG &DAG = CLI.DAG;
1270 SDLoc DL = CLI.DL;
1271 SDValue Chain = CLI.Chain;
1272 SDValue Callee = CLI.Callee;
1273 MachineFunction &MF = DAG.getMachineFunction();
1274 auto Layout = MF.getDataLayout();
1275
1276 CallingConv::ID CallConv = CLI.CallConv;
1277 if (!callingConvSupported(CallConv))
1278 fail(DL, DAG,
1279 Msg: "WebAssembly doesn't support language-specific or target-specific "
1280 "calling conventions yet");
1281 if (CLI.IsPatchPoint)
1282 fail(DL, DAG, Msg: "WebAssembly doesn't support patch point yet");
1283
1284 if (CLI.IsTailCall) {
1285 auto NoTail = [&](const char *Msg) {
1286 if (CLI.CB && CLI.CB->isMustTailCall())
1287 fail(DL, DAG, Msg);
1288 CLI.IsTailCall = false;
1289 };
1290
1291 if (!Subtarget->hasTailCall())
1292 NoTail("WebAssembly 'tail-call' feature not enabled");
1293
1294 // Varargs calls cannot be tail calls because the buffer is on the stack
1295 if (CLI.IsVarArg)
1296 NoTail("WebAssembly does not support varargs tail calls");
1297
1298 // Do not tail call unless caller and callee return types match
1299 const Function &F = MF.getFunction();
1300 const TargetMachine &TM = getTargetMachine();
1301 Type *RetTy = F.getReturnType();
1302 SmallVector<MVT, 4> CallerRetTys;
1303 SmallVector<MVT, 4> CalleeRetTys;
1304 computeLegalValueVTs(F, TM, Ty: RetTy, ValueVTs&: CallerRetTys);
1305 computeLegalValueVTs(F, TM, Ty: CLI.RetTy, ValueVTs&: CalleeRetTys);
1306 bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
1307 std::equal(first1: CallerRetTys.begin(), last1: CallerRetTys.end(),
1308 first2: CalleeRetTys.begin());
1309 if (!TypesMatch)
1310 NoTail("WebAssembly tail call requires caller and callee return types to "
1311 "match");
1312
1313 // If pointers to local stack values are passed, we cannot tail call
1314 if (CLI.CB) {
1315 for (auto &Arg : CLI.CB->args()) {
1316 Value *Val = Arg.get();
1317 // Trace the value back through pointer operations
1318 while (true) {
1319 Value *Src = Val->stripPointerCastsAndAliases();
1320 if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: Src))
1321 Src = GEP->getPointerOperand();
1322 if (Val == Src)
1323 break;
1324 Val = Src;
1325 }
1326 if (isa<AllocaInst>(Val)) {
1327 NoTail(
1328 "WebAssembly does not support tail calling with stack arguments");
1329 break;
1330 }
1331 }
1332 }
1333 }
1334
1335 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1336 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1337 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1338
1339 // The generic code may have added an sret argument. If we're lowering an
1340 // invoke function, the ABI requires that the function pointer be the first
1341 // argument, so we may have to swap the arguments.
1342 if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
1343 Outs[0].Flags.isSRet()) {
1344 std::swap(a&: Outs[0], b&: Outs[1]);
1345 std::swap(a&: OutVals[0], b&: OutVals[1]);
1346 }
1347
1348 bool HasSwiftSelfArg = false;
1349 bool HasSwiftErrorArg = false;
1350 unsigned NumFixedArgs = 0;
1351 for (unsigned I = 0; I < Outs.size(); ++I) {
1352 const ISD::OutputArg &Out = Outs[I];
1353 SDValue &OutVal = OutVals[I];
1354 HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
1355 HasSwiftErrorArg |= Out.Flags.isSwiftError();
1356 if (Out.Flags.isNest())
1357 fail(DL, DAG, Msg: "WebAssembly hasn't implemented nest arguments");
1358 if (Out.Flags.isInAlloca())
1359 fail(DL, DAG, Msg: "WebAssembly hasn't implemented inalloca arguments");
1360 if (Out.Flags.isInConsecutiveRegs())
1361 fail(DL, DAG, Msg: "WebAssembly hasn't implemented cons regs arguments");
1362 if (Out.Flags.isInConsecutiveRegsLast())
1363 fail(DL, DAG, Msg: "WebAssembly hasn't implemented cons regs last arguments");
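    // byval arguments are passed by value on the wasm stack: a copy is made
    // into a fresh stack object in the caller's frame and a pointer to that
    // copy is passed instead (see the memcpy below).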
1364 if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
1365 auto &MFI = MF.getFrameInfo();
1366 int FI = MFI.CreateStackObject(Size: Out.Flags.getByValSize(),
1367 Alignment: Out.Flags.getNonZeroByValAlign(),
1368 /*isSS=*/isSpillSlot: false);
1369 SDValue SizeNode =
1370 DAG.getConstant(Val: Out.Flags.getByValSize(), DL, VT: MVT::i32);
1371 SDValue FINode = DAG.getFrameIndex(FI, VT: getPointerTy(DL: Layout));
1372 Chain = DAG.getMemcpy(Chain, dl: DL, Dst: FINode, Src: OutVal, Size: SizeNode,
1373 Alignment: Out.Flags.getNonZeroByValAlign(),
1374 /*isVolatile*/ isVol: false, /*AlwaysInline=*/false,
1375 /*CI=*/nullptr, OverrideTailCall: std::nullopt, DstPtrInfo: MachinePointerInfo(),
1376 SrcPtrInfo: MachinePointerInfo());
1377 OutVal = FINode;
1378 }
1379 // Count the number of fixed args *after* legalization.
1380 NumFixedArgs += !Out.Flags.isVarArg();
1381 }
1382
1383 bool IsVarArg = CLI.IsVarArg;
1384 auto PtrVT = getPointerTy(DL: Layout);
1385
1386 // For swiftcc, emit additional swiftself and swifterror arguments if there
1387 // aren't any. These additional arguments are also added to the callee's
1388 // signature; they are necessary to keep caller and callee signatures in
1389 // sync for indirect calls.
1390 if (CallConv == CallingConv::Swift) {
1391 Type *PtrTy = PointerType::getUnqual(C&: *DAG.getContext());
1392 if (!HasSwiftSelfArg) {
1393 NumFixedArgs++;
1394 ISD::ArgFlagsTy Flags;
1395 Flags.setSwiftSelf();
1396 ISD::OutputArg Arg(Flags, PtrVT, EVT(PtrVT), PtrTy, 0, 0);
1397 CLI.Outs.push_back(Elt: Arg);
1398 SDValue ArgVal = DAG.getUNDEF(VT: PtrVT);
1399 CLI.OutVals.push_back(Elt: ArgVal);
1400 }
1401 if (!HasSwiftErrorArg) {
1402 NumFixedArgs++;
1403 ISD::ArgFlagsTy Flags;
1404 Flags.setSwiftError();
1405 ISD::OutputArg Arg(Flags, PtrVT, EVT(PtrVT), PtrTy, 0, 0);
1406 CLI.Outs.push_back(Elt: Arg);
1407 SDValue ArgVal = DAG.getUNDEF(VT: PtrVT);
1408 CLI.OutVals.push_back(Elt: ArgVal);
1409 }
1410 }
1411
1412 // Analyze operands of the call, assigning locations to each operand.
1413 SmallVector<CCValAssign, 16> ArgLocs;
1414 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1415
1416 if (IsVarArg) {
1417 // Outgoing non-fixed arguments are placed in a buffer. First
1418 // compute their offsets and the total amount of buffer space needed.
1419 for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
1420 const ISD::OutputArg &Out = Outs[I];
1421 SDValue &Arg = OutVals[I];
1422 EVT VT = Arg.getValueType();
1423 assert(VT != MVT::iPTR && "Legalized args should be concrete");
1424 Type *Ty = VT.getTypeForEVT(Context&: *DAG.getContext());
1425 Align Alignment =
1426 std::max(a: Out.Flags.getNonZeroOrigAlign(), b: Layout.getABITypeAlign(Ty));
1427 unsigned Offset =
1428 CCInfo.AllocateStack(Size: Layout.getTypeAllocSize(Ty), Alignment);
1429 CCInfo.addLoc(V: CCValAssign::getMem(ValNo: ArgLocs.size(), ValVT: VT.getSimpleVT(),
1430 Offset, LocVT: VT.getSimpleVT(),
1431 HTP: CCValAssign::Full));
1432 }
1433 }
1434
1435 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
1436
1437 SDValue FINode;
1438 if (IsVarArg && NumBytes) {
1439 // For non-fixed arguments, next emit stores to store the argument values
1440 // to the stack buffer at the offsets computed above.
1441 MaybeAlign StackAlign = Layout.getStackAlignment();
1442 assert(StackAlign && "data layout string is missing stack alignment");
1443 int FI = MF.getFrameInfo().CreateStackObject(Size: NumBytes, Alignment: *StackAlign,
1444 /*isSS=*/isSpillSlot: false);
1445 unsigned ValNo = 0;
1446 SmallVector<SDValue, 8> Chains;
1447 for (SDValue Arg : drop_begin(RangeOrContainer&: OutVals, N: NumFixedArgs)) {
1448 assert(ArgLocs[ValNo].getValNo() == ValNo &&
1449 "ArgLocs should remain in order and only hold varargs args");
1450 unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
1451 FINode = DAG.getFrameIndex(FI, VT: getPointerTy(DL: Layout));
1452 SDValue Add = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: FINode,
1453 N2: DAG.getConstant(Val: Offset, DL, VT: PtrVT));
1454 Chains.push_back(
1455 Elt: DAG.getStore(Chain, dl: DL, Val: Arg, Ptr: Add,
1456 PtrInfo: MachinePointerInfo::getFixedStack(MF, FI, Offset)));
1457 }
1458 if (!Chains.empty())
1459 Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains);
1460 } else if (IsVarArg) {
1461 FINode = DAG.getIntPtrConstant(Val: 0, DL);
1462 }
1463
1464 if (Callee->getOpcode() == ISD::GlobalAddress) {
1465 // If the callee is a GlobalAddress node (quite common, since every direct
1466 // call is), turn it into a TargetGlobalAddress node so that
1467 // LowerGlobalAddress doesn't add MO_GOT, which is not needed for direct calls.
1468 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Val&: Callee);
1469 Callee = DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL,
1470 VT: getPointerTy(DL: DAG.getDataLayout()),
1471 offset: GA->getOffset());
1472 Callee = DAG.getNode(Opcode: WebAssemblyISD::Wrapper, DL,
1473 VT: getPointerTy(DL: DAG.getDataLayout()), Operand: Callee);
1474 }
1475
1476 // Compute the operands for the CALLn node.
1477 SmallVector<SDValue, 16> Ops;
1478 Ops.push_back(Elt: Chain);
1479 Ops.push_back(Elt: Callee);
1480
1481 // Add all fixed arguments. Note that NumFixedArgs is only reliable for
1482 // varargs calls, so for non-varargs calls all of OutVals is used.
1483 Ops.append(in_start: OutVals.begin(),
1484 in_end: IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
1485 // Add a pointer to the vararg buffer.
1486 if (IsVarArg)
1487 Ops.push_back(Elt: FINode);
1488
1489 SmallVector<EVT, 8> InTys;
1490 for (const auto &In : Ins) {
1491 assert(!In.Flags.isByVal() && "byval is not valid for return values");
1492 assert(!In.Flags.isNest() && "nest is not valid for return values");
1493 if (In.Flags.isInAlloca())
1494 fail(DL, DAG, Msg: "WebAssembly hasn't implemented inalloca return values");
1495 if (In.Flags.isInConsecutiveRegs())
1496 fail(DL, DAG, Msg: "WebAssembly hasn't implemented cons regs return values");
1497 if (In.Flags.isInConsecutiveRegsLast())
1498 fail(DL, DAG,
1499 Msg: "WebAssembly hasn't implemented cons regs last return values");
1500 // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1501 // registers.
1502 InTys.push_back(Elt: In.VT);
1503 }
1504
1505 // Lastly, if this is a call to a funcref, we need to add a table.set
1506 // instruction to the chain and transform the call.
1507 if (CLI.CB && WebAssembly::isWebAssemblyFuncrefType(
1508 Ty: CLI.CB->getCalledOperand()->getType())) {
1509 // In the absence of the function-references proposal, where a funcref call
1510 // would be lowered to call_ref, we instead use reference types: generate a
1511 // table.set that stores the funcref into a special table used solely for
1512 // this purpose, followed by a call_indirect. Here we just generate the
1513 // table.set and return its SDValue so that the rest of LowerCall can
1514 // finalize the lowering by generating the call_indirect.
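  // For illustration, a call through a funcref %f roughly becomes:
  //   table.set  <funcref call table>, slot 0, %f
  //   call_indirect through slot 0 of that table, using %f's signature
  // (a sketch only; the table symbol and slot constant are created just below).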
1515 SDValue Chain = Ops[0];
1516
1517 MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
1518 Ctx&: MF.getContext(), Subtarget);
1519 SDValue Sym = DAG.getMCSymbol(Sym: Table, VT: PtrVT);
1520 SDValue TableSlot = DAG.getConstant(Val: 0, DL, VT: MVT::i32);
1521 SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
1522 SDValue TableSet = DAG.getMemIntrinsicNode(
1523 Opcode: WebAssemblyISD::TABLE_SET, dl: DL, VTList: DAG.getVTList(VT: MVT::Other), Ops: TableSetOps,
1524 MemVT: MVT::funcref,
1525 // Machine Mem Operand args
1526 PtrInfo: MachinePointerInfo(
1527 WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF),
1528 Alignment: CLI.CB->getCalledOperand()->getPointerAlignment(DL: DAG.getDataLayout()),
1529 Flags: MachineMemOperand::MOStore);
1530
1531 Ops[0] = TableSet; // The new chain is the TableSet itself
1532 }
1533
1534 if (CLI.IsTailCall) {
1535 // ret_calls do not return values to the current frame
1536 SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue);
1537 return DAG.getNode(Opcode: WebAssemblyISD::RET_CALL, DL, VTList: NodeTys, Ops);
1538 }
1539
1540 InTys.push_back(Elt: MVT::Other);
1541 SDVTList InTyList = DAG.getVTList(VTs: InTys);
1542 SDValue Res = DAG.getNode(Opcode: WebAssemblyISD::CALL, DL, VTList: InTyList, Ops);
1543
1544 for (size_t I = 0; I < Ins.size(); ++I)
1545 InVals.push_back(Elt: Res.getValue(R: I));
1546
1547 // Return the chain
1548 return Res.getValue(R: Ins.size());
1549}
1550
1551bool WebAssemblyTargetLowering::CanLowerReturn(
1552 CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
1553 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext & /*Context*/,
1554 const Type *RetTy) const {
1555 // WebAssembly can only handle returning tuples with multivalue enabled
1556 return WebAssembly::canLowerReturn(ResultSize: Outs.size(), Subtarget);
1557}
1558
1559SDValue WebAssemblyTargetLowering::LowerReturn(
1560 SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
1561 const SmallVectorImpl<ISD::OutputArg> &Outs,
1562 const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
1563 SelectionDAG &DAG) const {
1564 assert(WebAssembly::canLowerReturn(Outs.size(), Subtarget) &&
1565 "MVP WebAssembly can only return up to one value");
1566 if (!callingConvSupported(CallConv))
1567 fail(DL, DAG, Msg: "WebAssembly doesn't support non-C calling conventions");
1568
1569 SmallVector<SDValue, 4> RetOps(1, Chain);
1570 RetOps.append(in_start: OutVals.begin(), in_end: OutVals.end());
1571 Chain = DAG.getNode(Opcode: WebAssemblyISD::RETURN, DL, VT: MVT::Other, Ops: RetOps);
1572
1573 // Record the number and types of the return values.
1574 for (const ISD::OutputArg &Out : Outs) {
1575 assert(!Out.Flags.isByVal() && "byval is not valid for return values");
1576 assert(!Out.Flags.isNest() && "nest is not valid for return values");
1577 assert(!Out.Flags.isVarArg() && "non-fixed return value is not valid");
1578 if (Out.Flags.isInAlloca())
1579 fail(DL, DAG, Msg: "WebAssembly hasn't implemented inalloca results");
1580 if (Out.Flags.isInConsecutiveRegs())
1581 fail(DL, DAG, Msg: "WebAssembly hasn't implemented cons regs results");
1582 if (Out.Flags.isInConsecutiveRegsLast())
1583 fail(DL, DAG, Msg: "WebAssembly hasn't implemented cons regs last results");
1584 }
1585
1586 return Chain;
1587}
1588
1589SDValue WebAssemblyTargetLowering::LowerFormalArguments(
1590 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1591 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1592 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1593 if (!callingConvSupported(CallConv))
1594 fail(DL, DAG, Msg: "WebAssembly doesn't support non-C calling conventions");
1595
1596 MachineFunction &MF = DAG.getMachineFunction();
1597 auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
1598
1599 // Set up the incoming ARGUMENTS value, which serves to represent the liveness
1600 // of the incoming values before they're represented by virtual registers.
1601 MF.getRegInfo().addLiveIn(Reg: WebAssembly::ARGUMENTS);
1602
1603 bool HasSwiftErrorArg = false;
1604 bool HasSwiftSelfArg = false;
1605 for (const ISD::InputArg &In : Ins) {
1606 HasSwiftSelfArg |= In.Flags.isSwiftSelf();
1607 HasSwiftErrorArg |= In.Flags.isSwiftError();
1608 if (In.Flags.isInAlloca())
1609 fail(DL, DAG, Msg: "WebAssembly hasn't implemented inalloca arguments");
1610 if (In.Flags.isNest())
1611 fail(DL, DAG, Msg: "WebAssembly hasn't implemented nest arguments");
1612 if (In.Flags.isInConsecutiveRegs())
1613 fail(DL, DAG, Msg: "WebAssembly hasn't implemented cons regs arguments");
1614 if (In.Flags.isInConsecutiveRegsLast())
1615 fail(DL, DAG, Msg: "WebAssembly hasn't implemented cons regs last arguments");
1616 // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1617 // registers.
1618 InVals.push_back(Elt: In.Used ? DAG.getNode(Opcode: WebAssemblyISD::ARGUMENT, DL, VT: In.VT,
1619 Operand: DAG.getTargetConstant(Val: InVals.size(),
1620 DL, VT: MVT::i32))
1621 : DAG.getUNDEF(VT: In.VT));
1622
1623 // Record the number and types of arguments.
1624 MFI->addParam(VT: In.VT);
1625 }
1626
1627 // For swiftcc, emit additional swiftself and swifterror arguments if there
1628 // aren't any. These additional arguments are also added to the callee's
1629 // signature; they are necessary to keep caller and callee signatures in
1630 // sync for indirect calls.
1631 auto PtrVT = getPointerTy(DL: MF.getDataLayout());
1632 if (CallConv == CallingConv::Swift) {
1633 if (!HasSwiftSelfArg) {
1634 MFI->addParam(VT: PtrVT);
1635 }
1636 if (!HasSwiftErrorArg) {
1637 MFI->addParam(VT: PtrVT);
1638 }
1639 }
1640 // Varargs are copied into a buffer allocated by the caller, and a pointer to
1641 // the buffer is passed as an argument.
1642 if (IsVarArg) {
1643 MVT PtrVT = getPointerTy(DL: MF.getDataLayout());
1644 Register VarargVreg =
1645 MF.getRegInfo().createVirtualRegister(RegClass: getRegClassFor(VT: PtrVT));
1646 MFI->setVarargBufferVreg(VarargVreg);
1647 Chain = DAG.getCopyToReg(
1648 Chain, dl: DL, Reg: VarargVreg,
1649 N: DAG.getNode(Opcode: WebAssemblyISD::ARGUMENT, DL, VT: PtrVT,
1650 Operand: DAG.getTargetConstant(Val: Ins.size(), DL, VT: MVT::i32)));
1651 MFI->addParam(VT: PtrVT);
1652 }
1653
1654 // Record the number and types of arguments and results.
1655 SmallVector<MVT, 4> Params;
1656 SmallVector<MVT, 4> Results;
1657 computeSignatureVTs(Ty: MF.getFunction().getFunctionType(), TargetFunc: &MF.getFunction(),
1658 ContextFunc: MF.getFunction(), TM: DAG.getTarget(), Params, Results);
1659 for (MVT VT : Results)
1660 MFI->addResult(VT);
1661 // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
1662 // the param logic here with ComputeSignatureVTs
1663 assert(MFI->getParams().size() == Params.size() &&
1664 std::equal(MFI->getParams().begin(), MFI->getParams().end(),
1665 Params.begin()));
1666
1667 return Chain;
1668}
1669
1670void WebAssemblyTargetLowering::ReplaceNodeResults(
1671 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
1672 switch (N->getOpcode()) {
1673 case ISD::SIGN_EXTEND_INREG:
1674 // Do not add any results, signifying that N should not be custom lowered
1675 // after all. This happens because simd128 turns on custom lowering for
1676 // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
1677 // illegal type.
1678 break;
1679 case ISD::ANY_EXTEND_VECTOR_INREG:
1680 case ISD::SIGN_EXTEND_VECTOR_INREG:
1681 case ISD::ZERO_EXTEND_VECTOR_INREG:
1682 // Do not add any results, signifying that N should not be custom lowered.
1683 // EXTEND_VECTOR_INREG is implemented for some vectors, but not all.
1684 break;
1685 case ISD::ADD:
1686 case ISD::SUB:
1687 Results.push_back(Elt: Replace128Op(N, DAG));
1688 break;
1689 default:
1690 llvm_unreachable(
1691 "ReplaceNodeResults not implemented for this op for WebAssembly!");
1692 }
1693}
1694
1695//===----------------------------------------------------------------------===//
1696// Custom lowering hooks.
1697//===----------------------------------------------------------------------===//
1698
1699SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
1700 SelectionDAG &DAG) const {
1701 SDLoc DL(Op);
1702 switch (Op.getOpcode()) {
1703 default:
1704 llvm_unreachable("unimplemented operation lowering");
1705 return SDValue();
1706 case ISD::FrameIndex:
1707 return LowerFrameIndex(Op, DAG);
1708 case ISD::GlobalAddress:
1709 return LowerGlobalAddress(Op, DAG);
1710 case ISD::GlobalTLSAddress:
1711 return LowerGlobalTLSAddress(Op, DAG);
1712 case ISD::ExternalSymbol:
1713 return LowerExternalSymbol(Op, DAG);
1714 case ISD::JumpTable:
1715 return LowerJumpTable(Op, DAG);
1716 case ISD::BR_JT:
1717 return LowerBR_JT(Op, DAG);
1718 case ISD::VASTART:
1719 return LowerVASTART(Op, DAG);
1720 case ISD::BlockAddress:
1721 case ISD::BRIND:
1722 fail(DL, DAG, Msg: "WebAssembly hasn't implemented computed gotos");
1723 return SDValue();
1724 case ISD::RETURNADDR:
1725 return LowerRETURNADDR(Op, DAG);
1726 case ISD::FRAMEADDR:
1727 return LowerFRAMEADDR(Op, DAG);
1728 case ISD::CopyToReg:
1729 return LowerCopyToReg(Op, DAG);
1730 case ISD::EXTRACT_VECTOR_ELT:
1731 case ISD::INSERT_VECTOR_ELT:
1732 return LowerAccessVectorElement(Op, DAG);
1733 case ISD::INTRINSIC_VOID:
1734 case ISD::INTRINSIC_WO_CHAIN:
1735 case ISD::INTRINSIC_W_CHAIN:
1736 return LowerIntrinsic(Op, DAG);
1737 case ISD::SIGN_EXTEND_INREG:
1738 return LowerSIGN_EXTEND_INREG(Op, DAG);
1739 case ISD::ZERO_EXTEND_VECTOR_INREG:
1740 case ISD::SIGN_EXTEND_VECTOR_INREG:
1741 case ISD::ANY_EXTEND_VECTOR_INREG:
1742 return LowerEXTEND_VECTOR_INREG(Op, DAG);
1743 case ISD::BUILD_VECTOR:
1744 return LowerBUILD_VECTOR(Op, DAG);
1745 case ISD::VECTOR_SHUFFLE:
1746 return LowerVECTOR_SHUFFLE(Op, DAG);
1747 case ISD::SETCC:
1748 return LowerSETCC(Op, DAG);
1749 case ISD::SHL:
1750 case ISD::SRA:
1751 case ISD::SRL:
1752 return LowerShift(Op, DAG);
1753 case ISD::FP_TO_SINT_SAT:
1754 case ISD::FP_TO_UINT_SAT:
1755 return LowerFP_TO_INT_SAT(Op, DAG);
1756 case ISD::FMINNUM:
1757 case ISD::FMINIMUMNUM:
1758 return LowerFMIN(Op, DAG);
1759 case ISD::FMAXNUM:
1760 case ISD::FMAXIMUMNUM:
1761 return LowerFMAX(Op, DAG);
1762 case ISD::LOAD:
1763 return LowerLoad(Op, DAG);
1764 case ISD::STORE:
1765 return LowerStore(Op, DAG);
1766 case ISD::CTPOP:
1767 case ISD::CTLZ:
1768 case ISD::CTTZ:
1769 return DAG.UnrollVectorOp(N: Op.getNode());
1770 case ISD::CLEAR_CACHE:
1771 report_fatal_error(reason: "llvm.clear_cache is not supported on wasm");
1772 case ISD::SMUL_LOHI:
1773 case ISD::UMUL_LOHI:
1774 return LowerMUL_LOHI(Op, DAG);
1775 case ISD::UADDO:
1776 return LowerUADDO(Op, DAG);
1777 }
1778}
1779
1780static bool IsWebAssemblyGlobal(SDValue Op) {
1781 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Val&: Op))
1782 return WebAssembly::isWasmVarAddressSpace(AS: GA->getAddressSpace());
1783
1784 return false;
1785}
1786
1787static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op,
1788 SelectionDAG &DAG) {
1789 const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Val&: Op);
1790 if (!FI)
1791 return std::nullopt;
1792
1793 auto &MF = DAG.getMachineFunction();
1794 return WebAssemblyFrameLowering::getLocalForStackObject(MF, FrameIndex: FI->getIndex());
1795}
1796
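// Stores whose address is a wasm global (in the wasm_var address space) lower
// to global.set, stores to stack objects that have been assigned wasm locals
// lower to local.set, and all other stores fall through to the normal store
// patterns.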
1797SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
1798 SelectionDAG &DAG) const {
1799 SDLoc DL(Op);
1800 StoreSDNode *SN = cast<StoreSDNode>(Val: Op.getNode());
1801 const SDValue &Value = SN->getValue();
1802 const SDValue &Base = SN->getBasePtr();
1803 const SDValue &Offset = SN->getOffset();
1804
1805 if (IsWebAssemblyGlobal(Op: Base)) {
1806 if (!Offset->isUndef())
1807 report_fatal_error(reason: "unexpected offset when storing to webassembly global",
1808 gen_crash_diag: false);
1809
1810 SDVTList Tys = DAG.getVTList(VT: MVT::Other);
1811 SDValue Ops[] = {SN->getChain(), Value, Base};
1812 return DAG.getMemIntrinsicNode(Opcode: WebAssemblyISD::GLOBAL_SET, dl: DL, VTList: Tys, Ops,
1813 MemVT: SN->getMemoryVT(), MMO: SN->getMemOperand());
1814 }
1815
1816 if (std::optional<unsigned> Local = IsWebAssemblyLocal(Op: Base, DAG)) {
1817 if (!Offset->isUndef())
1818 report_fatal_error(reason: "unexpected offset when storing to webassembly local",
1819 gen_crash_diag: false);
1820
1821 SDValue Idx = DAG.getTargetConstant(Val: *Local, DL: Base, VT: MVT::i32);
1822 SDVTList Tys = DAG.getVTList(VT: MVT::Other); // The chain.
1823 SDValue Ops[] = {SN->getChain(), Idx, Value};
1824 return DAG.getNode(Opcode: WebAssemblyISD::LOCAL_SET, DL, VTList: Tys, Ops);
1825 }
1826
1827 if (WebAssembly::isWasmVarAddressSpace(AS: SN->getAddressSpace()))
1828 report_fatal_error(
1829 reason: "Encountered an unlowerable store to the wasm_var address space",
1830 gen_crash_diag: false);
1831
1832 return Op;
1833}
1834
1835SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
1836 SelectionDAG &DAG) const {
1837 SDLoc DL(Op);
1838 LoadSDNode *LN = cast<LoadSDNode>(Val: Op.getNode());
1839 const SDValue &Base = LN->getBasePtr();
1840 const SDValue &Offset = LN->getOffset();
1841
1842 if (IsWebAssemblyGlobal(Op: Base)) {
1843 if (!Offset->isUndef())
1844 report_fatal_error(
1845 reason: "unexpected offset when loading from webassembly global", gen_crash_diag: false);
1846
1847 SDVTList Tys = DAG.getVTList(VT1: LN->getValueType(ResNo: 0), VT2: MVT::Other);
1848 SDValue Ops[] = {LN->getChain(), Base};
1849 return DAG.getMemIntrinsicNode(Opcode: WebAssemblyISD::GLOBAL_GET, dl: DL, VTList: Tys, Ops,
1850 MemVT: LN->getMemoryVT(), MMO: LN->getMemOperand());
1851 }
1852
1853 if (std::optional<unsigned> Local = IsWebAssemblyLocal(Op: Base, DAG)) {
1854 if (!Offset->isUndef())
1855 report_fatal_error(
1856 reason: "unexpected offset when loading from webassembly local", gen_crash_diag: false);
1857
1858 SDValue Idx = DAG.getTargetConstant(Val: *Local, DL: Base, VT: MVT::i32);
1859 EVT LocalVT = LN->getValueType(ResNo: 0);
1860 return DAG.getNode(Opcode: WebAssemblyISD::LOCAL_GET, DL, ResultTys: {LocalVT, MVT::Other},
1861 Ops: {LN->getChain(), Idx});
1862 }
1863
1864 if (WebAssembly::isWasmVarAddressSpace(AS: LN->getAddressSpace()))
1865 report_fatal_error(
1866 reason: "Encountered an unlowerable load from the wasm_var address space",
1867 gen_crash_diag: false);
1868
1869 return Op;
1870}
1871
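// Lowers [SU]MUL_LOHI on i64 to a single wide-arithmetic multiply that yields
// both halves at once. A minimal sketch of the resulting DAG:
//   {Lo, Hi} = I64_MUL_WIDE_{S,U} LHS, RHS
//   merge_values Lo, Hi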
1872SDValue WebAssemblyTargetLowering::LowerMUL_LOHI(SDValue Op,
1873 SelectionDAG &DAG) const {
1874 assert(Subtarget->hasWideArithmetic());
1875 assert(Op.getValueType() == MVT::i64);
1876 SDLoc DL(Op);
1877 unsigned Opcode;
1878 switch (Op.getOpcode()) {
1879 case ISD::UMUL_LOHI:
1880 Opcode = WebAssemblyISD::I64_MUL_WIDE_U;
1881 break;
1882 case ISD::SMUL_LOHI:
1883 Opcode = WebAssemblyISD::I64_MUL_WIDE_S;
1884 break;
1885 default:
1886 llvm_unreachable("unexpected opcode");
1887 }
1888 SDValue LHS = Op.getOperand(i: 0);
1889 SDValue RHS = Op.getOperand(i: 1);
1890 SDValue Lo =
1891 DAG.getNode(Opcode, DL, VTList: DAG.getVTList(VT1: MVT::i64, VT2: MVT::i64), N1: LHS, N2: RHS);
1892 SDValue Hi(Lo.getNode(), 1);
1893 SDValue Ops[] = {Lo, Hi};
1894 return DAG.getMergeValues(Ops, dl: DL);
1895}
1896
1897// Lowers `UADDO` nodes to an `i64.add128` instruction when wide arithmetic is
1898// enabled. The upper halves of both add128 operands are constant zeros, so a
1899// single wasm instruction computes the 64-bit sum; the upper half of the
1900// result then indicates whether the overflow happened.
1901//
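// A minimal sketch of the resulting DAG:
//   {Lo, Carry64} = I64_ADD128 LHS, 0, RHS, 0
//   Overflow      = truncate Carry64 to i32
//   merge_values Lo, Overflow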
1902SDValue WebAssemblyTargetLowering::LowerUADDO(SDValue Op,
1903 SelectionDAG &DAG) const {
1904 assert(Subtarget->hasWideArithmetic());
1905 assert(Op.getValueType() == MVT::i64);
1906 assert(Op.getOpcode() == ISD::UADDO);
1907 SDLoc DL(Op);
1908 SDValue LHS = Op.getOperand(i: 0);
1909 SDValue RHS = Op.getOperand(i: 1);
1910 SDValue Zero = DAG.getConstant(Val: 0, DL, VT: MVT::i64);
1911 SDValue Result =
1912 DAG.getNode(Opcode: WebAssemblyISD::I64_ADD128, DL,
1913 VTList: DAG.getVTList(VT1: MVT::i64, VT2: MVT::i64), N1: LHS, N2: Zero, N3: RHS, N4: Zero);
1914 SDValue CarryI64(Result.getNode(), 1);
1915 SDValue CarryI32 = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: CarryI64);
1916 SDValue Ops[] = {Result, CarryI32};
1917 return DAG.getMergeValues(Ops, dl: DL);
1918}
1919
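// Replaces an illegal 128-bit ADD/SUB with a single wide-arithmetic node on
// i64 halves. A minimal sketch for ADD:
//   {Lo, Hi} = I64_ADD128 a.lo, a.hi, b.lo, b.hi
//   result   = build_pair Lo, Hi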
1920SDValue WebAssemblyTargetLowering::Replace128Op(SDNode *N,
1921 SelectionDAG &DAG) const {
1922 assert(Subtarget->hasWideArithmetic());
1923 assert(N->getValueType(0) == MVT::i128);
1924 SDLoc DL(N);
1925 unsigned Opcode;
1926 switch (N->getOpcode()) {
1927 case ISD::ADD:
1928 Opcode = WebAssemblyISD::I64_ADD128;
1929 break;
1930 case ISD::SUB:
1931 Opcode = WebAssemblyISD::I64_SUB128;
1932 break;
1933 default:
1934 llvm_unreachable("unexpected opcode");
1935 }
1936 SDValue LHS = N->getOperand(Num: 0);
1937 SDValue RHS = N->getOperand(Num: 1);
1938
1939 SDValue C0 = DAG.getConstant(Val: 0, DL, VT: MVT::i64);
1940 SDValue C1 = DAG.getConstant(Val: 1, DL, VT: MVT::i64);
1941 SDValue LHS_0 = DAG.getNode(Opcode: ISD::EXTRACT_ELEMENT, DL, VT: MVT::i64, N1: LHS, N2: C0);
1942 SDValue LHS_1 = DAG.getNode(Opcode: ISD::EXTRACT_ELEMENT, DL, VT: MVT::i64, N1: LHS, N2: C1);
1943 SDValue RHS_0 = DAG.getNode(Opcode: ISD::EXTRACT_ELEMENT, DL, VT: MVT::i64, N1: RHS, N2: C0);
1944 SDValue RHS_1 = DAG.getNode(Opcode: ISD::EXTRACT_ELEMENT, DL, VT: MVT::i64, N1: RHS, N2: C1);
1945 SDValue Result_LO = DAG.getNode(Opcode, DL, VTList: DAG.getVTList(VT1: MVT::i64, VT2: MVT::i64),
1946 N1: LHS_0, N2: LHS_1, N3: RHS_0, N4: RHS_1);
1947 SDValue Result_HI(Result_LO.getNode(), 1);
1948 return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VTList: N->getVTList(), N1: Result_LO, N2: Result_HI);
1949}
1950
1951SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1952 SelectionDAG &DAG) const {
1953 SDValue Src = Op.getOperand(i: 2);
1954 if (isa<FrameIndexSDNode>(Val: Src.getNode())) {
1955 // CopyToReg nodes don't support FrameIndex operands. Other targets select
1956 // the FI to some LEA-like instruction, but since we don't have that, we
1957 // need to insert some kind of instruction that can take an FI operand and
1958 // produce a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1959 // local.copy between Op and its FI operand.
1960 SDValue Chain = Op.getOperand(i: 0);
1961 SDLoc DL(Op);
1962 Register Reg = cast<RegisterSDNode>(Val: Op.getOperand(i: 1))->getReg();
1963 EVT VT = Src.getValueType();
1964 SDValue Copy(DAG.getMachineNode(Opcode: VT == MVT::i32 ? WebAssembly::COPY_I32
1965 : WebAssembly::COPY_I64,
1966 dl: DL, VT, Op1: Src),
1967 0);
1968 return Op.getNode()->getNumValues() == 1
1969 ? DAG.getCopyToReg(Chain, dl: DL, Reg, N: Copy)
1970 : DAG.getCopyToReg(Chain, dl: DL, Reg, N: Copy,
1971 Glue: Op.getNumOperands() == 4 ? Op.getOperand(i: 3)
1972 : SDValue());
1973 }
1974 return SDValue();
1975}
1976
1977SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1978 SelectionDAG &DAG) const {
1979 int FI = cast<FrameIndexSDNode>(Val&: Op)->getIndex();
1980 return DAG.getTargetFrameIndex(FI, VT: Op.getValueType());
1981}
1982
1983SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1984 SelectionDAG &DAG) const {
1985 SDLoc DL(Op);
1986
1987 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1988 fail(DL, DAG,
1989 Msg: "Non-Emscripten WebAssembly hasn't implemented "
1990 "__builtin_return_address");
1991 return SDValue();
1992 }
1993
1994 unsigned Depth = Op.getConstantOperandVal(i: 0);
1995 MakeLibCallOptions CallOptions;
1996 return makeLibCall(DAG, LC: RTLIB::RETURN_ADDRESS, RetVT: Op.getValueType(),
1997 Ops: {DAG.getConstant(Val: Depth, DL, VT: MVT::i32)}, CallOptions, dl: DL)
1998 .first;
1999}
2000
2001SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
2002 SelectionDAG &DAG) const {
2003 // Non-zero depths are not supported by WebAssembly currently. Use the
2004 // legalizer's default expansion, which is to return 0 (what this function is
2005 // documented to do).
2006 if (Op.getConstantOperandVal(i: 0) > 0)
2007 return SDValue();
2008
2009 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
2010 EVT VT = Op.getValueType();
2011 Register FP =
2012 Subtarget->getRegisterInfo()->getFrameRegister(MF: DAG.getMachineFunction());
2013 return DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl: SDLoc(Op), Reg: FP, VT);
2014}
2015
2016SDValue
2017WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2018 SelectionDAG &DAG) const {
2019 SDLoc DL(Op);
2020 const auto *GA = cast<GlobalAddressSDNode>(Val&: Op);
2021
2022 MachineFunction &MF = DAG.getMachineFunction();
2023 if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
2024 report_fatal_error(reason: "cannot use thread-local storage without bulk memory",
2025 gen_crash_diag: false);
2026
2027 const GlobalValue *GV = GA->getGlobal();
2028
2029 // Currently only Emscripten supports dynamic linking with threads. Therefore,
2030 // on other targets, if we have thread-local storage, only the local-exec
2031 // model is possible.
2032 auto model = Subtarget->getTargetTriple().isOSEmscripten()
2033 ? GV->getThreadLocalMode()
2034 : GlobalValue::LocalExecTLSModel;
2035
2036 // Unsupported TLS modes
2037 assert(model != GlobalValue::NotThreadLocal);
2038 assert(model != GlobalValue::InitialExecTLSModel);
2039
2040 if (model == GlobalValue::LocalExecTLSModel ||
2041 model == GlobalValue::LocalDynamicTLSModel ||
2042 (model == GlobalValue::GeneralDynamicTLSModel &&
2043 getTargetMachine().shouldAssumeDSOLocal(GV))) {
2044 // For DSO-local TLS variables we use the offset from __tls_base.
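    // A rough sketch of the address computation emitted below:
    //   global.get __tls_base
    //   <const>    symbol offset, relative to __tls_base
    //   add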
2045
2046 MVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
2047 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
2048 : WebAssembly::GLOBAL_GET_I32;
2049 const char *BaseName = MF.createExternalSymbolName(Name: "__tls_base");
2050
2051 SDValue BaseAddr(
2052 DAG.getMachineNode(Opcode: GlobalGet, dl: DL, VT: PtrVT,
2053 Op1: DAG.getTargetExternalSymbol(Sym: BaseName, VT: PtrVT)),
2054 0);
2055
2056 SDValue TLSOffset = DAG.getTargetGlobalAddress(
2057 GV, DL, VT: PtrVT, offset: GA->getOffset(), TargetFlags: WebAssemblyII::MO_TLS_BASE_REL);
2058 SDValue SymOffset =
2059 DAG.getNode(Opcode: WebAssemblyISD::WrapperREL, DL, VT: PtrVT, Operand: TLSOffset);
2060
2061 return DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: BaseAddr, N2: SymOffset);
2062 }
2063
2064 assert(model == GlobalValue::GeneralDynamicTLSModel);
2065
2066 EVT VT = Op.getValueType();
2067 return DAG.getNode(Opcode: WebAssemblyISD::Wrapper, DL, VT,
2068 Operand: DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL, VT,
2069 offset: GA->getOffset(),
2070 TargetFlags: WebAssemblyII::MO_GOT_TLS));
2071}
2072
2073SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
2074 SelectionDAG &DAG) const {
2075 SDLoc DL(Op);
2076 const auto *GA = cast<GlobalAddressSDNode>(Val&: Op);
2077 EVT VT = Op.getValueType();
2078 assert(GA->getTargetFlags() == 0 &&
2079 "Unexpected target flags on generic GlobalAddressSDNode");
2080 if (!WebAssembly::isValidAddressSpace(AS: GA->getAddressSpace()))
2081 fail(DL, DAG, Msg: "Invalid address space for WebAssembly target");
2082
2083 unsigned OperandFlags = 0;
2084 const GlobalValue *GV = GA->getGlobal();
2085 // Since WebAssembly tables cannot yet be shared across modules, we don't
2086 // need special treatment for tables in PIC mode.
2087 if (isPositionIndependent() &&
2088 !WebAssembly::isWebAssemblyTableType(Ty: GV->getValueType())) {
2089 if (getTargetMachine().shouldAssumeDSOLocal(GV)) {
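      // For DSO-local symbols in PIC code, the address is a run-time base
      // (__memory_base for data, __table_base for functions) plus a
      // link-time-relative offset; roughly:
      //   global.get __memory_base
      //   <const>    symbol offset, relative to __memory_base
      //   add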
2090 MachineFunction &MF = DAG.getMachineFunction();
2091 MVT PtrVT = getPointerTy(DL: MF.getDataLayout());
2092 const char *BaseName;
2093 if (GV->getValueType()->isFunctionTy()) {
2094 BaseName = MF.createExternalSymbolName(Name: "__table_base");
2095 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
2096 } else {
2097 BaseName = MF.createExternalSymbolName(Name: "__memory_base");
2098 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
2099 }
2100 SDValue BaseAddr =
2101 DAG.getNode(Opcode: WebAssemblyISD::Wrapper, DL, VT: PtrVT,
2102 Operand: DAG.getTargetExternalSymbol(Sym: BaseName, VT: PtrVT));
2103
2104 SDValue SymAddr = DAG.getNode(
2105 Opcode: WebAssemblyISD::WrapperREL, DL, VT,
2106 Operand: DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL, VT, offset: GA->getOffset(),
2107 TargetFlags: OperandFlags));
2108
2109 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: BaseAddr, N2: SymAddr);
2110 }
2111 OperandFlags = WebAssemblyII::MO_GOT;
2112 }
2113
2114 return DAG.getNode(Opcode: WebAssemblyISD::Wrapper, DL, VT,
2115 Operand: DAG.getTargetGlobalAddress(GV: GA->getGlobal(), DL, VT,
2116 offset: GA->getOffset(), TargetFlags: OperandFlags));
2117}
2118
2119SDValue
2120WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
2121 SelectionDAG &DAG) const {
2122 SDLoc DL(Op);
2123 const auto *ES = cast<ExternalSymbolSDNode>(Val&: Op);
2124 EVT VT = Op.getValueType();
2125 assert(ES->getTargetFlags() == 0 &&
2126 "Unexpected target flags on generic ExternalSymbolSDNode");
2127 return DAG.getNode(Opcode: WebAssemblyISD::Wrapper, DL, VT,
2128 Operand: DAG.getTargetExternalSymbol(Sym: ES->getSymbol(), VT));
2129}
2130
2131SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
2132 SelectionDAG &DAG) const {
2133 // There's no need for a Wrapper node because we always incorporate a jump
2134 // table operand into a BR_TABLE instruction, rather than ever
2135 // materializing it in a register.
2136 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Val&: Op);
2137 return DAG.getTargetJumpTable(JTI: JT->getIndex(), VT: Op.getValueType(),
2138 TargetFlags: JT->getTargetFlags());
2139}
2140
2141SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
2142 SelectionDAG &DAG) const {
2143 SDLoc DL(Op);
2144 SDValue Chain = Op.getOperand(i: 0);
2145 const auto *JT = cast<JumpTableSDNode>(Val: Op.getOperand(i: 1));
2146 SDValue Index = Op.getOperand(i: 2);
2147 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
2148
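  // BR_TABLE takes the chain, the index, one basic-block operand per case,
  // and finally the default target, mirroring the operand order of the wasm
  // br_table instruction.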
2149 SmallVector<SDValue, 8> Ops;
2150 Ops.push_back(Elt: Chain);
2151 Ops.push_back(Elt: Index);
2152
2153 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
2154 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
2155
2156 // Add an operand for each case.
2157 for (auto *MBB : MBBs)
2158 Ops.push_back(Elt: DAG.getBasicBlock(MBB));
2159
2160 // Add the first MBB as a dummy default target for now. This will be replaced
2161 // with the proper default target (and the preceding range check eliminated)
2162 // if possible by WebAssemblyFixBrTableDefaults.
2163 Ops.push_back(Elt: DAG.getBasicBlock(MBB: *MBBs.begin()));
2164 return DAG.getNode(Opcode: WebAssemblyISD::BR_TABLE, DL, VT: MVT::Other, Ops);
2165}
2166
2167SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
2168 SelectionDAG &DAG) const {
2169 SDLoc DL(Op);
2170 EVT PtrVT = getPointerTy(DL: DAG.getMachineFunction().getDataLayout());
2171
2172 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
2173 const Value *SV = cast<SrcValueSDNode>(Val: Op.getOperand(i: 2))->getValue();
2174
2175 SDValue ArgN = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl: DL,
2176 Reg: MFI->getVarargBufferVreg(), VT: PtrVT);
2177 return DAG.getStore(Chain: Op.getOperand(i: 0), dl: DL, Val: ArgN, Ptr: Op.getOperand(i: 1),
2178 PtrInfo: MachinePointerInfo(SV));
2179}
2180
2181SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
2182 SelectionDAG &DAG) const {
2183 MachineFunction &MF = DAG.getMachineFunction();
2184 unsigned IntNo;
2185 switch (Op.getOpcode()) {
2186 case ISD::INTRINSIC_VOID:
2187 case ISD::INTRINSIC_W_CHAIN:
2188 IntNo = Op.getConstantOperandVal(i: 1);
2189 break;
2190 case ISD::INTRINSIC_WO_CHAIN:
2191 IntNo = Op.getConstantOperandVal(i: 0);
2192 break;
2193 default:
2194 llvm_unreachable("Invalid intrinsic");
2195 }
2196 SDLoc DL(Op);
2197
2198 switch (IntNo) {
2199 default:
2200 return SDValue(); // Don't custom lower most intrinsics.
2201
2202 case Intrinsic::wasm_lsda: {
2203 auto PtrVT = getPointerTy(DL: MF.getDataLayout());
2204 const char *SymName = MF.createExternalSymbolName(
2205 Name: "GCC_except_table" + std::to_string(val: MF.getFunctionNumber()));
2206 if (isPositionIndependent()) {
2207 SDValue Node = DAG.getTargetExternalSymbol(
2208 Sym: SymName, VT: PtrVT, TargetFlags: WebAssemblyII::MO_MEMORY_BASE_REL);
2209 const char *BaseName = MF.createExternalSymbolName(Name: "__memory_base");
2210 SDValue BaseAddr =
2211 DAG.getNode(Opcode: WebAssemblyISD::Wrapper, DL, VT: PtrVT,
2212 Operand: DAG.getTargetExternalSymbol(Sym: BaseName, VT: PtrVT));
2213 SDValue SymAddr =
2214 DAG.getNode(Opcode: WebAssemblyISD::WrapperREL, DL, VT: PtrVT, Operand: Node);
2215 return DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: BaseAddr, N2: SymAddr);
2216 }
2217 SDValue Node = DAG.getTargetExternalSymbol(Sym: SymName, VT: PtrVT);
2218 return DAG.getNode(Opcode: WebAssemblyISD::Wrapper, DL, VT: PtrVT, Operand: Node);
2219 }
2220
2221 case Intrinsic::wasm_shuffle: {
2222 // Drop in-chain and replace undefs, but otherwise pass through unchanged
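    // Undefined or out-of-range (>= 32) lane indices are normalized to zero so
    // that the resulting SHUFFLE node always carries 16 well-defined byte
    // indices.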
2223 SDValue Ops[18];
2224 size_t OpIdx = 0;
2225 Ops[OpIdx++] = Op.getOperand(i: 1);
2226 Ops[OpIdx++] = Op.getOperand(i: 2);
2227 while (OpIdx < 18) {
2228 const SDValue &MaskIdx = Op.getOperand(i: OpIdx + 1);
2229 if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsZExtVal() >= 32) {
2230 bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
2231 Ops[OpIdx++] = DAG.getConstant(Val: 0, DL, VT: MVT::i32, isTarget);
2232 } else {
2233 Ops[OpIdx++] = MaskIdx;
2234 }
2235 }
2236 return DAG.getNode(Opcode: WebAssemblyISD::SHUFFLE, DL, VT: Op.getValueType(), Ops);
2237 }
2238
2239 case Intrinsic::thread_pointer: {
2240 MVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
2241 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
2242 : WebAssembly::GLOBAL_GET_I32;
2243 const char *TlsBase = MF.createExternalSymbolName(Name: "__tls_base");
2244 return SDValue(
2245 DAG.getMachineNode(Opcode: GlobalGet, dl: DL, VT: PtrVT,
2246 Op1: DAG.getTargetExternalSymbol(Sym: TlsBase, VT: PtrVT)),
2247 0);
2248 }
2249 }
2250}
2251
2252SDValue
2253WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2254 SelectionDAG &DAG) const {
2255 SDLoc DL(Op);
2256 // If sign extension operations are disabled, allow sext_inreg only if operand
2257 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
2258 // extension operations, but allowing sext_inreg in this context lets us have
2259 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
2260 // everywhere would be simpler in this file, but would necessitate large and
2261 // brittle patterns to undo the expansion and select extract_lane_s
2262 // instructions.
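  // For example, (sext_inreg (extract_vector_elt v16i8 %v, C), i8) can then be
  // selected directly to i8x16.extract_lane_s instead of an extract followed
  // by a separate sign extension.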
2263 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
2264 if (Op.getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2265 return SDValue();
2266
2267 const SDValue &Extract = Op.getOperand(i: 0);
2268 MVT VecT = Extract.getOperand(i: 0).getSimpleValueType();
2269 if (VecT.getVectorElementType().getSizeInBits() > 32)
2270 return SDValue();
2271 MVT ExtractedLaneT =
2272 cast<VTSDNode>(Val: Op.getOperand(i: 1).getNode())->getVT().getSimpleVT();
2273 MVT ExtractedVecT =
2274 MVT::getVectorVT(VT: ExtractedLaneT, NumElements: 128 / ExtractedLaneT.getSizeInBits());
2275 if (ExtractedVecT == VecT)
2276 return Op;
2277
2278 // Bitcast vector to appropriate type to ensure ISel pattern coverage
2279 const SDNode *Index = Extract.getOperand(i: 1).getNode();
2280 if (!isa<ConstantSDNode>(Val: Index))
2281 return SDValue();
2282 unsigned IndexVal = Index->getAsZExtVal();
2283 unsigned Scale =
2284 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
2285 assert(Scale > 1);
2286 SDValue NewIndex =
2287 DAG.getConstant(Val: IndexVal * Scale, DL, VT: Index->getValueType(ResNo: 0));
2288 SDValue NewExtract = DAG.getNode(
2289 Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: Extract.getValueType(),
2290 N1: DAG.getBitcast(VT: ExtractedVecT, V: Extract.getOperand(i: 0)), N2: NewIndex);
2291 return DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT: Op.getValueType(), N1: NewExtract,
2292 N2: Op.getOperand(i: 1));
2293}
2294
2295static SDValue GetExtendHigh(SDValue Op, unsigned UserOpc, EVT VT,
2296 SelectionDAG &DAG) {
2297 SDValue Source = peekThroughBitcasts(V: Op);
2298 if (Source.getOpcode() != ISD::VECTOR_SHUFFLE)
2299 return SDValue();
2300
2301 assert((UserOpc == WebAssemblyISD::EXTEND_LOW_U ||
2302 UserOpc == WebAssemblyISD::EXTEND_LOW_S) &&
2303 "expected extend_low");
2304 auto *Shuffle = cast<ShuffleVectorSDNode>(Val: Source.getNode());
2305
2306 ArrayRef<int> Mask = Shuffle->getMask();
2307 // Look for a shuffle which moves from the high half to the low half.
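  // For example, for a <8 x i16> source, a mask beginning <4, 5, 6, 7, ...>
  // moves the high half down, so extend_low of that shuffle is equivalent to
  // extend_high of the original vector.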
2308 size_t FirstIdx = Mask.size() / 2;
2309 for (size_t i = 0; i < Mask.size() / 2; ++i) {
2310 if (Mask[i] != static_cast<int>(FirstIdx + i)) {
2311 return SDValue();
2312 }
2313 }
2314
2315 SDLoc DL(Op);
2316 unsigned Opc = UserOpc == WebAssemblyISD::EXTEND_LOW_S
2317 ? WebAssemblyISD::EXTEND_HIGH_S
2318 : WebAssemblyISD::EXTEND_HIGH_U;
2319 SDValue ShuffleSrc = Shuffle->getOperand(Num: 0);
2320 if (Op.getOpcode() == ISD::BITCAST)
2321 ShuffleSrc = DAG.getBitcast(VT: Op.getValueType(), V: ShuffleSrc);
2322
2323 return DAG.getNode(Opcode: Opc, DL, VT, Operand: ShuffleSrc);
2324}
2325
2326SDValue
2327WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(SDValue Op,
2328 SelectionDAG &DAG) const {
2329 SDLoc DL(Op);
2330 EVT VT = Op.getValueType();
2331 SDValue Src = Op.getOperand(i: 0);
2332 EVT SrcVT = Src.getValueType();
2333
2334 if (SrcVT.getVectorElementType() == MVT::i1 ||
2335 SrcVT.getVectorElementType() == MVT::i64)
2336 return SDValue();
2337
2338 assert(VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits() == 0 &&
2339 "Unexpected extension factor.");
2340 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
2341
2342 if (Scale != 2 && Scale != 4 && Scale != 8)
2343 return SDValue();
2344
2345 unsigned Ext;
2346 switch (Op.getOpcode()) {
2347 default:
2348 llvm_unreachable("unexpected opcode");
2349 case ISD::ANY_EXTEND_VECTOR_INREG:
2350 case ISD::ZERO_EXTEND_VECTOR_INREG:
2351 Ext = WebAssemblyISD::EXTEND_LOW_U;
2352 break;
2353 case ISD::SIGN_EXTEND_VECTOR_INREG:
2354 Ext = WebAssemblyISD::EXTEND_LOW_S;
2355 break;
2356 }
2357
2358 if (Scale == 2) {
2359 // See if we can use EXTEND_HIGH.
2360 if (auto ExtendHigh = GetExtendHigh(Op: Op.getOperand(i: 0), UserOpc: Ext, VT, DAG))
2361 return ExtendHigh;
2362 }
2363
2364 SDValue Ret = Src;
2365 while (Scale != 1) {
2366 Ret = DAG.getNode(Opcode: Ext, DL,
2367 VT: Ret.getValueType()
2368 .widenIntegerVectorElementType(Context&: *DAG.getContext())
2369 .getHalfNumVectorElementsVT(Context&: *DAG.getContext()),
2370 Operand: Ret);
2371 Scale /= 2;
2372 }
2373 assert(Ret.getValueType() == VT);
2374 return Ret;
2375}
2376
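// Matches a v2f64 BUILD_VECTOR whose lanes are both conversions (sint/uint to
// fp or fp_extend) of lanes extracted from a v4i32 / v4f32 source, and lowers
// it to a single convert_low / promote_low node, shuffling the source first
// if the converted lanes are not already the two low lanes.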
2377static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG) {
2378 SDLoc DL(Op);
2379 if (Op.getValueType() != MVT::v2f64)
2380 return SDValue();
2381
2382 auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
2383 unsigned &Index) -> bool {
2384 switch (Op.getOpcode()) {
2385 case ISD::SINT_TO_FP:
2386 Opcode = WebAssemblyISD::CONVERT_LOW_S;
2387 break;
2388 case ISD::UINT_TO_FP:
2389 Opcode = WebAssemblyISD::CONVERT_LOW_U;
2390 break;
2391 case ISD::FP_EXTEND:
2392 Opcode = WebAssemblyISD::PROMOTE_LOW;
2393 break;
2394 default:
2395 return false;
2396 }
2397
2398 auto ExtractVector = Op.getOperand(i: 0);
2399 if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2400 return false;
2401
2402 if (!isa<ConstantSDNode>(Val: ExtractVector.getOperand(i: 1).getNode()))
2403 return false;
2404
2405 SrcVec = ExtractVector.getOperand(i: 0);
2406 Index = ExtractVector.getConstantOperandVal(i: 1);
2407 return true;
2408 };
2409
2410 unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
2411 SDValue LHSSrcVec, RHSSrcVec;
2412 if (!GetConvertedLane(Op.getOperand(i: 0), LHSOpcode, LHSSrcVec, LHSIndex) ||
2413 !GetConvertedLane(Op.getOperand(i: 1), RHSOpcode, RHSSrcVec, RHSIndex))
2414 return SDValue();
2415
2416 if (LHSOpcode != RHSOpcode)
2417 return SDValue();
2418
2419 MVT ExpectedSrcVT;
2420 switch (LHSOpcode) {
2421 case WebAssemblyISD::CONVERT_LOW_S:
2422 case WebAssemblyISD::CONVERT_LOW_U:
2423 ExpectedSrcVT = MVT::v4i32;
2424 break;
2425 case WebAssemblyISD::PROMOTE_LOW:
2426 ExpectedSrcVT = MVT::v4f32;
2427 break;
2428 }
2429 if (LHSSrcVec.getValueType() != ExpectedSrcVT)
2430 return SDValue();
2431
2432 auto Src = LHSSrcVec;
2433 if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
2434 // Shuffle the source vector so that the converted lanes are the low lanes.
2435 Src = DAG.getVectorShuffle(
2436 VT: ExpectedSrcVT, dl: DL, N1: LHSSrcVec, N2: RHSSrcVec,
2437 Mask: {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1});
2438 }
2439 return DAG.getNode(Opcode: LHSOpcode, DL, VT: MVT::v2f64, Operand: Src);
2440}
2441
2442SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
2443 SelectionDAG &DAG) const {
2444 MVT VT = Op.getSimpleValueType();
2445 if (VT == MVT::v8f16) {
2446 // BUILD_VECTOR can't handle FP16 operands since Wasm doesn't have a scalar
2447 // FP16 type, so cast them to I16s.
2448 MVT IVT = VT.changeVectorElementType(EltVT: MVT::i16);
2449 SmallVector<SDValue, 8> NewOps;
2450 for (unsigned I = 0, E = Op.getNumOperands(); I < E; ++I)
2451 NewOps.push_back(Elt: DAG.getBitcast(VT: MVT::i16, V: Op.getOperand(i: I)));
2452 SDValue Res = DAG.getNode(Opcode: ISD::BUILD_VECTOR, DL: SDLoc(), VT: IVT, Ops: NewOps);
2453 return DAG.getBitcast(VT, V: Res);
2454 }
2455
2456 if (auto ConvertLow = LowerConvertLow(Op, DAG))
2457 return ConvertLow;
2458
2459 SDLoc DL(Op);
2460 const EVT VecT = Op.getValueType();
2461 const EVT LaneT = Op.getOperand(i: 0).getValueType();
2462 const size_t Lanes = Op.getNumOperands();
2463 bool CanSwizzle = VecT == MVT::v16i8;
2464
2465 // BUILD_VECTORs are lowered to the instruction that initializes the highest
2466 // possible number of lanes at once followed by a sequence of replace_lane
2467 // instructions to individually initialize any remaining lanes.
2468
2469 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
2470 // swizzled lanes should be given greater weight.
2471
2472 // TODO: Investigate looping rather than always extracting/replacing specific
2473 // lanes to fill gaps.
2474
2475 auto IsConstant = [](const SDValue &V) {
2476 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
2477 };
2478
2479 // Returns the source vector and index vector pair if they exist. Checks for:
2480 // (extract_vector_elt
2481 // $src,
2482 // (sign_extend_inreg (extract_vector_elt $indices, $i))
2483 // )
2484 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
2485 auto Bail = std::make_pair(x: SDValue(), y: SDValue());
2486 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2487 return Bail;
2488 const SDValue &SwizzleSrc = Lane->getOperand(Num: 0);
2489 const SDValue &IndexExt = Lane->getOperand(Num: 1);
2490 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
2491 return Bail;
2492 const SDValue &Index = IndexExt->getOperand(Num: 0);
2493 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2494 return Bail;
2495 const SDValue &SwizzleIndices = Index->getOperand(Num: 0);
2496 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
2497 SwizzleIndices.getValueType() != MVT::v16i8 ||
2498 Index->getOperand(Num: 1)->getOpcode() != ISD::Constant ||
2499 Index->getConstantOperandVal(Num: 1) != I)
2500 return Bail;
2501 return std::make_pair(x: SwizzleSrc, y: SwizzleIndices);
2502 };
2503
2504 // If the lane is extracted from another vector at a constant index, return
2505 // that vector. The source vector must not have more lanes than the dest
2506 // because the shufflevector indices are in terms of the destination lanes and
2507 // would not be able to address the smaller individual source lanes.
2508 auto GetShuffleSrc = [&](const SDValue &Lane) {
2509 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2510 return SDValue();
2511 if (!isa<ConstantSDNode>(Val: Lane->getOperand(Num: 1).getNode()))
2512 return SDValue();
2513 if (Lane->getOperand(Num: 0).getValueType().getVectorNumElements() >
2514 VecT.getVectorNumElements())
2515 return SDValue();
2516 return Lane->getOperand(Num: 0);
2517 };
2518
2519 using ValueEntry = std::pair<SDValue, size_t>;
2520 SmallVector<ValueEntry, 16> SplatValueCounts;
2521
2522 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
2523 SmallVector<SwizzleEntry, 16> SwizzleCounts;
2524
2525 using ShuffleEntry = std::pair<SDValue, size_t>;
2526 SmallVector<ShuffleEntry, 16> ShuffleCounts;
2527
2528 auto AddCount = [](auto &Counts, const auto &Val) {
2529 auto CountIt =
2530 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
2531 if (CountIt == Counts.end()) {
2532 Counts.emplace_back(Val, 1);
2533 } else {
2534 CountIt->second++;
2535 }
2536 };
2537
2538 auto GetMostCommon = [](auto &Counts) {
2539 auto CommonIt = llvm::max_element(Counts, llvm::less_second());
2540 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
2541 return *CommonIt;
2542 };
2543
2544 size_t NumConstantLanes = 0;
2545
2546 // Count eligible lanes for each type of vector creation op
2547 for (size_t I = 0; I < Lanes; ++I) {
2548 const SDValue &Lane = Op->getOperand(Num: I);
2549 if (Lane.isUndef())
2550 continue;
2551
2552 AddCount(SplatValueCounts, Lane);
2553
2554 if (IsConstant(Lane))
2555 NumConstantLanes++;
2556 if (auto ShuffleSrc = GetShuffleSrc(Lane))
2557 AddCount(ShuffleCounts, ShuffleSrc);
2558 if (CanSwizzle) {
2559 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
2560 if (SwizzleSrcs.first)
2561 AddCount(SwizzleCounts, SwizzleSrcs);
2562 }
2563 }
2564
2565 SDValue SplatValue;
2566 size_t NumSplatLanes;
2567 std::tie(args&: SplatValue, args&: NumSplatLanes) = GetMostCommon(SplatValueCounts);
2568
2569 SDValue SwizzleSrc;
2570 SDValue SwizzleIndices;
2571 size_t NumSwizzleLanes = 0;
2572 if (SwizzleCounts.size())
2573 std::forward_as_tuple(args: std::tie(args&: SwizzleSrc, args&: SwizzleIndices),
2574 args&: NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
2575
2576 // Shuffles can draw from up to two vectors, so find the two most common
2577 // sources.
2578 SDValue ShuffleSrc1, ShuffleSrc2;
2579 size_t NumShuffleLanes = 0;
2580 if (ShuffleCounts.size()) {
2581 std::tie(args&: ShuffleSrc1, args&: NumShuffleLanes) = GetMostCommon(ShuffleCounts);
2582 llvm::erase_if(C&: ShuffleCounts,
2583 P: [&](const auto &Pair) { return Pair.first == ShuffleSrc1; });
2584 }
2585 if (ShuffleCounts.size()) {
2586 size_t AdditionalShuffleLanes;
2587 std::tie(args&: ShuffleSrc2, args&: AdditionalShuffleLanes) =
2588 GetMostCommon(ShuffleCounts);
2589 NumShuffleLanes += AdditionalShuffleLanes;
2590 }
2591
2592 // Predicate returning true if the lane is properly initialized by the
2593 // original instruction
2594 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
2595 SDValue Result;
2596 // Prefer swizzles over shuffles over vector consts over splats
2597 if (NumSwizzleLanes >= NumShuffleLanes &&
2598 NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
2599 Result = DAG.getNode(Opcode: WebAssemblyISD::SWIZZLE, DL, VT: VecT, N1: SwizzleSrc,
2600 N2: SwizzleIndices);
2601 auto Swizzled = std::make_pair(x&: SwizzleSrc, y&: SwizzleIndices);
2602 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
2603 return Swizzled == GetSwizzleSrcs(I, Lane);
2604 };
2605 } else if (NumShuffleLanes >= NumConstantLanes &&
2606 NumShuffleLanes >= NumSplatLanes) {
2607 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
2608 size_t DestLaneCount = VecT.getVectorNumElements();
2609 size_t Scale1 = 1;
2610 size_t Scale2 = 1;
2611 SDValue Src1 = ShuffleSrc1;
2612 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VT: VecT);
2613 if (Src1.getValueType() != VecT) {
2614 size_t LaneSize =
2615 Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
2616 assert(LaneSize > DestLaneSize);
2617 Scale1 = LaneSize / DestLaneSize;
2618 Src1 = DAG.getBitcast(VT: VecT, V: Src1);
2619 }
2620 if (Src2.getValueType() != VecT) {
2621 size_t LaneSize =
2622 Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
2623 assert(LaneSize > DestLaneSize);
2624 Scale2 = LaneSize / DestLaneSize;
2625 Src2 = DAG.getBitcast(VT: VecT, V: Src2);
2626 }
2627
2628 int Mask[16];
2629 assert(DestLaneCount <= 16);
2630 for (size_t I = 0; I < DestLaneCount; ++I) {
2631 const SDValue &Lane = Op->getOperand(Num: I);
2632 SDValue Src = GetShuffleSrc(Lane);
2633 if (Src == ShuffleSrc1) {
2634 Mask[I] = Lane->getConstantOperandVal(Num: 1) * Scale1;
2635 } else if (Src && Src == ShuffleSrc2) {
2636 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(Num: 1) * Scale2;
2637 } else {
2638 Mask[I] = -1;
2639 }
2640 }
2641 ArrayRef<int> MaskRef(Mask, DestLaneCount);
2642 Result = DAG.getVectorShuffle(VT: VecT, dl: DL, N1: Src1, N2: Src2, Mask: MaskRef);
2643 IsLaneConstructed = [&](size_t, const SDValue &Lane) {
2644 auto Src = GetShuffleSrc(Lane);
2645 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
2646 };
2647 } else if (NumConstantLanes >= NumSplatLanes) {
2648 SmallVector<SDValue, 16> ConstLanes;
2649 for (const SDValue &Lane : Op->op_values()) {
2650 if (IsConstant(Lane)) {
2651 // Values may need to be fixed so that they will sign extend to be
2652 // within the expected range during ISel. Check whether the value is in
2653 // bounds based on the lane bit width and if it is out of bounds, lop
2654 // off the extra bits.
2655 uint64_t LaneBits = 128 / Lanes;
2656 if (auto *Const = dyn_cast<ConstantSDNode>(Val: Lane.getNode())) {
2657 ConstLanes.push_back(Elt: DAG.getConstant(
2658 Val: Const->getAPIntValue().trunc(width: LaneBits).getZExtValue(),
2659 DL: SDLoc(Lane), VT: LaneT));
2660 } else {
2661 ConstLanes.push_back(Elt: Lane);
2662 }
2663 } else if (LaneT.isFloatingPoint()) {
2664 ConstLanes.push_back(Elt: DAG.getConstantFP(Val: 0, DL, VT: LaneT));
2665 } else {
2666 ConstLanes.push_back(Elt: DAG.getConstant(Val: 0, DL, VT: LaneT));
2667 }
2668 }
2669 Result = DAG.getBuildVector(VT: VecT, DL, Ops: ConstLanes);
2670 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
2671 return IsConstant(Lane);
2672 };
2673 } else {
2674 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits();
2675 if (NumSplatLanes == 1 && Op->getOperand(Num: 0) == SplatValue &&
2676 (DestLaneSize == 32 || DestLaneSize == 64)) {
2677 // Could be selected to load_zero.
2678 Result = DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL, VT: VecT, Operand: SplatValue);
2679 } else {
2680 // Use a splat (which might be selected as a load splat)
2681 Result = DAG.getSplatBuildVector(VT: VecT, DL, Op: SplatValue);
2682 }
2683 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
2684 return Lane == SplatValue;
2685 };
2686 }
2687
2688 assert(Result);
2689 assert(IsLaneConstructed);
2690
2691 // Add replace_lane instructions for any unhandled values
2692 for (size_t I = 0; I < Lanes; ++I) {
2693 const SDValue &Lane = Op->getOperand(Num: I);
2694 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
2695 Result = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL, VT: VecT, N1: Result, N2: Lane,
2696 N3: DAG.getConstant(Val: I, DL, VT: MVT::i32));
2697 }
2698
2699 return Result;
2700}
2701
2702SDValue
2703WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2704 SelectionDAG &DAG) const {
2705 SDLoc DL(Op);
2706 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Val: Op.getNode())->getMask();
2707 MVT VecType = Op.getOperand(i: 0).getSimpleValueType();
2708 assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
2709 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
2710
2711 // Space for two vector args and sixteen mask indices
2712 SDValue Ops[18];
2713 size_t OpIdx = 0;
2714 Ops[OpIdx++] = Op.getOperand(i: 0);
2715 Ops[OpIdx++] = Op.getOperand(i: 1);
2716
2717 // Expand mask indices to byte indices and materialize them as operands
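  // For example, a v4i32 mask <1, 0, -1, 3> expands to the byte indices
  //   4 5 6 7  0 1 2 3  0 1 2 3  12 13 14 15
  // which become the i8x16.shuffle immediate (indices 16..31 would select
  // bytes from the second input).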
2718 for (int M : Mask) {
2719 for (size_t J = 0; J < LaneBytes; ++J) {
2720 // Lower undefs (represented by -1 in the mask) to the byte indices {0..J},
2721 // which address a whole lane of the first input, allowing further reduction
2722 // at the VM, e.g. matching an 8x16 byte shuffle to a cheaper 32x4 shuffle.
2723 uint64_t ByteIndex = M == -1 ? J : (uint64_t)M * LaneBytes + J;
2724 Ops[OpIdx++] = DAG.getConstant(Val: ByteIndex, DL, VT: MVT::i32);
2725 }
2726 }
2727
2728 return DAG.getNode(Opcode: WebAssemblyISD::SHUFFLE, DL, VT: Op.getValueType(), Ops);
2729}
2730
2731SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
2732 SelectionDAG &DAG) const {
2733 SDLoc DL(Op);
2734 // The legalizer does not know how to expand the unsupported comparison modes
2735 // of i64x2 vectors, so we manually unroll them here.
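  // For example, a v2i64 signed-less-than compare becomes two i64 select_cc
  // nodes that each produce all-ones or all-zeros, packed back into a
  // build_vector.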
2736 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
2737 SmallVector<SDValue, 2> LHS, RHS;
2738 DAG.ExtractVectorElements(Op: Op->getOperand(Num: 0), Args&: LHS);
2739 DAG.ExtractVectorElements(Op: Op->getOperand(Num: 1), Args&: RHS);
2740 const SDValue &CC = Op->getOperand(Num: 2);
2741 auto MakeLane = [&](unsigned I) {
2742 return DAG.getNode(Opcode: ISD::SELECT_CC, DL, VT: MVT::i64, N1: LHS[I], N2: RHS[I],
2743 N3: DAG.getConstant(Val: uint64_t(-1), DL, VT: MVT::i64),
2744 N4: DAG.getConstant(Val: uint64_t(0), DL, VT: MVT::i64), N5: CC);
2745 };
2746 return DAG.getBuildVector(VT: Op->getValueType(ResNo: 0), DL,
2747 Ops: {MakeLane(0), MakeLane(1)});
2748}
2749
2750SDValue
2751WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
2752 SelectionDAG &DAG) const {
2753 // Allow constant lane indices, expand variable lane indices
2754 SDNode *IdxNode = Op.getOperand(i: Op.getNumOperands() - 1).getNode();
2755 if (isa<ConstantSDNode>(Val: IdxNode)) {
2756 // Ensure the index type is i32 to match the tablegen patterns
2757 uint64_t Idx = IdxNode->getAsZExtVal();
2758 SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
2759 Ops[Op.getNumOperands() - 1] =
2760 DAG.getConstant(Val: Idx, DL: SDLoc(IdxNode), VT: MVT::i32);
2761 return DAG.getNode(Opcode: Op.getOpcode(), DL: SDLoc(Op), VT: Op.getValueType(), Ops);
2762 }
2763 // Perform default expansion
2764 return SDValue();
2765}
2766
2767static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
2768 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
2769 // 32-bit and 64-bit unrolled shifts will have proper semantics
2770 if (LaneT.bitsGE(VT: MVT::i32))
2771 return DAG.UnrollVectorOp(N: Op.getNode());
2772 // Otherwise mask the shift value to get proper semantics from 32-bit shift
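  // For example, for a v16i8 shift each lane is widened to i32, so the shift
  // amount is first masked with 7 to keep the modulo-lane-width behaviour of
  // the original i8 shift.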
2773 SDLoc DL(Op);
2774 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
2775 SDValue Mask = DAG.getConstant(Val: LaneT.getSizeInBits() - 1, DL, VT: MVT::i32);
2776 unsigned ShiftOpcode = Op.getOpcode();
2777 SmallVector<SDValue, 16> ShiftedElements;
2778 DAG.ExtractVectorElements(Op: Op.getOperand(i: 0), Args&: ShiftedElements, Start: 0, Count: 0, EltVT: MVT::i32);
2779 SmallVector<SDValue, 16> ShiftElements;
2780 DAG.ExtractVectorElements(Op: Op.getOperand(i: 1), Args&: ShiftElements, Start: 0, Count: 0, EltVT: MVT::i32);
2781 SmallVector<SDValue, 16> UnrolledOps;
2782 for (size_t i = 0; i < NumLanes; ++i) {
2783 SDValue MaskedShiftValue =
2784 DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: ShiftElements[i], N2: Mask);
2785 SDValue ShiftedValue = ShiftedElements[i];
2786 if (ShiftOpcode == ISD::SRA)
2787 ShiftedValue = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT: MVT::i32,
2788 N1: ShiftedValue, N2: DAG.getValueType(LaneT));
2789 UnrolledOps.push_back(
2790 Elt: DAG.getNode(Opcode: ShiftOpcode, DL, VT: MVT::i32, N1: ShiftedValue, N2: MaskedShiftValue));
2791 }
2792 return DAG.getBuildVector(VT: Op.getValueType(), DL, Ops: UnrolledOps);
2793}
2794
2795SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
2796 SelectionDAG &DAG) const {
2797 SDLoc DL(Op);
2798
2799 // Only manually lower vector shifts
2800 assert(Op.getSimpleValueType().isVector());
2801
2802 uint64_t LaneBits = Op.getValueType().getScalarSizeInBits();
2803 auto ShiftVal = Op.getOperand(i: 1);
2804
  // Try to skip an explicit mask of the shift amount, since the shift
  // instruction already takes the shift count modulo the lane width.
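  // For example, in (shl v4i32:$v, (and (splat $x), (splat 31))) the 'and'
  // with 31 is redundant because i32x4.shl masks the count the same way, so
  // we can shift by $x directly.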
2806 auto SkipImpliedMask = [](SDValue MaskOp, uint64_t MaskBits) {
2807 if (MaskOp.getOpcode() != ISD::AND)
2808 return MaskOp;
2809 SDValue LHS = MaskOp.getOperand(i: 0);
2810 SDValue RHS = MaskOp.getOperand(i: 1);
2811 if (MaskOp.getValueType().isVector()) {
2812 APInt MaskVal;
2813 if (!ISD::isConstantSplatVector(N: RHS.getNode(), SplatValue&: MaskVal))
2814 std::swap(a&: LHS, b&: RHS);
2815
2816 if (ISD::isConstantSplatVector(N: RHS.getNode(), SplatValue&: MaskVal) &&
2817 MaskVal == MaskBits)
2818 MaskOp = LHS;
2819 } else {
2820 if (!isa<ConstantSDNode>(Val: RHS.getNode()))
2821 std::swap(a&: LHS, b&: RHS);
2822
2823 auto ConstantRHS = dyn_cast<ConstantSDNode>(Val: RHS.getNode());
2824 if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
2825 MaskOp = LHS;
2826 }
2827
2828 return MaskOp;
2829 };
2830
  // Skip a vector 'and' that masks the shift amount.
2832 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2833 ShiftVal = DAG.getSplatValue(V: ShiftVal);
2834 if (!ShiftVal)
2835 return unrollVectorShift(Op, DAG);
2836
  // Skip a scalar 'and' that masks the splatted shift amount.
2838 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2839 // Use anyext because none of the high bits can affect the shift
2840 ShiftVal = DAG.getAnyExtOrTrunc(Op: ShiftVal, DL, VT: MVT::i32);
2841
2842 unsigned Opcode;
2843 switch (Op.getOpcode()) {
2844 case ISD::SHL:
2845 Opcode = WebAssemblyISD::VEC_SHL;
2846 break;
2847 case ISD::SRA:
2848 Opcode = WebAssemblyISD::VEC_SHR_S;
2849 break;
2850 case ISD::SRL:
2851 Opcode = WebAssemblyISD::VEC_SHR_U;
2852 break;
2853 default:
2854 llvm_unreachable("unexpected opcode");
2855 }
2856
2857 return DAG.getNode(Opcode, DL, VT: Op.getValueType(), N1: Op.getOperand(i: 0), N2: ShiftVal);
2858}
2859
2860SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
2861 SelectionDAG &DAG) const {
2862 EVT ResT = Op.getValueType();
2863 EVT SatVT = cast<VTSDNode>(Val: Op.getOperand(i: 1))->getVT();
2864
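  // Keep the node when the result/saturation widths match an operation we can
  // select directly (e.g. i32.trunc_sat_f64_s or i32x4.trunc_sat_f32x4_u);
  // otherwise return SDValue() to get the default expansion.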
2865 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2866 (SatVT == MVT::i32 || SatVT == MVT::i64))
2867 return Op;
2868
2869 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2870 return Op;
2871
2872 if (ResT == MVT::v8i16 && SatVT == MVT::i16)
2873 return Op;
2874
2875 return SDValue();
2876}
2877
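// Check that fast-math flags or known FP facts guarantee the operands have no
// NaNs and that signed zeros cannot affect the result, so the relaxed min/max
// instructions (whose behaviour is otherwise implementation-defined in those
// cases) are a sound lowering.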
2878static bool HasNoSignedZerosOrNaNs(SDValue Op, SelectionDAG &DAG) {
2879 return (Op->getFlags().hasNoNaNs() ||
2880 (DAG.isKnownNeverNaN(Op: Op->getOperand(Num: 0)) &&
2881 DAG.isKnownNeverNaN(Op: Op->getOperand(Num: 1)))) &&
2882 (Op->getFlags().hasNoSignedZeros() ||
2883 DAG.isKnownNeverZeroFloat(Op: Op->getOperand(Num: 0)) ||
2884 DAG.isKnownNeverZeroFloat(Op: Op->getOperand(Num: 1)));
2885}
2886
2887SDValue WebAssemblyTargetLowering::LowerFMIN(SDValue Op,
2888 SelectionDAG &DAG) const {
2889 if (Subtarget->hasRelaxedSIMD() && HasNoSignedZerosOrNaNs(Op, DAG)) {
2890 return DAG.getNode(Opcode: WebAssemblyISD::RELAXED_FMIN, DL: SDLoc(Op),
2891 VT: Op.getValueType(), N1: Op.getOperand(i: 0), N2: Op.getOperand(i: 1));
2892 }
2893 return SDValue();
2894}
2895
2896SDValue WebAssemblyTargetLowering::LowerFMAX(SDValue Op,
2897 SelectionDAG &DAG) const {
2898 if (Subtarget->hasRelaxedSIMD() && HasNoSignedZerosOrNaNs(Op, DAG)) {
2899 return DAG.getNode(Opcode: WebAssemblyISD::RELAXED_FMAX, DL: SDLoc(Op),
2900 VT: Op.getValueType(), N1: Op.getOperand(i: 0), N2: Op.getOperand(i: 1));
2901 }
2902 return SDValue();
2903}
2904
2905//===----------------------------------------------------------------------===//
2906// Custom DAG combine hooks
2907//===----------------------------------------------------------------------===//
2908static SDValue
2909performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2910 auto &DAG = DCI.DAG;
2911 auto Shuffle = cast<ShuffleVectorSDNode>(Val: N);
2912
2913 // Hoist vector bitcasts that don't change the number of lanes out of unary
2914 // shuffles, where they are less likely to get in the way of other combines.
2915 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
2916 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
2917 SDValue Bitcast = N->getOperand(Num: 0);
2918 if (Bitcast.getOpcode() != ISD::BITCAST)
2919 return SDValue();
2920 if (!N->getOperand(Num: 1).isUndef())
2921 return SDValue();
2922 SDValue CastOp = Bitcast.getOperand(i: 0);
2923 EVT SrcType = CastOp.getValueType();
2924 EVT DstType = Bitcast.getValueType();
2925 if (!SrcType.is128BitVector() ||
2926 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
2927 return SDValue();
2928 SDValue NewShuffle = DAG.getVectorShuffle(
2929 VT: SrcType, dl: SDLoc(N), N1: CastOp, N2: DAG.getUNDEF(VT: SrcType), Mask: Shuffle->getMask());
2930 return DAG.getBitcast(VT: DstType, V: NewShuffle);
2931}
2932
2933/// Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get
2934/// split up into scalar instructions during legalization, and the vector
2935/// extending instructions are selected in performVectorExtendCombine below.
2936static SDValue
2937performVectorExtendToFPCombine(SDNode *N,
2938 TargetLowering::DAGCombinerInfo &DCI) {
2939 auto &DAG = DCI.DAG;
2940 assert(N->getOpcode() == ISD::UINT_TO_FP ||
2941 N->getOpcode() == ISD::SINT_TO_FP);
2942
2943 EVT InVT = N->getOperand(Num: 0)->getValueType(ResNo: 0);
2944 EVT ResVT = N->getValueType(ResNo: 0);
2945 MVT ExtVT;
2946 if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
2947 ExtVT = MVT::v4i32;
2948 else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
2949 ExtVT = MVT::v2i32;
2950 else
2951 return SDValue();
2952
2953 unsigned Op =
2954 N->getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
2955 SDValue Conv = DAG.getNode(Opcode: Op, DL: SDLoc(N), VT: ExtVT, Operand: N->getOperand(Num: 0));
2956 return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT: ResVT, Operand: Conv);
2957}
2958
2959static SDValue
2960performVectorNonNegToFPCombine(SDNode *N,
2961 TargetLowering::DAGCombinerInfo &DCI) {
2962 auto &DAG = DCI.DAG;
2963
2964 SDNodeFlags Flags = N->getFlags();
2965 SDValue Op0 = N->getOperand(Num: 0);
2966 EVT VT = N->getValueType(ResNo: 0);
2967
2968 // Optimize uitofp to sitofp when the sign bit is known to be zero.
2969 // Depending on the target (runtime) backend, this might be performance
2970 // neutral (e.g. AArch64) or a significant improvement (e.g. x86_64).
2971 if (VT.isVector() && (Flags.hasNonNeg() || DAG.SignBitIsZero(Op: Op0))) {
2972 return DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: SDLoc(N), VT, Operand: Op0);
2973 }
2974
2975 return SDValue();
2976}
2977
2978static SDValue
2979performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2980 auto &DAG = DCI.DAG;
2981 assert(N->getOpcode() == ISD::SIGN_EXTEND ||
2982 N->getOpcode() == ISD::ZERO_EXTEND);
2983
2984 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
2985 // possible before the extract_subvector can be expanded.
2986 auto Extract = N->getOperand(Num: 0);
2987 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2988 return SDValue();
2989 auto Source = Extract.getOperand(i: 0);
2990 auto *IndexNode = dyn_cast<ConstantSDNode>(Val: Extract.getOperand(i: 1));
2991 if (IndexNode == nullptr)
2992 return SDValue();
2993 auto Index = IndexNode->getZExtValue();
2994
2995 // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
2996 // extracted subvector is the low or high half of its source.
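  // For example, (v8i16 (sext (extract_subvector (v16i8 $x), 8))) can be
  // selected as i16x8.extend_high_i8x16_s $x.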
2997 EVT ResVT = N->getValueType(ResNo: 0);
2998 if (ResVT == MVT::v8i16) {
2999 if (Extract.getValueType() != MVT::v8i8 ||
3000 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
3001 return SDValue();
3002 } else if (ResVT == MVT::v4i32) {
3003 if (Extract.getValueType() != MVT::v4i16 ||
3004 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
3005 return SDValue();
3006 } else if (ResVT == MVT::v2i64) {
3007 if (Extract.getValueType() != MVT::v2i32 ||
3008 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
3009 return SDValue();
3010 } else {
3011 return SDValue();
3012 }
3013
3014 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
3015 bool IsLow = Index == 0;
3016
3017 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
3018 : WebAssemblyISD::EXTEND_HIGH_S)
3019 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
3020 : WebAssemblyISD::EXTEND_HIGH_U);
3021
3022 return DAG.getNode(Opcode: Op, DL: SDLoc(N), VT: ResVT, Operand: Source);
3023}
3024
3025static SDValue
3026performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
3027 auto &DAG = DCI.DAG;
3028
3029 auto GetWasmConversionOp = [](unsigned Op) {
3030 switch (Op) {
3031 case ISD::FP_TO_SINT_SAT:
3032 return WebAssemblyISD::TRUNC_SAT_ZERO_S;
3033 case ISD::FP_TO_UINT_SAT:
3034 return WebAssemblyISD::TRUNC_SAT_ZERO_U;
3035 case ISD::FP_ROUND:
3036 return WebAssemblyISD::DEMOTE_ZERO;
3037 }
3038 llvm_unreachable("unexpected op");
3039 };
3040
3041 auto IsZeroSplat = [](SDValue SplatVal) {
3042 auto *Splat = dyn_cast<BuildVectorSDNode>(Val: SplatVal.getNode());
3043 APInt SplatValue, SplatUndef;
3044 unsigned SplatBitSize;
3045 bool HasAnyUndefs;
3046 // Endianness doesn't matter in this context because we are looking for
3047 // an all-zero value.
3048 return Splat &&
3049 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
3050 HasAnyUndefs) &&
3051 SplatValue == 0;
3052 };
3053
3054 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
3055 // Combine this:
3056 //
3057 // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
3058 //
3059 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
3060 //
3061 // Or this:
3062 //
3063 // (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
3064 //
3065 // into (f32x4.demote_zero_f64x2 $x).
3066 EVT ResVT;
3067 EVT ExpectedConversionType;
3068 auto Conversion = N->getOperand(Num: 0);
3069 auto ConversionOp = Conversion.getOpcode();
3070 switch (ConversionOp) {
3071 case ISD::FP_TO_SINT_SAT:
3072 case ISD::FP_TO_UINT_SAT:
3073 ResVT = MVT::v4i32;
3074 ExpectedConversionType = MVT::v2i32;
3075 break;
3076 case ISD::FP_ROUND:
3077 ResVT = MVT::v4f32;
3078 ExpectedConversionType = MVT::v2f32;
3079 break;
3080 default:
3081 return SDValue();
3082 }
3083
3084 if (N->getValueType(ResNo: 0) != ResVT)
3085 return SDValue();
3086
3087 if (Conversion.getValueType() != ExpectedConversionType)
3088 return SDValue();
3089
3090 auto Source = Conversion.getOperand(i: 0);
3091 if (Source.getValueType() != MVT::v2f64)
3092 return SDValue();
3093
3094 if (!IsZeroSplat(N->getOperand(Num: 1)) ||
3095 N->getOperand(Num: 1).getValueType() != ExpectedConversionType)
3096 return SDValue();
3097
3098 unsigned Op = GetWasmConversionOp(ConversionOp);
3099 return DAG.getNode(Opcode: Op, DL: SDLoc(N), VT: ResVT, Operand: Source);
3100 }
3101
3102 // Combine this:
3103 //
3104 // (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
3105 //
3106 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
3107 //
3108 // Or this:
3109 //
3110 // (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
3111 //
3112 // into (f32x4.demote_zero_f64x2 $x).
3113 EVT ResVT;
3114 auto ConversionOp = N->getOpcode();
3115 switch (ConversionOp) {
3116 case ISD::FP_TO_SINT_SAT:
3117 case ISD::FP_TO_UINT_SAT:
3118 ResVT = MVT::v4i32;
3119 break;
3120 case ISD::FP_ROUND:
3121 ResVT = MVT::v4f32;
3122 break;
3123 default:
3124 llvm_unreachable("unexpected op");
3125 }
3126
3127 if (N->getValueType(ResNo: 0) != ResVT)
3128 return SDValue();
3129
3130 auto Concat = N->getOperand(Num: 0);
3131 if (Concat.getValueType() != MVT::v4f64)
3132 return SDValue();
3133
3134 auto Source = Concat.getOperand(i: 0);
3135 if (Source.getValueType() != MVT::v2f64)
3136 return SDValue();
3137
3138 if (!IsZeroSplat(Concat.getOperand(i: 1)) ||
3139 Concat.getOperand(i: 1).getValueType() != MVT::v2f64)
3140 return SDValue();
3141
3142 unsigned Op = GetWasmConversionOp(ConversionOp);
3143 return DAG.getNode(Opcode: Op, DL: SDLoc(N), VT: ResVT, Operand: Source);
3144}
3145
3146// Helper to extract VectorWidth bits from Vec, starting from IdxVal.
3147static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
3148 const SDLoc &DL, unsigned VectorWidth) {
3149 EVT VT = Vec.getValueType();
3150 EVT ElVT = VT.getVectorElementType();
3151 unsigned Factor = VT.getSizeInBits() / VectorWidth;
3152 EVT ResultVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: ElVT,
3153 NumElements: VT.getVectorNumElements() / Factor);
3154
  // Extract the relevant VectorWidth bits by generating an EXTRACT_SUBVECTOR.
3156 unsigned ElemsPerChunk = VectorWidth / ElVT.getSizeInBits();
3157 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
3158
  // This is the index of the first element of the VectorWidth-bit chunk
  // we want. Since ElemsPerChunk is a power of 2, we just need to clear the
  // low bits.
3161 IdxVal &= ~(ElemsPerChunk - 1);
3162
3163 // If the input is a buildvector just emit a smaller one.
3164 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
3165 return DAG.getBuildVector(VT: ResultVT, DL,
3166 Ops: Vec->ops().slice(N: IdxVal, M: ElemsPerChunk));
3167
3168 SDValue VecIdx = DAG.getIntPtrConstant(Val: IdxVal, DL);
3169 return DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: ResultVT, N1: Vec, N2: VecIdx);
3170}
3171
3172// Helper to recursively truncate vector elements in half with NARROW_U. DstVT
3173// is the expected destination value type after recursion. In is the initial
3174// input. Note that the input should have enough leading zero bits to prevent
3175// NARROW_U from saturating results.
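// For example, truncating v8i32 to v8i16 extracts the two v4i32 halves and
// combines them with a single i16x8.narrow_i32x4_u.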
3176static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL,
3177 SelectionDAG &DAG) {
3178 EVT SrcVT = In.getValueType();
3179
3180 // No truncation required, we might get here due to recursive calls.
3181 if (SrcVT == DstVT)
3182 return In;
3183
3184 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
3185 unsigned NumElems = SrcVT.getVectorNumElements();
3186 if (!isPowerOf2_32(Value: NumElems))
3187 return SDValue();
3188 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
3189 assert(SrcSizeInBits > DstVT.getSizeInBits() && "Illegal truncation");
3190
3191 LLVMContext &Ctx = *DAG.getContext();
3192 EVT PackedSVT = EVT::getIntegerVT(Context&: Ctx, BitWidth: SrcVT.getScalarSizeInBits() / 2);
3193
3194 // Narrow to the largest type possible:
3195 // vXi64/vXi32 -> i16x8.narrow_i32x4_u and vXi16 -> i8x16.narrow_i16x8_u.
3196 EVT InVT = MVT::i16, OutVT = MVT::i8;
3197 if (SrcVT.getScalarSizeInBits() > 16) {
3198 InVT = MVT::i32;
3199 OutVT = MVT::i16;
3200 }
3201 unsigned SubSizeInBits = SrcSizeInBits / 2;
3202 InVT = EVT::getVectorVT(Context&: Ctx, VT: InVT, NumElements: SubSizeInBits / InVT.getSizeInBits());
3203 OutVT = EVT::getVectorVT(Context&: Ctx, VT: OutVT, NumElements: SubSizeInBits / OutVT.getSizeInBits());
3204
3205 // Split lower/upper subvectors.
3206 SDValue Lo = extractSubVector(Vec: In, IdxVal: 0, DAG, DL, VectorWidth: SubSizeInBits);
3207 SDValue Hi = extractSubVector(Vec: In, IdxVal: NumElems / 2, DAG, DL, VectorWidth: SubSizeInBits);
3208
  // 256-bit -> 128-bit truncate: narrow the lower/upper 128-bit subvectors.
3210 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
3211 Lo = DAG.getBitcast(VT: InVT, V: Lo);
3212 Hi = DAG.getBitcast(VT: InVT, V: Hi);
3213 SDValue Res = DAG.getNode(Opcode: WebAssemblyISD::NARROW_U, DL, VT: OutVT, N1: Lo, N2: Hi);
3214 return DAG.getBitcast(VT: DstVT, V: Res);
3215 }
3216
3217 // Recursively narrow lower/upper subvectors, concat result and narrow again.
3218 EVT PackedVT = EVT::getVectorVT(Context&: Ctx, VT: PackedSVT, NumElements: NumElems / 2);
3219 Lo = truncateVectorWithNARROW(DstVT: PackedVT, In: Lo, DL, DAG);
3220 Hi = truncateVectorWithNARROW(DstVT: PackedVT, In: Hi, DL, DAG);
3221
3222 PackedVT = EVT::getVectorVT(Context&: Ctx, VT: PackedSVT, NumElements: NumElems);
3223 SDValue Res = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: PackedVT, N1: Lo, N2: Hi);
3224 return truncateVectorWithNARROW(DstVT, In: Res, DL, DAG);
3225}
3226
3227static SDValue performTruncateCombine(SDNode *N,
3228 TargetLowering::DAGCombinerInfo &DCI) {
3229 auto &DAG = DCI.DAG;
3230
3231 SDValue In = N->getOperand(Num: 0);
3232 EVT InVT = In.getValueType();
3233 if (!InVT.isSimple())
3234 return SDValue();
3235
3236 EVT OutVT = N->getValueType(ResNo: 0);
3237 if (!OutVT.isVector())
3238 return SDValue();
3239
3240 EVT OutSVT = OutVT.getVectorElementType();
3241 EVT InSVT = InVT.getVectorElementType();
  // Currently we only handle truncation to v16i8 or v8i16.
3243 if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
3244 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.is128BitVector()))
3245 return SDValue();
3246
3247 SDLoc DL(N);
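  // Mask off the bits above the destination lane width first so that NARROW_U,
  // which saturates, produces exactly the truncated low bits. E.g. a
  // v8i32 -> v8i16 truncation becomes an AND with 0xffff followed by
  // i16x8.narrow_i32x4_u on the two v4i32 halves.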
3248 APInt Mask = APInt::getLowBitsSet(numBits: InVT.getScalarSizeInBits(),
3249 loBitsSet: OutVT.getScalarSizeInBits());
3250 In = DAG.getNode(Opcode: ISD::AND, DL, VT: InVT, N1: In, N2: DAG.getConstant(Val: Mask, DL, VT: InVT));
3251 return truncateVectorWithNARROW(DstVT: OutVT, In, DL, DAG);
3252}
3253
3254static SDValue performBitcastCombine(SDNode *N,
3255 TargetLowering::DAGCombinerInfo &DCI) {
3256 using namespace llvm::SDPatternMatch;
3257 auto &DAG = DCI.DAG;
3258 SDLoc DL(N);
3259 SDValue Src = N->getOperand(Num: 0);
3260 EVT VT = N->getValueType(ResNo: 0);
3261 EVT SrcVT = Src.getValueType();
3262
3263 if (!(DCI.isBeforeLegalize() && VT.isScalarInteger() &&
3264 SrcVT.isFixedLengthVector() && SrcVT.getScalarType() == MVT::i1))
3265 return SDValue();
3266
3267 unsigned NumElts = SrcVT.getVectorNumElements();
3268 EVT Width = MVT::getIntegerVT(BitWidth: 128 / NumElts);
3269
3270 // bitcast <N x i1> to iN, where N = 2, 4, 8, 16 (legal)
3271 // ==> bitmask
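  // For example, (i4 (bitcast (v4i1 $x))) is lowered by sign-extending $x to
  // v4i32, applying i32x4.bitmask, and truncating the i32 result to i4.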
3272 if (NumElts == 2 || NumElts == 4 || NumElts == 8 || NumElts == 16) {
3273 return DAG.getZExtOrTrunc(
3274 Op: DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: MVT::i32,
3275 Ops: {DAG.getConstant(Val: Intrinsic::wasm_bitmask, DL, VT: MVT::i32),
3276 DAG.getSExtOrTrunc(Op: N->getOperand(Num: 0), DL,
3277 VT: SrcVT.changeVectorElementType(
3278 Context&: *DAG.getContext(), EltVT: Width))}),
3279 DL, VT);
3280 }
3281
  // bitcast (<N x i1> (setcc (concat_vectors ...), ...)) to iN,
  // where N = 32 or 64 (illegal as vector-of-i1 bitcasts)
3283 if (NumElts == 32 || NumElts == 64) {
    // Strategy: redo the setcc separately on each 128-bit chunk, giving a
    // v16i1 per chunk. Bitcast each v16i1 to an i16 bitmask, zero-extend it
    // to i32 or i64, and accumulate the chunks by shifting the running
    // result left by 16 before adding each mask.
3287 SDValue Concat, SetCCVector;
3288 ISD::CondCode SetCond;
3289
3290 if (!sd_match(N, P: m_BitCast(Op: m_c_SetCC(LHS: m_Value(N&: Concat), RHS: m_Value(N&: SetCCVector),
3291 CC: m_CondCode(CC&: SetCond)))))
3292 return SDValue();
3293 if (Concat.getOpcode() != ISD::CONCAT_VECTORS)
3294 return SDValue();
3295
3296 uint64_t ElementWidth =
3297 SetCCVector.getValueType().getVectorElementType().getFixedSizeInBits();
3298
3299 SmallVector<SDValue> VectorsToShuffle;
3300 for (size_t I = 0; I < Concat->ops().size(); I++) {
3301 VectorsToShuffle.push_back(Elt: DAG.getBitcast(
3302 VT: MVT::i16,
3303 V: DAG.getSetCC(DL, VT: MVT::v16i1, LHS: Concat->ops()[I],
3304 RHS: extractSubVector(Vec: SetCCVector, IdxVal: I * (128 / ElementWidth),
3305 DAG, DL, VectorWidth: 128),
3306 Cond: SetCond)));
3307 }
3308
3309 MVT ReturnType = VectorsToShuffle.size() == 2 ? MVT::i32 : MVT::i64;
3310 SDValue ReturningInteger = DAG.getConstant(Val: 0, DL, VT: ReturnType);
3311
3312 for (SDValue V : VectorsToShuffle) {
      ReturningInteger = DAG.getNode(
          Opcode: ISD::SHL, DL, VT: ReturnType,
          Ops: {ReturningInteger,
                DAG.getShiftAmountConstant(Val: 16, VT: ReturnType, DL)});
3316
3317 SDValue ExtendedV = DAG.getZExtOrTrunc(Op: V, DL, VT: ReturnType);
3318 ReturningInteger =
3319 DAG.getNode(Opcode: ISD::ADD, DL, VT: ReturnType, Ops: {ReturningInteger, ExtendedV});
3320 }
3321
3322 return ReturningInteger;
3323 }
3324
3325 return SDValue();
3326}
3327
3328static SDValue performAnyAllCombine(SDNode *N, SelectionDAG &DAG) {
3329 // any_true (setcc <X>, 0, eq) => (not (all_true X))
3330 // all_true (setcc <X>, 0, eq) => (not (any_true X))
3331 // any_true (setcc <X>, 0, ne) => (any_true X)
3332 // all_true (setcc <X>, 0, ne) => (all_true X)
3333 assert(N->getOpcode() == ISD::INTRINSIC_WO_CHAIN);
3334 using namespace llvm::SDPatternMatch;
3335
3336 SDValue LHS;
3337 if (N->getNumOperands() < 2 ||
3338 !sd_match(N: N->getOperand(Num: 1),
3339 P: m_c_SetCC(LHS: m_Value(N&: LHS), RHS: m_Zero(), CC: m_CondCode())))
3340 return SDValue();
3341 EVT LT = LHS.getValueType();
3342 if (LT.getScalarSizeInBits() > 128 / LT.getVectorNumElements())
3343 return SDValue();
3344
3345 auto CombineSetCC = [&N, &DAG](Intrinsic::WASMIntrinsics InPre,
3346 ISD::CondCode SetType,
3347 Intrinsic::WASMIntrinsics InPost) {
3348 if (N->getConstantOperandVal(Num: 0) != InPre)
3349 return SDValue();
3350
3351 SDValue LHS;
3352 if (!sd_match(N: N->getOperand(Num: 1), P: m_c_SetCC(LHS: m_Value(N&: LHS), RHS: m_Zero(),
3353 CC: m_SpecificCondCode(CC: SetType))))
3354 return SDValue();
3355
3356 SDLoc DL(N);
3357 SDValue Ret = DAG.getZExtOrTrunc(
3358 Op: DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: MVT::i32,
3359 Ops: {DAG.getConstant(Val: InPost, DL, VT: MVT::i32), LHS}),
3360 DL, VT: MVT::i1);
3361 if (SetType == ISD::SETEQ)
3362 Ret = DAG.getNOT(DL, Val: Ret, VT: MVT::i1);
3363 return DAG.getZExtOrTrunc(Op: Ret, DL, VT: N->getValueType(ResNo: 0));
3364 };
3365
3366 if (SDValue AnyTrueEQ = CombineSetCC(Intrinsic::wasm_anytrue, ISD::SETEQ,
3367 Intrinsic::wasm_alltrue))
3368 return AnyTrueEQ;
3369 if (SDValue AllTrueEQ = CombineSetCC(Intrinsic::wasm_alltrue, ISD::SETEQ,
3370 Intrinsic::wasm_anytrue))
3371 return AllTrueEQ;
3372 if (SDValue AnyTrueNE = CombineSetCC(Intrinsic::wasm_anytrue, ISD::SETNE,
3373 Intrinsic::wasm_anytrue))
3374 return AnyTrueNE;
3375 if (SDValue AllTrueNE = CombineSetCC(Intrinsic::wasm_alltrue, ISD::SETNE,
3376 Intrinsic::wasm_alltrue))
3377 return AllTrueNE;
3378
3379 return SDValue();
3380}
3381
3382template <int MatchRHS, ISD::CondCode MatchCond, bool RequiresNegate,
3383 Intrinsic::ID Intrin>
3384static SDValue TryMatchTrue(SDNode *N, EVT VecVT, SelectionDAG &DAG) {
3385 SDValue LHS = N->getOperand(Num: 0);
3386 SDValue RHS = N->getOperand(Num: 1);
3387 SDValue Cond = N->getOperand(Num: 2);
3388 if (MatchCond != cast<CondCodeSDNode>(Val&: Cond)->get())
3389 return SDValue();
3390
3391 if (MatchRHS != cast<ConstantSDNode>(Val&: RHS)->getSExtValue())
3392 return SDValue();
3393
3394 SDLoc DL(N);
3395 SDValue Ret = DAG.getZExtOrTrunc(
3396 Op: DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: MVT::i32,
3397 Ops: {DAG.getConstant(Val: Intrin, DL, VT: MVT::i32),
3398 DAG.getSExtOrTrunc(Op: LHS->getOperand(Num: 0), DL, VT: VecVT)}),
3399 DL, VT: MVT::i1);
3400 if (RequiresNegate)
3401 Ret = DAG.getNOT(DL, Val: Ret, VT: MVT::i1);
3402 return DAG.getZExtOrTrunc(Op: Ret, DL, VT: N->getValueType(ResNo: 0));
3403}
3404
/// Try to convert an i128 equality comparison to a v16i8 comparison before
/// type legalization splits it up into chunks.
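/// For example, (setcc (i128 (load %p)), (i128 (load %q)), eq) becomes an
/// i8x16.eq of the two values loaded as v16i8, followed by i8x16.all_true.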
3407static SDValue
3408combineVectorSizedSetCCEquality(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
3409 const WebAssemblySubtarget *Subtarget) {
3410
3411 SDLoc DL(N);
3412 SDValue X = N->getOperand(Num: 0);
3413 SDValue Y = N->getOperand(Num: 1);
3414 EVT VT = N->getValueType(ResNo: 0);
3415 EVT OpVT = X.getValueType();
3416
3417 SelectionDAG &DAG = DCI.DAG;
3418 if (DCI.DAG.getMachineFunction().getFunction().hasFnAttribute(
3419 Kind: Attribute::NoImplicitFloat))
3420 return SDValue();
3421
3422 ISD::CondCode CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 2))->get();
  // We're looking for an oversized integer equality comparison that we can
  // lower with a single SIMD comparison.
3424 if (!OpVT.isScalarInteger() || !OpVT.isByteSized() || OpVT != MVT::i128 ||
3425 !Subtarget->hasSIMD128() || !isIntEqualitySetCC(Code: CC))
3426 return SDValue();
3427
3428 // Don't perform this combine if constructing the vector will be expensive.
3429 auto IsVectorBitCastCheap = [](SDValue X) {
3430 X = peekThroughBitcasts(V: X);
3431 return isa<ConstantSDNode>(Val: X) || X.getOpcode() == ISD::LOAD;
3432 };
3433
3434 if (!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y))
3435 return SDValue();
3436
3437 SDValue VecX = DAG.getBitcast(VT: MVT::v16i8, V: X);
3438 SDValue VecY = DAG.getBitcast(VT: MVT::v16i8, V: Y);
3439 SDValue Cmp = DAG.getSetCC(DL, VT: MVT::v16i8, LHS: VecX, RHS: VecY, Cond: CC);
3440
3441 SDValue Intr =
3442 DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: MVT::i32,
3443 Ops: {DAG.getConstant(Val: CC == ISD::SETEQ ? Intrinsic::wasm_alltrue
3444 : Intrinsic::wasm_anytrue,
3445 DL, VT: MVT::i32),
3446 Cmp});
3447
3448 return DAG.getSetCC(DL, VT, LHS: Intr, RHS: DAG.getConstant(Val: 0, DL, VT: MVT::i32),
3449 Cond: ISD::SETNE);
3450}
3451
3452static SDValue performSETCCCombine(SDNode *N,
3453 TargetLowering::DAGCombinerInfo &DCI,
3454 const WebAssemblySubtarget *Subtarget) {
3455 if (!DCI.isBeforeLegalize())
3456 return SDValue();
3457
3458 EVT VT = N->getValueType(ResNo: 0);
3459 if (!VT.isScalarInteger())
3460 return SDValue();
3461
3462 if (SDValue V = combineVectorSizedSetCCEquality(N, DCI, Subtarget))
3463 return V;
3464
3465 SDValue LHS = N->getOperand(Num: 0);
3466 if (LHS->getOpcode() != ISD::BITCAST)
3467 return SDValue();
3468
3469 EVT FromVT = LHS->getOperand(Num: 0).getValueType();
3470 if (!FromVT.isFixedLengthVector() || FromVT.getVectorElementType() != MVT::i1)
3471 return SDValue();
3472
3473 unsigned NumElts = FromVT.getVectorNumElements();
3474 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
3475 return SDValue();
3476
3477 if (!cast<ConstantSDNode>(Val: N->getOperand(Num: 1)))
3478 return SDValue();
3479
3480 auto &DAG = DCI.DAG;
3481 EVT VecVT = FromVT.changeVectorElementType(Context&: *DAG.getContext(),
3482 EltVT: MVT::getIntegerVT(BitWidth: 128 / NumElts));
3483 // setcc (iN (bitcast (vNi1 X))), 0, ne
3484 // ==> any_true (vNi1 X)
3485 if (auto Match = TryMatchTrue<0, ISD::SETNE, false, Intrinsic::wasm_anytrue>(
3486 N, VecVT, DAG)) {
3487 return Match;
3488 }
3489 // setcc (iN (bitcast (vNi1 X))), 0, eq
3490 // ==> xor (any_true (vNi1 X)), -1
3491 if (auto Match = TryMatchTrue<0, ISD::SETEQ, true, Intrinsic::wasm_anytrue>(
3492 N, VecVT, DAG)) {
3493 return Match;
3494 }
3495 // setcc (iN (bitcast (vNi1 X))), -1, eq
3496 // ==> all_true (vNi1 X)
3497 if (auto Match = TryMatchTrue<-1, ISD::SETEQ, false, Intrinsic::wasm_alltrue>(
3498 N, VecVT, DAG)) {
3499 return Match;
3500 }
3501 // setcc (iN (bitcast (vNi1 X))), -1, ne
3502 // ==> xor (all_true (vNi1 X)), -1
3503 if (auto Match = TryMatchTrue<-1, ISD::SETNE, true, Intrinsic::wasm_alltrue>(
3504 N, VecVT, DAG)) {
3505 return Match;
3506 }
3507 return SDValue();
3508}
3509
3510static SDValue TryWideExtMulCombine(SDNode *N, SelectionDAG &DAG) {
3511 EVT VT = N->getValueType(ResNo: 0);
3512 if (VT != MVT::v8i32 && VT != MVT::v16i32)
3513 return SDValue();
3514
3515 // Mul with extending inputs.
3516 SDValue LHS = N->getOperand(Num: 0);
3517 SDValue RHS = N->getOperand(Num: 1);
3518 if (LHS.getOpcode() != RHS.getOpcode())
3519 return SDValue();
3520
3521 if (LHS.getOpcode() != ISD::SIGN_EXTEND &&
3522 LHS.getOpcode() != ISD::ZERO_EXTEND)
3523 return SDValue();
3524
3525 if (LHS->getOperand(Num: 0).getValueType() != RHS->getOperand(Num: 0).getValueType())
3526 return SDValue();
3527
3528 EVT FromVT = LHS->getOperand(Num: 0).getValueType();
3529 EVT EltTy = FromVT.getVectorElementType();
3530 if (EltTy != MVT::i8)
3531 return SDValue();
3532
3533 // For an input DAG that looks like this
3534 // %a = input_type
3535 // %b = input_type
3536 // %lhs = extend %a to output_type
3537 // %rhs = extend %b to output_type
3538 // %mul = mul %lhs, %rhs
3539
  // input_type | output_type | instructions (trailing _ is the s/u suffix)
  // v16i8      | v16i32      | %low = i16x8.extmul_low_i8x16_ %a, %b
  //            |             | %high = i16x8.extmul_high_i8x16_ %a, %b
  //            |             | %low_low = i32x4.extend_low_i16x8_ %low
  //            |             | %low_high = i32x4.extend_high_i16x8_ %low
  //            |             | %high_low = i32x4.extend_low_i16x8_ %high
  //            |             | %high_high = i32x4.extend_high_i16x8_ %high
  //            |             | %res = concat_vector(...)
  // v8i8       | v8i32       | %low = i16x8.extmul_low_i8x16_ %a, %b
  //            |             | %low_low = i32x4.extend_low_i16x8_ %low
  //            |             | %low_high = i32x4.extend_high_i16x8_ %low
  //            |             | %res = concat_vector(%low_low, %low_high)
3552
3553 SDLoc DL(N);
3554 unsigned NumElts = VT.getVectorNumElements();
3555 SDValue ExtendInLHS = LHS->getOperand(Num: 0);
3556 SDValue ExtendInRHS = RHS->getOperand(Num: 0);
3557 bool IsSigned = LHS->getOpcode() == ISD::SIGN_EXTEND;
3558 unsigned ExtendLowOpc =
3559 IsSigned ? WebAssemblyISD::EXTEND_LOW_S : WebAssemblyISD::EXTEND_LOW_U;
3560 unsigned ExtendHighOpc =
3561 IsSigned ? WebAssemblyISD::EXTEND_HIGH_S : WebAssemblyISD::EXTEND_HIGH_U;
3562
3563 auto GetExtendLow = [&DAG, &DL, &ExtendLowOpc](EVT VT, SDValue Op) {
3564 return DAG.getNode(Opcode: ExtendLowOpc, DL, VT, Operand: Op);
3565 };
3566 auto GetExtendHigh = [&DAG, &DL, &ExtendHighOpc](EVT VT, SDValue Op) {
3567 return DAG.getNode(Opcode: ExtendHighOpc, DL, VT, Operand: Op);
3568 };
3569
3570 if (NumElts == 16) {
3571 SDValue LowLHS = GetExtendLow(MVT::v8i16, ExtendInLHS);
3572 SDValue LowRHS = GetExtendLow(MVT::v8i16, ExtendInRHS);
3573 SDValue MulLow = DAG.getNode(Opcode: ISD::MUL, DL, VT: MVT::v8i16, N1: LowLHS, N2: LowRHS);
3574 SDValue HighLHS = GetExtendHigh(MVT::v8i16, ExtendInLHS);
3575 SDValue HighRHS = GetExtendHigh(MVT::v8i16, ExtendInRHS);
3576 SDValue MulHigh = DAG.getNode(Opcode: ISD::MUL, DL, VT: MVT::v8i16, N1: HighLHS, N2: HighRHS);
3577 SDValue SubVectors[] = {
3578 GetExtendLow(MVT::v4i32, MulLow),
3579 GetExtendHigh(MVT::v4i32, MulLow),
3580 GetExtendLow(MVT::v4i32, MulHigh),
3581 GetExtendHigh(MVT::v4i32, MulHigh),
3582 };
3583 return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT, Ops: SubVectors);
3584 } else {
3585 assert(NumElts == 8);
3586 SDValue LowLHS = DAG.getNode(Opcode: LHS->getOpcode(), DL, VT: MVT::v8i16, Operand: ExtendInLHS);
3587 SDValue LowRHS = DAG.getNode(Opcode: RHS->getOpcode(), DL, VT: MVT::v8i16, Operand: ExtendInRHS);
3588 SDValue MulLow = DAG.getNode(Opcode: ISD::MUL, DL, VT: MVT::v8i16, N1: LowLHS, N2: LowRHS);
3589 SDValue Lo = GetExtendLow(MVT::v4i32, MulLow);
3590 SDValue Hi = GetExtendHigh(MVT::v4i32, MulLow);
3591 return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT, N1: Lo, N2: Hi);
3592 }
3593 return SDValue();
3594}
3595
3596static SDValue performMulCombine(SDNode *N,
3597 TargetLowering::DAGCombinerInfo &DCI) {
3598 assert(N->getOpcode() == ISD::MUL);
3599 EVT VT = N->getValueType(ResNo: 0);
3600 if (!VT.isVector())
3601 return SDValue();
3602
3603 if (auto Res = TryWideExtMulCombine(N, DAG&: DCI.DAG))
3604 return Res;
3605
  // There is no native v16i8 or v8i8 multiply, but there is a v8i16 one, so
  // widen the operands to v8i16.
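  // For example, a v16i8 multiply becomes two i16x8.mul's on the zero-extended
  // low and high halves, plus a shuffle that keeps the low byte of each 16-bit
  // product.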
3608 if (VT != MVT::v8i8 && VT != MVT::v16i8)
3609 return SDValue();
3610
3611 SDLoc DL(N);
3612 SelectionDAG &DAG = DCI.DAG;
3613 SDValue LHS = N->getOperand(Num: 0);
3614 SDValue RHS = N->getOperand(Num: 1);
3615 EVT MulVT = MVT::v8i16;
3616
3617 if (VT == MVT::v8i8) {
3618 SDValue PromotedLHS = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: MVT::v16i8, N1: LHS,
3619 N2: DAG.getUNDEF(VT: MVT::v8i8));
3620 SDValue PromotedRHS = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: MVT::v16i8, N1: RHS,
3621 N2: DAG.getUNDEF(VT: MVT::v8i8));
3622 SDValue LowLHS =
3623 DAG.getNode(Opcode: WebAssemblyISD::EXTEND_LOW_U, DL, VT: MulVT, Operand: PromotedLHS);
3624 SDValue LowRHS =
3625 DAG.getNode(Opcode: WebAssemblyISD::EXTEND_LOW_U, DL, VT: MulVT, Operand: PromotedRHS);
3626 SDValue MulLow = DAG.getBitcast(
3627 VT: MVT::v16i8, V: DAG.getNode(Opcode: ISD::MUL, DL, VT: MulVT, N1: LowLHS, N2: LowRHS));
3628 // Take the low byte of each lane.
3629 SDValue Shuffle = DAG.getVectorShuffle(
3630 VT: MVT::v16i8, dl: DL, N1: MulLow, N2: DAG.getUNDEF(VT: MVT::v16i8),
3631 Mask: {0, 2, 4, 6, 8, 10, 12, 14, -1, -1, -1, -1, -1, -1, -1, -1});
3632 return extractSubVector(Vec: Shuffle, IdxVal: 0, DAG, DL, VectorWidth: 64);
3633 } else {
3634 assert(VT == MVT::v16i8 && "Expected v16i8");
3635 SDValue LowLHS = DAG.getNode(Opcode: WebAssemblyISD::EXTEND_LOW_U, DL, VT: MulVT, Operand: LHS);
3636 SDValue LowRHS = DAG.getNode(Opcode: WebAssemblyISD::EXTEND_LOW_U, DL, VT: MulVT, Operand: RHS);
3637 SDValue HighLHS =
3638 DAG.getNode(Opcode: WebAssemblyISD::EXTEND_HIGH_U, DL, VT: MulVT, Operand: LHS);
3639 SDValue HighRHS =
3640 DAG.getNode(Opcode: WebAssemblyISD::EXTEND_HIGH_U, DL, VT: MulVT, Operand: RHS);
3641
3642 SDValue MulLow =
3643 DAG.getBitcast(VT, V: DAG.getNode(Opcode: ISD::MUL, DL, VT: MulVT, N1: LowLHS, N2: LowRHS));
3644 SDValue MulHigh =
3645 DAG.getBitcast(VT, V: DAG.getNode(Opcode: ISD::MUL, DL, VT: MulVT, N1: HighLHS, N2: HighRHS));
3646
3647 // Take the low byte of each lane.
3648 return DAG.getVectorShuffle(
3649 VT, dl: DL, N1: MulLow, N2: MulHigh,
3650 Mask: {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
3651 }
3652}
3653
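// Widen In to a vector with at least RequiredNumElems elements by repeatedly
// concatenating it with poison lanes.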
static SDValue DoubleVectorWidth(SDValue In, unsigned RequiredNumElems,
3655 SelectionDAG &DAG) {
3656 SDLoc DL(In);
3657 LLVMContext &Ctx = *DAG.getContext();
3658 EVT InVT = In.getValueType();
3659 unsigned NumElems = InVT.getVectorNumElements() * 2;
3660 EVT OutVT = EVT::getVectorVT(Context&: Ctx, VT: InVT.getVectorElementType(), NumElements: NumElems);
3661 SDValue Concat =
3662 DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: OutVT, N1: In, N2: DAG.getPOISON(VT: InVT));
3663 if (NumElems < RequiredNumElems) {
3664 return DoubleVectorWidth(In: Concat, RequiredNumElems, DAG);
3665 }
3666 return Concat;
3667}
3668
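// Lower a vector fp_to_{s,u}int whose result has i8 or i16 lanes by first
// converting to i32 lanes, masking each lane to the destination width, and
// then narrowing with NARROW_U, padding with poison lanes when the result is
// narrower than 128 bits.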
static SDValue performConvertFPCombine(SDNode *N, SelectionDAG &DAG) {
3670 EVT OutVT = N->getValueType(ResNo: 0);
3671 if (!OutVT.isVector())
3672 return SDValue();
3673
3674 EVT OutElTy = OutVT.getVectorElementType();
3675 if (OutElTy != MVT::i8 && OutElTy != MVT::i16)
3676 return SDValue();
3677
3678 unsigned NumElems = OutVT.getVectorNumElements();
3679 if (!isPowerOf2_32(Value: NumElems))
3680 return SDValue();
3681
3682 EVT FPVT = N->getOperand(Num: 0)->getValueType(ResNo: 0);
3683 if (FPVT.getVectorElementType() != MVT::f32)
3684 return SDValue();
3685
3686 SDLoc DL(N);
3687
3688 // First, convert to i32.
3689 LLVMContext &Ctx = *DAG.getContext();
3690 EVT IntVT = EVT::getVectorVT(Context&: Ctx, VT: MVT::i32, NumElements: NumElems);
3691 SDValue ToInt = DAG.getNode(Opcode: N->getOpcode(), DL, VT: IntVT, Operand: N->getOperand(Num: 0));
3692 APInt Mask = APInt::getLowBitsSet(numBits: IntVT.getScalarSizeInBits(),
3693 loBitsSet: OutVT.getScalarSizeInBits());
  // Mask out the bits above the destination lane width.
3695 SDValue Masked =
3696 DAG.getNode(Opcode: ISD::AND, DL, VT: IntVT, N1: ToInt, N2: DAG.getConstant(Val: Mask, DL, VT: IntVT));
3697
3698 if (OutVT.getSizeInBits() < 128) {
    // Create a wide enough vector so that we can use NARROW_U.
3700 EVT NarrowedVT = OutElTy == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
3701 unsigned NumRequiredElems = NarrowedVT.getVectorNumElements();
3702 SDValue WideVector = DoubleVectorWidth(In: Masked, RequiredNumElems: NumRequiredElems, DAG);
3703 SDValue Trunc = truncateVectorWithNARROW(DstVT: NarrowedVT, In: WideVector, DL, DAG);
3704 return DAG.getBitcast(
3705 VT: OutVT, V: extractSubVector(Vec: Trunc, IdxVal: 0, DAG, DL, VectorWidth: OutVT.getSizeInBits()));
3706 } else {
3707 return truncateVectorWithNARROW(DstVT: OutVT, In: Masked, DL, DAG);
3708 }
3709 return SDValue();
3710}
3711
3712SDValue
3713WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
3714 DAGCombinerInfo &DCI) const {
3715 switch (N->getOpcode()) {
3716 default:
3717 return SDValue();
3718 case ISD::BITCAST:
3719 return performBitcastCombine(N, DCI);
3720 case ISD::SETCC:
3721 return performSETCCCombine(N, DCI, Subtarget);
3722 case ISD::VECTOR_SHUFFLE:
3723 return performVECTOR_SHUFFLECombine(N, DCI);
3724 case ISD::SIGN_EXTEND:
3725 case ISD::ZERO_EXTEND:
3726 return performVectorExtendCombine(N, DCI);
3727 case ISD::UINT_TO_FP:
3728 if (auto ExtCombine = performVectorExtendToFPCombine(N, DCI))
3729 return ExtCombine;
3730 return performVectorNonNegToFPCombine(N, DCI);
3731 case ISD::SINT_TO_FP:
3732 return performVectorExtendToFPCombine(N, DCI);
3733 case ISD::FP_TO_SINT_SAT:
3734 case ISD::FP_TO_UINT_SAT:
3735 case ISD::FP_ROUND:
3736 case ISD::CONCAT_VECTORS:
3737 return performVectorTruncZeroCombine(N, DCI);
3738 case ISD::FP_TO_SINT:
3739 case ISD::FP_TO_UINT:
3740 return performConvertFPCombine(N, DAG&: DCI.DAG);
3741 case ISD::TRUNCATE:
3742 return performTruncateCombine(N, DCI);
3743 case ISD::INTRINSIC_WO_CHAIN:
3744 return performAnyAllCombine(N, DAG&: DCI.DAG);
3745 case ISD::MUL:
3746 return performMulCombine(N, DCI);
3747 }
3748}
3749