1//===- SPIRVLegalizerInfo.cpp --- SPIR-V Legalization Rules ------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
// This file implements the targeting of the MachineLegalizer class for SPIR-V.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SPIRVLegalizerInfo.h"
14#include "SPIRV.h"
15#include "SPIRVGlobalRegistry.h"
16#include "SPIRVSubtarget.h"
17#include "SPIRVUtils.h"
18#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
19#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
20#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
21#include "llvm/CodeGen/MachineInstr.h"
22#include "llvm/CodeGen/MachineRegisterInfo.h"
23#include "llvm/CodeGen/TargetOpcodes.h"
24#include "llvm/IR/IntrinsicsSPIRV.h"
25#include "llvm/Support/Debug.h"
26#include "llvm/Support/MathExtras.h"
27
28using namespace llvm;
29using namespace llvm::LegalizeActions;
30using namespace llvm::LegalityPredicates;
31
32#define DEBUG_TYPE "spirv-legalizer"
33
34LegalityPredicate typeOfExtendedScalars(unsigned TypeIdx, bool IsExtendedInts) {
35 return [IsExtendedInts, TypeIdx](const LegalityQuery &Query) {
36 const LLT Ty = Query.Types[TypeIdx];
37 return IsExtendedInts && Ty.isValid() && Ty.isScalar();
38 };
39}
40
41SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
42 using namespace TargetOpcode;
43
44 this->ST = &ST;
45 GR = ST.getSPIRVGlobalRegistry();
46
47 const LLT s1 = LLT::scalar(SizeInBits: 1);
48 const LLT s8 = LLT::scalar(SizeInBits: 8);
49 const LLT s16 = LLT::scalar(SizeInBits: 16);
50 const LLT s32 = LLT::scalar(SizeInBits: 32);
51 const LLT s64 = LLT::scalar(SizeInBits: 64);
52 const LLT s128 = LLT::scalar(SizeInBits: 128);
53
54 const LLT v16s64 = LLT::fixed_vector(NumElements: 16, ScalarSizeInBits: 64);
55 const LLT v16s32 = LLT::fixed_vector(NumElements: 16, ScalarSizeInBits: 32);
56 const LLT v16s16 = LLT::fixed_vector(NumElements: 16, ScalarSizeInBits: 16);
57 const LLT v16s8 = LLT::fixed_vector(NumElements: 16, ScalarSizeInBits: 8);
58 const LLT v16s1 = LLT::fixed_vector(NumElements: 16, ScalarSizeInBits: 1);
59
60 const LLT v8s64 = LLT::fixed_vector(NumElements: 8, ScalarSizeInBits: 64);
61 const LLT v8s32 = LLT::fixed_vector(NumElements: 8, ScalarSizeInBits: 32);
62 const LLT v8s16 = LLT::fixed_vector(NumElements: 8, ScalarSizeInBits: 16);
63 const LLT v8s8 = LLT::fixed_vector(NumElements: 8, ScalarSizeInBits: 8);
64 const LLT v8s1 = LLT::fixed_vector(NumElements: 8, ScalarSizeInBits: 1);
65
66 const LLT v4s64 = LLT::fixed_vector(NumElements: 4, ScalarSizeInBits: 64);
67 const LLT v4s32 = LLT::fixed_vector(NumElements: 4, ScalarSizeInBits: 32);
68 const LLT v4s16 = LLT::fixed_vector(NumElements: 4, ScalarSizeInBits: 16);
69 const LLT v4s8 = LLT::fixed_vector(NumElements: 4, ScalarSizeInBits: 8);
70 const LLT v4s1 = LLT::fixed_vector(NumElements: 4, ScalarSizeInBits: 1);
71
72 const LLT v3s64 = LLT::fixed_vector(NumElements: 3, ScalarSizeInBits: 64);
73 const LLT v3s32 = LLT::fixed_vector(NumElements: 3, ScalarSizeInBits: 32);
74 const LLT v3s16 = LLT::fixed_vector(NumElements: 3, ScalarSizeInBits: 16);
75 const LLT v3s8 = LLT::fixed_vector(NumElements: 3, ScalarSizeInBits: 8);
76 const LLT v3s1 = LLT::fixed_vector(NumElements: 3, ScalarSizeInBits: 1);
77
78 const LLT v2s64 = LLT::fixed_vector(NumElements: 2, ScalarSizeInBits: 64);
79 const LLT v2s32 = LLT::fixed_vector(NumElements: 2, ScalarSizeInBits: 32);
80 const LLT v2s16 = LLT::fixed_vector(NumElements: 2, ScalarSizeInBits: 16);
81 const LLT v2s8 = LLT::fixed_vector(NumElements: 2, ScalarSizeInBits: 8);
82 const LLT v2s1 = LLT::fixed_vector(NumElements: 2, ScalarSizeInBits: 1);
83
84 const unsigned PSize = ST.getPointerSize();
85 const LLT p0 = LLT::pointer(AddressSpace: 0, SizeInBits: PSize); // Function
86 const LLT p1 = LLT::pointer(AddressSpace: 1, SizeInBits: PSize); // CrossWorkgroup
87 const LLT p2 = LLT::pointer(AddressSpace: 2, SizeInBits: PSize); // UniformConstant
88 const LLT p3 = LLT::pointer(AddressSpace: 3, SizeInBits: PSize); // Workgroup
89 const LLT p4 = LLT::pointer(AddressSpace: 4, SizeInBits: PSize); // Generic
90 const LLT p5 =
91 LLT::pointer(AddressSpace: 5, SizeInBits: PSize); // Input, SPV_INTEL_usm_storage_classes (Device)
92 const LLT p6 = LLT::pointer(AddressSpace: 6, SizeInBits: PSize); // SPV_INTEL_usm_storage_classes (Host)
93 const LLT p7 = LLT::pointer(AddressSpace: 7, SizeInBits: PSize); // Input
94 const LLT p8 = LLT::pointer(AddressSpace: 8, SizeInBits: PSize); // Output
95 const LLT p9 =
96 LLT::pointer(AddressSpace: 9, SizeInBits: PSize); // CodeSectionINTEL, SPV_INTEL_function_pointers
97 const LLT p10 = LLT::pointer(AddressSpace: 10, SizeInBits: PSize); // Private
98 const LLT p11 = LLT::pointer(AddressSpace: 11, SizeInBits: PSize); // StorageBuffer
99 const LLT p12 = LLT::pointer(AddressSpace: 12, SizeInBits: PSize); // Uniform
100 const LLT p13 = LLT::pointer(AddressSpace: 13, SizeInBits: PSize); // PushConstant
101
102 // TODO: remove copy-pasting here by using concatenation in some way.
103 auto allPtrsScalarsAndVectors = {
104 p0, p1, p2, p3, p4, p5, p6, p7, p8,
105 p9, p10, p11, p12, p13, s1, s8, s16, s32,
106 s64, v2s1, v2s8, v2s16, v2s32, v2s64, v3s1, v3s8, v3s16,
107 v3s32, v3s64, v4s1, v4s8, v4s16, v4s32, v4s64, v8s1, v8s8,
108 v8s16, v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64};
109
110 auto allVectors = {v2s1, v2s8, v2s16, v2s32, v2s64, v3s1, v3s8,
111 v3s16, v3s32, v3s64, v4s1, v4s8, v4s16, v4s32,
112 v4s64, v8s1, v8s8, v8s16, v8s32, v8s64, v16s1,
113 v16s8, v16s16, v16s32, v16s64};
114
115 auto allShaderVectors = {v2s1, v2s8, v2s16, v2s32, v2s64,
116 v3s1, v3s8, v3s16, v3s32, v3s64,
117 v4s1, v4s8, v4s16, v4s32, v4s64};
118
119 auto allScalars = {s1, s8, s16, s32, s64};
120
121 auto allScalarsAndVectors = {
122 s1, s8, s16, s32, s64, s128, v2s1, v2s8,
123 v2s16, v2s32, v2s64, v3s1, v3s8, v3s16, v3s32, v3s64,
124 v4s1, v4s8, v4s16, v4s32, v4s64, v8s1, v8s8, v8s16,
125 v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64};
126
127 auto allIntScalarsAndVectors = {
128 s8, s16, s32, s64, s128, v2s8, v2s16, v2s32, v2s64,
129 v3s8, v3s16, v3s32, v3s64, v4s8, v4s16, v4s32, v4s64, v8s8,
130 v8s16, v8s32, v8s64, v16s8, v16s16, v16s32, v16s64};
131
132 auto allBoolScalarsAndVectors = {s1, v2s1, v3s1, v4s1, v8s1, v16s1};
133
134 auto allIntScalars = {s8, s16, s32, s64, s128};
135
136 auto allFloatScalarsAndF16Vector2AndVector4s = {s16, s32, s64, v2s16, v4s16};
137
138 auto allFloatScalarsAndVectors = {
139 s16, s32, s64, v2s16, v2s32, v2s64, v3s16, v3s32, v3s64,
140 v4s16, v4s32, v4s64, v8s16, v8s32, v8s64, v16s16, v16s32, v16s64};
141
142 auto allFloatAndIntScalarsAndPtrs = {s8, s16, s32, s64, p0, p1,
143 p2, p3, p4, p5, p6, p7,
144 p8, p9, p10, p11, p12, p13};
145
146 auto allPtrs = {p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13};
147
148 auto &allowedVectorTypes = ST.isShader() ? allShaderVectors : allVectors;
149
150 bool IsExtendedInts =
151 ST.canUseExtension(
152 E: SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers) ||
153 ST.canUseExtension(E: SPIRV::Extension::SPV_KHR_bit_instructions) ||
154 ST.canUseExtension(E: SPIRV::Extension::SPV_INTEL_int4);
155 auto extendedScalarsAndVectors =
156 [IsExtendedInts](const LegalityQuery &Query) {
157 const LLT Ty = Query.Types[0];
158 return IsExtendedInts && Ty.isValid() && !Ty.isPointerOrPointerVector();
159 };
160 auto extendedScalarsAndVectorsProduct = [IsExtendedInts](
161 const LegalityQuery &Query) {
162 const LLT Ty1 = Query.Types[0], Ty2 = Query.Types[1];
163 return IsExtendedInts && Ty1.isValid() && Ty2.isValid() &&
164 !Ty1.isPointerOrPointerVector() && !Ty2.isPointerOrPointerVector();
165 };
166 auto extendedPtrsScalarsAndVectors =
167 [IsExtendedInts](const LegalityQuery &Query) {
168 const LLT Ty = Query.Types[0];
169 return IsExtendedInts && Ty.isValid();
170 };
171
172 // The universal validation rules in the SPIR-V specification state that
173 // vector sizes are typically limited to 2, 3, or 4. However, larger vector
174 // sizes (8 and 16) are enabled when the Kernel capability is present. For
175 // shader execution models, vector sizes are strictly limited to 4. In
176 // non-shader contexts, vector sizes of 8 and 16 are also permitted, but
177 // arbitrary sizes (e.g., 6 or 11) are not.
178 uint32_t MaxVectorSize = ST.isShader() ? 4 : 16;
179 LLVM_DEBUG(dbgs() << "MaxVectorSize: " << MaxVectorSize << "\n");
180
181 for (auto Opc : getTypeFoldingSupportedOpcodes()) {
182 switch (Opc) {
183 case G_EXTRACT_VECTOR_ELT:
184 case G_UREM:
185 case G_SREM:
186 case G_UDIV:
187 case G_SDIV:
188 case G_FREM:
189 break;
190 default:
191 getActionDefinitionsBuilder(Opcode: Opc)
192 .customFor(Types: allScalars)
193 .customFor(Types: allowedVectorTypes)
194 .moreElementsToNextPow2(TypeIdx: 0)
195 .fewerElementsIf(Predicate: vectorElementCountIsGreaterThan(TypeIdx: 0, Size: MaxVectorSize),
196 Mutation: LegalizeMutations::changeElementCountTo(
197 TypeIdx: 0, EC: ElementCount::getFixed(MinVal: MaxVectorSize)))
198 .custom();
199 break;
200 }
201 }
202
203 getActionDefinitionsBuilder(Opcodes: {G_UREM, G_SREM, G_SDIV, G_UDIV, G_FREM})
204 .customFor(Types: allScalars)
205 .customFor(Types: allowedVectorTypes)
206 .scalarizeIf(Predicate: numElementsNotPow2(TypeIdx: 0), TypeIdx: 0)
207 .fewerElementsIf(Predicate: vectorElementCountIsGreaterThan(TypeIdx: 0, Size: MaxVectorSize),
208 Mutation: LegalizeMutations::changeElementCountTo(
209 TypeIdx: 0, EC: ElementCount::getFixed(MinVal: MaxVectorSize)))
210 .custom();
211
212 getActionDefinitionsBuilder(Opcodes: {G_FMA, G_STRICT_FMA})
213 .legalFor(Types: allScalars)
214 .legalFor(Types: allowedVectorTypes)
215 .moreElementsToNextPow2(TypeIdx: 0)
216 .fewerElementsIf(Predicate: vectorElementCountIsGreaterThan(TypeIdx: 0, Size: MaxVectorSize),
217 Mutation: LegalizeMutations::changeElementCountTo(
218 TypeIdx: 0, EC: ElementCount::getFixed(MinVal: MaxVectorSize)))
219 .alwaysLegal();
220
221 getActionDefinitionsBuilder(Opcode: G_INTRINSIC_W_SIDE_EFFECTS).custom();
222
223 getActionDefinitionsBuilder(Opcode: G_SHUFFLE_VECTOR)
224 .legalForCartesianProduct(Types0: allowedVectorTypes, Types1: allowedVectorTypes)
225 .moreElementsToNextPow2(TypeIdx: 0)
226 .lowerIf(Predicate: vectorElementCountIsGreaterThan(TypeIdx: 0, Size: MaxVectorSize))
227 .moreElementsToNextPow2(TypeIdx: 1)
228 .lowerIf(Predicate: vectorElementCountIsGreaterThan(TypeIdx: 1, Size: MaxVectorSize));
229
230 getActionDefinitionsBuilder(Opcode: G_EXTRACT_VECTOR_ELT)
231 .moreElementsToNextPow2(TypeIdx: 1)
232 .fewerElementsIf(Predicate: vectorElementCountIsGreaterThan(TypeIdx: 1, Size: MaxVectorSize),
233 Mutation: LegalizeMutations::changeElementCountTo(
234 TypeIdx: 1, EC: ElementCount::getFixed(MinVal: MaxVectorSize)))
235 .custom();
236
237 getActionDefinitionsBuilder(Opcode: G_INSERT_VECTOR_ELT)
238 .moreElementsToNextPow2(TypeIdx: 0)
239 .fewerElementsIf(Predicate: vectorElementCountIsGreaterThan(TypeIdx: 0, Size: MaxVectorSize),
240 Mutation: LegalizeMutations::changeElementCountTo(
241 TypeIdx: 0, EC: ElementCount::getFixed(MinVal: MaxVectorSize)))
242 .custom();
243
244 // Illegal G_UNMERGE_VALUES instructions should be handled
245 // during the combine phase.
246 getActionDefinitionsBuilder(Opcode: G_BUILD_VECTOR)
247 .legalIf(Predicate: vectorElementCountIsLessThanOrEqualTo(TypeIdx: 0, Size: MaxVectorSize));
248
249 // When entering the legalizer, there should be no G_BITCAST instructions.
250 // They should all be calls to the `spv_bitcast` intrinsic. The call to
251 // the intrinsic will be converted to a G_BITCAST during legalization if
252 // the vectors are not legal. After using the rules to legalize a G_BITCAST,
253 // we turn it back into a call to the intrinsic with a custom rule to avoid
254 // potential machine verifier failures.
255 getActionDefinitionsBuilder(Opcode: G_BITCAST)
256 .moreElementsToNextPow2(TypeIdx: 0)
257 .moreElementsToNextPow2(TypeIdx: 1)
258 .fewerElementsIf(Predicate: vectorElementCountIsGreaterThan(TypeIdx: 0, Size: MaxVectorSize),
259 Mutation: LegalizeMutations::changeElementCountTo(
260 TypeIdx: 0, EC: ElementCount::getFixed(MinVal: MaxVectorSize)))
261 .lowerIf(Predicate: vectorElementCountIsGreaterThan(TypeIdx: 1, Size: MaxVectorSize))
262 .custom();
263
264 // If the result is still illegal, the combiner should be able to remove it.
265 getActionDefinitionsBuilder(Opcode: G_CONCAT_VECTORS)
266 .legalForCartesianProduct(Types0: allowedVectorTypes, Types1: allowedVectorTypes);
267
268 getActionDefinitionsBuilder(Opcode: G_SPLAT_VECTOR)
269 .legalFor(Types: allowedVectorTypes)
270 .moreElementsToNextPow2(TypeIdx: 0)
271 .fewerElementsIf(Predicate: vectorElementCountIsGreaterThan(TypeIdx: 0, Size: MaxVectorSize),
272 Mutation: LegalizeMutations::changeElementSizeTo(TypeIdx: 0, FromTypeIdx: MaxVectorSize))
273 .alwaysLegal();
274
275 // Vector Reduction Operations
276 getActionDefinitionsBuilder(
277 Opcodes: {G_VECREDUCE_SMIN, G_VECREDUCE_SMAX, G_VECREDUCE_UMIN, G_VECREDUCE_UMAX,
278 G_VECREDUCE_ADD, G_VECREDUCE_MUL, G_VECREDUCE_FMUL, G_VECREDUCE_FMIN,
279 G_VECREDUCE_FMAX, G_VECREDUCE_FMINIMUM, G_VECREDUCE_FMAXIMUM,
280 G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
281 .legalFor(Types: allowedVectorTypes)
282 .scalarize(TypeIdx: 1)
283 .lower();
284
285 getActionDefinitionsBuilder(Opcodes: {G_VECREDUCE_SEQ_FADD, G_VECREDUCE_SEQ_FMUL})
286 .scalarize(TypeIdx: 2)
287 .lower();
288
289 // Illegal G_UNMERGE_VALUES instructions should be handled
290 // during the combine phase.
291 getActionDefinitionsBuilder(Opcode: G_UNMERGE_VALUES)
292 .legalIf(Predicate: vectorElementCountIsLessThanOrEqualTo(TypeIdx: 1, Size: MaxVectorSize));
293
294 getActionDefinitionsBuilder(Opcodes: {G_MEMCPY, G_MEMMOVE})
295 .unsupportedIf(Predicate: LegalityPredicates::any(P0: typeIs(TypeIdx: 0, TypesInit: p9), P1: typeIs(TypeIdx: 1, TypesInit: p9)))
296 .legalIf(Predicate: all(P0: typeInSet(TypeIdx: 0, TypesInit: allPtrs), P1: typeInSet(TypeIdx: 1, TypesInit: allPtrs)));
297
298 getActionDefinitionsBuilder(Opcode: G_MEMSET)
299 .unsupportedIf(Predicate: typeIs(TypeIdx: 0, TypesInit: p9))
300 .legalIf(Predicate: all(P0: typeInSet(TypeIdx: 0, TypesInit: allPtrs), P1: typeInSet(TypeIdx: 1, TypesInit: allIntScalars)));
301
302 getActionDefinitionsBuilder(Opcode: G_ADDRSPACE_CAST)
303 .unsupportedIf(
304 Predicate: LegalityPredicates::any(P0: all(P0: typeIs(TypeIdx: 0, TypesInit: p9), P1: typeIsNot(TypeIdx: 1, Type: p9)),
305 P1: all(P0: typeIsNot(TypeIdx: 0, Type: p9), P1: typeIs(TypeIdx: 1, TypesInit: p9))))
306 .legalForCartesianProduct(Types0: allPtrs, Types1: allPtrs);
307
308 // Should we be legalizing bad scalar sizes like s5 here instead
309 // of handling them in the instruction selector?
310 getActionDefinitionsBuilder(Opcodes: {G_LOAD, G_STORE})
311 .unsupportedIf(Predicate: typeIs(TypeIdx: 1, TypesInit: p9))
312 .legalForCartesianProduct(Types0: allowedVectorTypes, Types1: allPtrs)
313 .legalForCartesianProduct(Types0: allPtrs, Types1: allPtrs)
314 .legalIf(Predicate: isScalar(TypeIdx: 0))
315 .custom();
316
317 getActionDefinitionsBuilder(Opcodes: {G_SMIN, G_SMAX, G_UMIN, G_UMAX, G_ABS,
318 G_BITREVERSE, G_SADDSAT, G_UADDSAT, G_SSUBSAT,
319 G_USUBSAT, G_SCMP, G_UCMP})
320 .legalFor(Types: allIntScalarsAndVectors)
321 .legalIf(Predicate: extendedScalarsAndVectors);
322
323 getActionDefinitionsBuilder(Opcode: G_STRICT_FLDEXP)
324 .legalForCartesianProduct(Types0: allFloatScalarsAndVectors, Types1: allIntScalars);
325
326 getActionDefinitionsBuilder(Opcodes: {G_FPTOSI, G_FPTOUI})
327 .legalForCartesianProduct(Types0: allIntScalarsAndVectors,
328 Types1: allFloatScalarsAndVectors);
329
330 getActionDefinitionsBuilder(Opcodes: {G_FPTOSI_SAT, G_FPTOUI_SAT})
331 .legalForCartesianProduct(Types0: allIntScalarsAndVectors,
332 Types1: allFloatScalarsAndVectors);
333
334 getActionDefinitionsBuilder(Opcodes: {G_SITOFP, G_UITOFP})
335 .legalForCartesianProduct(Types0: allFloatScalarsAndVectors,
336 Types1: allScalarsAndVectors);
337
338 getActionDefinitionsBuilder(Opcode: G_CTPOP)
339 .legalForCartesianProduct(Types: allIntScalarsAndVectors)
340 .legalIf(Predicate: extendedScalarsAndVectorsProduct);
341
342 // Extensions.
343 getActionDefinitionsBuilder(Opcodes: {G_TRUNC, G_ZEXT, G_SEXT, G_ANYEXT})
344 .legalForCartesianProduct(Types: allScalarsAndVectors)
345 .legalIf(Predicate: extendedScalarsAndVectorsProduct);
346
347 getActionDefinitionsBuilder(Opcode: G_PHI)
348 .legalFor(Types: allPtrsScalarsAndVectors)
349 .legalIf(Predicate: extendedPtrsScalarsAndVectors);
350
351 getActionDefinitionsBuilder(Opcode: G_BITCAST).legalIf(
352 Predicate: all(P0: typeInSet(TypeIdx: 0, TypesInit: allPtrsScalarsAndVectors),
353 P1: typeInSet(TypeIdx: 1, TypesInit: allPtrsScalarsAndVectors)));
354
355 getActionDefinitionsBuilder(Opcodes: {G_IMPLICIT_DEF, G_FREEZE})
356 .legalFor(Types: {s1, s128})
357 .legalFor(Types: allFloatAndIntScalarsAndPtrs)
358 .legalFor(Types: allowedVectorTypes)
359 .legalIf(Predicate: [](const LegalityQuery &Query) {
360 return Query.Types[0].isPointerVector();
361 })
362 .moreElementsToNextPow2(TypeIdx: 0)
363 .fewerElementsIf(Predicate: vectorElementCountIsGreaterThan(TypeIdx: 0, Size: MaxVectorSize),
364 Mutation: LegalizeMutations::changeElementCountTo(
365 TypeIdx: 0, EC: ElementCount::getFixed(MinVal: MaxVectorSize)));
366
367 getActionDefinitionsBuilder(Opcodes: {G_STACKSAVE, G_STACKRESTORE}).alwaysLegal();
368
369 getActionDefinitionsBuilder(Opcode: G_INTTOPTR)
370 .legalForCartesianProduct(Types0: allPtrs, Types1: allIntScalars)
371 .legalIf(
372 Predicate: all(P0: typeInSet(TypeIdx: 0, TypesInit: allPtrs), P1: typeOfExtendedScalars(TypeIdx: 1, IsExtendedInts)))
373 .legalIf(Predicate: [](const LegalityQuery &Query) {
374 const LLT DstTy = Query.Types[0];
375 const LLT SrcTy = Query.Types[1];
376 return DstTy.isPointerVector() && SrcTy.isVector() &&
377 !SrcTy.isPointer() &&
378 DstTy.getNumElements() == SrcTy.getNumElements();
379 });
380 getActionDefinitionsBuilder(Opcode: G_PTRTOINT)
381 .legalForCartesianProduct(Types0: allIntScalars, Types1: allPtrs)
382 .legalIf(
383 Predicate: all(P0: typeOfExtendedScalars(TypeIdx: 0, IsExtendedInts), P1: typeInSet(TypeIdx: 1, TypesInit: allPtrs)))
384 .legalIf(Predicate: [](const LegalityQuery &Query) {
385 const LLT DstTy = Query.Types[0];
386 const LLT SrcTy = Query.Types[1];
387 return SrcTy.isPointerVector() && DstTy.isVector() &&
388 !DstTy.isPointer() &&
389 DstTy.getNumElements() == SrcTy.getNumElements();
390 });
391 getActionDefinitionsBuilder(Opcode: G_PTR_ADD)
392 .legalForCartesianProduct(Types0: allPtrs, Types1: allIntScalars)
393 .legalIf(
394 Predicate: all(P0: typeInSet(TypeIdx: 0, TypesInit: allPtrs), P1: typeOfExtendedScalars(TypeIdx: 1, IsExtendedInts)));
395
396 // ST.canDirectlyComparePointers() for pointer args is supported in
397 // legalizeCustom().
398 getActionDefinitionsBuilder(Opcode: G_ICMP)
399 .unsupportedIf(Predicate: LegalityPredicates::any(
400 P0: all(P0: typeIs(TypeIdx: 0, TypesInit: p9), P1: typeInSet(TypeIdx: 1, TypesInit: allPtrs), args: typeIsNot(TypeIdx: 1, Type: p9)),
401 P1: all(P0: typeInSet(TypeIdx: 0, TypesInit: allPtrs), P1: typeIsNot(TypeIdx: 0, Type: p9), args: typeIs(TypeIdx: 1, TypesInit: p9))))
402 .legalIf(Predicate: [IsExtendedInts](const LegalityQuery &Query) {
403 const LLT Ty = Query.Types[1];
404 return IsExtendedInts && Ty.isValid() && !Ty.isPointerOrPointerVector();
405 })
406 .customIf(Predicate: all(P0: typeInSet(TypeIdx: 0, TypesInit: allBoolScalarsAndVectors),
407 P1: typeInSet(TypeIdx: 1, TypesInit: allPtrsScalarsAndVectors)));
408
409 getActionDefinitionsBuilder(Opcode: G_FCMP).legalIf(
410 Predicate: all(P0: typeInSet(TypeIdx: 0, TypesInit: allBoolScalarsAndVectors),
411 P1: typeInSet(TypeIdx: 1, TypesInit: allFloatScalarsAndVectors)));
412
413 getActionDefinitionsBuilder(Opcodes: {G_ATOMICRMW_OR, G_ATOMICRMW_ADD, G_ATOMICRMW_AND,
414 G_ATOMICRMW_MAX, G_ATOMICRMW_MIN,
415 G_ATOMICRMW_SUB, G_ATOMICRMW_XOR,
416 G_ATOMICRMW_UMAX, G_ATOMICRMW_UMIN})
417 .legalForCartesianProduct(Types0: allIntScalars, Types1: allPtrs);
418
419 getActionDefinitionsBuilder(
420 Opcodes: {G_ATOMICRMW_FADD, G_ATOMICRMW_FSUB, G_ATOMICRMW_FMIN, G_ATOMICRMW_FMAX})
421 .legalForCartesianProduct(Types0: allFloatScalarsAndF16Vector2AndVector4s,
422 Types1: allPtrs);
423
424 getActionDefinitionsBuilder(Opcode: G_ATOMICRMW_XCHG)
425 .legalForCartesianProduct(Types0: allFloatAndIntScalarsAndPtrs, Types1: allPtrs);
426
427 getActionDefinitionsBuilder(Opcode: G_ATOMIC_CMPXCHG_WITH_SUCCESS).lower();
428 // TODO: add proper legalization rules.
429 getActionDefinitionsBuilder(Opcode: G_ATOMIC_CMPXCHG).alwaysLegal();
430
431 getActionDefinitionsBuilder(Opcodes: {G_UADDO, G_USUBO, G_UMULO, G_SMULO})
432 .alwaysLegal();
433
434 getActionDefinitionsBuilder(Opcodes: {G_SADDO, G_SSUBO}).lower();
435
436 getActionDefinitionsBuilder(Opcodes: {G_LROUND, G_LLROUND})
437 .legalForCartesianProduct(Types0: allFloatScalarsAndVectors,
438 Types1: allIntScalarsAndVectors);
439
440 // FP conversions.
441 getActionDefinitionsBuilder(Opcodes: {G_FPTRUNC, G_FPEXT})
442 .legalForCartesianProduct(Types: allFloatScalarsAndVectors);
443
444 // Pointer-handling.
445 getActionDefinitionsBuilder(Opcode: G_FRAME_INDEX).legalFor(Types: {p0});
446
447 getActionDefinitionsBuilder(Opcode: G_GLOBAL_VALUE).legalFor(Types: allPtrs);
448
449 // Control-flow. In some cases (e.g. constants) s1 may be promoted to s32.
450 getActionDefinitionsBuilder(Opcode: G_BRCOND).legalFor(Types: {s1, s32});
451
452 getActionDefinitionsBuilder(Opcode: G_FFREXP).legalForCartesianProduct(
453 Types0: allFloatScalarsAndVectors, Types1: {s32, v2s32, v3s32, v4s32, v8s32, v16s32});
454
455 // TODO: Review the target OpenCL and GLSL Extended Instruction Set specs to
456 // tighten these requirements. Many of these math functions are only legal on
457 // specific bitwidths, so they are not selectable for
458 // allFloatScalarsAndVectors.
459 // clang-format off
460 getActionDefinitionsBuilder(Opcodes: {G_STRICT_FSQRT,
461 G_FPOW,
462 G_FEXP,
463 G_FMODF,
464 G_FSINCOS,
465 G_FEXP2,
466 G_FEXP10,
467 G_FLOG,
468 G_FLOG2,
469 G_FLOG10,
470 G_FABS,
471 G_FMINNUM,
472 G_FMAXNUM,
473 G_FCEIL,
474 G_FCOS,
475 G_FSIN,
476 G_FTAN,
477 G_FACOS,
478 G_FASIN,
479 G_FATAN,
480 G_FATAN2,
481 G_FCOSH,
482 G_FSINH,
483 G_FTANH,
484 G_FSQRT,
485 G_FFLOOR,
486 G_FRINT,
487 G_FNEARBYINT,
488 G_INTRINSIC_ROUND,
489 G_INTRINSIC_TRUNC,
490 G_FMINIMUM,
491 G_FMAXIMUM,
492 G_INTRINSIC_ROUNDEVEN})
493 .legalFor(Types: allFloatScalarsAndVectors);
494 // clang-format on
495
496 getActionDefinitionsBuilder(Opcode: G_FCOPYSIGN)
497 .legalForCartesianProduct(Types0: allFloatScalarsAndVectors,
498 Types1: allFloatScalarsAndVectors);
499
500 getActionDefinitionsBuilder(Opcode: G_FPOWI).legalForCartesianProduct(
501 Types0: allFloatScalarsAndVectors, Types1: allIntScalarsAndVectors);
502
503 if (ST.canUseExtInstSet(E: SPIRV::InstructionSet::OpenCL_std)) {
504 getActionDefinitionsBuilder(
505 Opcodes: {G_CTTZ, G_CTTZ_ZERO_UNDEF, G_CTLZ, G_CTLZ_ZERO_UNDEF})
506 .legalForCartesianProduct(Types0: allIntScalarsAndVectors,
507 Types1: allIntScalarsAndVectors);
508
509 // Struct return types become a single scalar, so cannot easily legalize.
510 getActionDefinitionsBuilder(Opcodes: {G_SMULH, G_UMULH}).alwaysLegal();
511 }
512
513 getActionDefinitionsBuilder(Opcode: G_IS_FPCLASS).custom();
514
515 getLegacyLegalizerInfo().computeTables();
516 verify(MII: *ST.getInstrInfo());
517}
518
519static bool legalizeExtractVectorElt(LegalizerHelper &Helper, MachineInstr &MI,
520 SPIRVGlobalRegistry *GR) {
521 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
522 Register DstReg = MI.getOperand(i: 0).getReg();
523 Register SrcReg = MI.getOperand(i: 1).getReg();
524 Register IdxReg = MI.getOperand(i: 2).getReg();
525
526 MIRBuilder
527 .buildIntrinsic(ID: Intrinsic::spv_extractelt, Res: ArrayRef<Register>{DstReg})
528 .addUse(RegNo: SrcReg)
529 .addUse(RegNo: IdxReg);
530 MI.eraseFromParent();
531 return true;
532}
533
534static bool legalizeInsertVectorElt(LegalizerHelper &Helper, MachineInstr &MI,
535 SPIRVGlobalRegistry *GR) {
536 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
537 Register DstReg = MI.getOperand(i: 0).getReg();
538 Register SrcReg = MI.getOperand(i: 1).getReg();
539 Register ValReg = MI.getOperand(i: 2).getReg();
540 Register IdxReg = MI.getOperand(i: 3).getReg();
541
542 MIRBuilder
543 .buildIntrinsic(ID: Intrinsic::spv_insertelt, Res: ArrayRef<Register>{DstReg})
544 .addUse(RegNo: SrcReg)
545 .addUse(RegNo: ValReg)
546 .addUse(RegNo: IdxReg);
547 MI.eraseFromParent();
548 return true;
549}
550
551static Register convertPtrToInt(Register Reg, LLT ConvTy, SPIRVTypeInst SpvType,
552 LegalizerHelper &Helper,
553 MachineRegisterInfo &MRI,
554 SPIRVGlobalRegistry *GR) {
555 Register ConvReg = MRI.createGenericVirtualRegister(Ty: ConvTy);
556 MRI.setRegClass(Reg: ConvReg, RC: GR->getRegClass(SpvType));
557 GR->assignSPIRVTypeToVReg(Type: SpvType, VReg: ConvReg, MF: Helper.MIRBuilder.getMF());
558 Helper.MIRBuilder.buildInstr(Opcode: TargetOpcode::G_PTRTOINT)
559 .addDef(RegNo: ConvReg)
560 .addUse(RegNo: Reg);
561 return ConvReg;
562}
563
564static bool needsVectorLegalization(const LLT &Ty, const SPIRVSubtarget &ST) {
565 if (!Ty.isVector())
566 return false;
567 unsigned NumElements = Ty.getNumElements();
568 unsigned MaxVectorSize = ST.isShader() ? 4 : 16;
569 return (NumElements > 4 && !isPowerOf2_32(Value: NumElements)) ||
570 NumElements > MaxVectorSize;
571}
572
// Custom-legalizes a G_LOAD. Scalar loads and vectors the target can
// represent are accepted as-is. An illegal vector load is scalarized: each
// element is loaded through an spv_gep element pointer and the destination
// is rebuilt with G_BUILD_VECTOR.
static bool legalizeLoad(LegalizerHelper &Helper, MachineInstr &MI,
                         SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  Register DstReg = MI.getOperand(i: 0).getReg();
  Register PtrReg = MI.getOperand(i: 1).getReg();
  LLT DstTy = MRI.getType(Reg: DstReg);

  // Scalar loads reach here via the catch-all .custom() rule; leave them.
  if (!DstTy.isVector())
    return true;

  const SPIRVSubtarget &ST = MI.getMF()->getSubtarget<SPIRVSubtarget>();
  if (!needsVectorLegalization(Ty: DstTy, ST))
    return true;

  SmallVector<Register, 8> SplitRegs;
  LLT EltTy = DstTy.getElementType();
  unsigned NumElts = DstTy.getNumElements();

  LLT PtrTy = MRI.getType(Reg: PtrReg);
  // First GEP index is always 0 (step through the pointer, not past it).
  auto Zero = MIRBuilder.buildConstant(Res: LLT::scalar(SizeInBits: 32), Val: 0);

  for (unsigned i = 0; i < NumElts; ++i) {
    auto Idx = MIRBuilder.buildConstant(Res: LLT::scalar(SizeInBits: 32), Val: i);
    Register EltPtr = MRI.createGenericVirtualRegister(Ty: PtrTy);

    // Build a pointer to element i via the spv_gep intrinsic.
    MIRBuilder.buildIntrinsic(ID: Intrinsic::spv_gep, Res: ArrayRef<Register>{EltPtr})
        .addImm(Val: 1) // InBounds
        .addUse(RegNo: PtrReg)
        .addUse(RegNo: Zero.getReg(Idx: 0))
        .addUse(RegNo: Idx.getReg(Idx: 0));

    // Derive per-element pointer info and alignment from the original
    // memory operand, when one is attached.
    MachinePointerInfo EltPtrInfo;
    Align EltAlign = Align(1);
    if (!MI.memoperands_empty()) {
      MachineMemOperand *MMO = *MI.memoperands_begin();
      EltPtrInfo =
          MMO->getPointerInfo().getWithOffset(O: i * EltTy.getSizeInBytes());
      EltAlign = commonAlignment(A: MMO->getAlign(), Offset: i * EltTy.getSizeInBytes());
    }

    Register EltReg = MRI.createGenericVirtualRegister(Ty: EltTy);
    MIRBuilder.buildLoad(Res: EltReg, Addr: EltPtr, PtrInfo: EltPtrInfo, Alignment: EltAlign);
    SplitRegs.push_back(Elt: EltReg);
  }

  // Reassemble the original destination from the scalar loads.
  MIRBuilder.buildBuildVector(Res: DstReg, Ops: SplitRegs);
  MI.eraseFromParent();
  return true;
}
623
// Custom-legalizes a vector G_STORE by scalarizing it: the value is split
// with G_UNMERGE_VALUES and each element is stored through an spv_gep
// element pointer. Unlike legalizeLoad, this expects a vector value
// (asserted below).
static bool legalizeStore(LegalizerHelper &Helper, MachineInstr &MI,
                          SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  Register ValReg = MI.getOperand(i: 0).getReg();
  Register PtrReg = MI.getOperand(i: 1).getReg();
  LLT ValTy = MRI.getType(Reg: ValReg);

  assert(ValTy.isVector() && "Expected vector store");

  // One virtual register per element, filled by the unmerge below.
  SmallVector<Register, 8> SplitRegs;
  LLT EltTy = ValTy.getElementType();
  unsigned NumElts = ValTy.getNumElements();

  for (unsigned i = 0; i < NumElts; ++i)
    SplitRegs.push_back(Elt: MRI.createGenericVirtualRegister(Ty: EltTy));

  MIRBuilder.buildUnmerge(Res: SplitRegs, Op: ValReg);

  LLT PtrTy = MRI.getType(Reg: PtrReg);
  // First GEP index is always 0 (step through the pointer, not past it).
  auto Zero = MIRBuilder.buildConstant(Res: LLT::scalar(SizeInBits: 32), Val: 0);

  for (unsigned i = 0; i < NumElts; ++i) {
    auto Idx = MIRBuilder.buildConstant(Res: LLT::scalar(SizeInBits: 32), Val: i);
    Register EltPtr = MRI.createGenericVirtualRegister(Ty: PtrTy);

    // Build a pointer to element i via the spv_gep intrinsic.
    MIRBuilder.buildIntrinsic(ID: Intrinsic::spv_gep, Res: ArrayRef<Register>{EltPtr})
        .addImm(Val: 1) // InBounds
        .addUse(RegNo: PtrReg)
        .addUse(RegNo: Zero.getReg(Idx: 0))
        .addUse(RegNo: Idx.getReg(Idx: 0));

    // Derive per-element pointer info and alignment from the original
    // memory operand, when one is attached.
    MachinePointerInfo EltPtrInfo;
    Align EltAlign = Align(1);
    if (!MI.memoperands_empty()) {
      MachineMemOperand *MMO = *MI.memoperands_begin();
      EltPtrInfo =
          MMO->getPointerInfo().getWithOffset(O: i * EltTy.getSizeInBytes());
      EltAlign = commonAlignment(A: MMO->getAlign(), Offset: i * EltTy.getSizeInBytes());
    }

    MIRBuilder.buildStore(Val: SplitRegs[i], Addr: EltPtr, PtrInfo: EltPtrInfo, Alignment: EltAlign);
  }

  MI.eraseFromParent();
  return true;
}
671
// Hook invoked for every instruction routed to a .custom()/.customFor()
// action. Dispatches by opcode; returning true without touching MI accepts
// the instruction as-is.
bool SPIRVLegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  switch (MI.getOpcode()) {
  default:
    // TODO: implement legalization for other opcodes.
    return true;
  case TargetOpcode::G_BITCAST:
    return legalizeBitcast(Helper, MI);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return legalizeExtractVectorElt(Helper, MI, GR);
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    return legalizeInsertVectorElt(Helper, MI, GR);
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return legalizeIntrinsic(Helper, MI);
  case TargetOpcode::G_IS_FPCLASS:
    return legalizeIsFPClass(Helper, MI, LocObserver);
  case TargetOpcode::G_ICMP: {
    // Pointer comparisons: unless the target can compare pointers directly
    // and the predicate is plain EQ/NE, convert both pointer operands to
    // pointer-sized integers via G_PTRTOINT and compare those instead.
    auto &Op0 = MI.getOperand(i: 2);
    auto &Op1 = MI.getOperand(i: 3);
    Register Reg0 = Op0.getReg();
    Register Reg1 = Op1.getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(MI.getOperand(i: 1).getPredicate());
    if ((!ST->canDirectlyComparePointers() ||
         (Cond != CmpInst::ICMP_EQ && Cond != CmpInst::ICMP_NE)) &&
        MRI.getType(Reg: Reg0).isPointer() && MRI.getType(Reg: Reg1).isPointer()) {
      // Pointer-sized integer LLT plus the matching SPIR-V integer type.
      LLT ConvT = LLT::scalar(SizeInBits: ST->getPointerSize());
      Type *LLVMTy = IntegerType::get(C&: MI.getMF()->getFunction().getContext(),
                                      NumBits: ST->getPointerSize());
      SPIRVTypeInst SpirvTy = GR->getOrCreateSPIRVType(
          Type: LLVMTy, MIRBuilder&: Helper.MIRBuilder, AQ: SPIRV::AccessQualifier::ReadWrite, EmitIR: true);
      // Replace both operands in place with the converted registers.
      Op0.setReg(convertPtrToInt(Reg: Reg0, ConvTy: ConvT, SpvType: SpirvTy, Helper, MRI, GR));
      Op1.setReg(convertPtrToInt(Reg: Reg1, ConvTy: ConvT, SpvType: SpirvTy, Helper, MRI, GR));
    }
    return true;
  }
  case TargetOpcode::G_LOAD:
    return legalizeLoad(Helper, MI, GR);
  case TargetOpcode::G_STORE:
    return legalizeStore(Helper, MI, GR);
  }
}
717
// Creates a stack temporary sized for the vector in SrcReg and returns the
// instruction defining the pointer to it. PtrInfo and VecAlign are output
// parameters describing the slot. The slot's SPIR-V type is registered as a
// Function-storage pointer to an array of the vector's element type, so
// per-element access chains can later be built against it.
static MachineInstrBuilder
createStackTemporaryForVector(LegalizerHelper &Helper, SPIRVGlobalRegistry *GR,
                              Register SrcReg, LLT SrcTy,
                              MachinePointerInfo &PtrInfo, Align &VecAlign) {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();

  VecAlign = Helper.getStackTemporaryAlignment(Type: SrcTy);
  auto StackTemp = Helper.createStackTemporary(
      Bytes: TypeSize::getFixed(ExactSize: SrcTy.getSizeInBytes()), Alignment: VecAlign, PtrInfo);

  // Set the type of StackTemp to a pointer to an array of the element type.
  SPIRVTypeInst SpvSrcTy = GR->getSPIRVTypeForVReg(VReg: SrcReg);
  SPIRVTypeInst EltSpvTy = GR->getScalarOrVectorComponentType(Type: SpvSrcTy);
  const Type *LLVMEltTy = GR->getTypeForSPIRVType(Ty: EltSpvTy);
  const Type *LLVMArrTy =
      ArrayType::get(ElementType: const_cast<Type *>(LLVMEltTy), NumElements: SrcTy.getNumElements());
  SPIRVTypeInst ArrSpvTy = GR->getOrCreateSPIRVType(
      Type: LLVMArrTy, MIRBuilder, AQ: SPIRV::AccessQualifier::ReadWrite, EmitIR: true);
  SPIRVTypeInst PtrToArrSpvTy = GR->getOrCreateSPIRVPointerType(
      BaseType: ArrSpvTy, MIRBuilder, SC: SPIRV::StorageClass::Function);

  // Attach the SPIR-V pointer type and register class to the slot's vreg.
  Register StackReg = StackTemp.getReg(Idx: 0);
  MRI.setRegClass(Reg: StackReg, RC: GR->getRegClass(SpvType: PtrToArrSpvTy));
  GR->assignSPIRVTypeToVReg(Type: PtrToArrSpvTy, VReg: StackReg, MF: MIRBuilder.getMF());

  return StackTemp;
}
746
747static bool legalizeSpvBitcast(LegalizerHelper &Helper, MachineInstr &MI,
748 SPIRVGlobalRegistry *GR) {
749 LLVM_DEBUG(dbgs() << "Found a bitcast instruction\n");
750 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
751 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
752 const SPIRVSubtarget &ST = MI.getMF()->getSubtarget<SPIRVSubtarget>();
753
754 Register DstReg = MI.getOperand(i: 0).getReg();
755 Register SrcReg = MI.getOperand(i: 2).getReg();
756 LLT DstTy = MRI.getType(Reg: DstReg);
757 LLT SrcTy = MRI.getType(Reg: SrcReg);
758
759 // If an spv_bitcast needs to be legalized, we convert it to G_BITCAST to
760 // allow using the generic legalization rules.
761 if (needsVectorLegalization(Ty: DstTy, ST) ||
762 needsVectorLegalization(Ty: SrcTy, ST)) {
763 LLVM_DEBUG(dbgs() << "Replacing with a G_BITCAST\n");
764 MIRBuilder.buildBitcast(Dst: DstReg, Src: SrcReg);
765 MI.eraseFromParent();
766 }
767 return true;
768}
769
// Custom-lowers the spv_insertelt intrinsic when its result vector is a type
// that needs further vector legalization for this subtarget; otherwise the
// instruction is left untouched.
//
// Two strategies are used:
//  * Constant in-bounds index: unmerge the source vector into scalar
//    registers, overwrite the selected lane, and rebuild the vector.
//  * Dynamic (or out-of-range constant) index: spill the vector to a stack
//    temporary typed as an array, store the new element through an spv_gep,
//    and reload the whole vector.
//
// Always returns true (this legalization cannot fail).
static bool legalizeSpvInsertElt(LegalizerHelper &Helper, MachineInstr &MI,
                                 SPIRVGlobalRegistry *GR) {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  const SPIRVSubtarget &ST = MI.getMF()->getSubtarget<SPIRVSubtarget>();

  // Intrinsic operand layout: 0 = result, 1 = intrinsic ID, 2 = vector,
  // 3 = value to insert, 4 = index.
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);

  if (needsVectorLegalization(DstTy, ST)) {
    Register SrcReg = MI.getOperand(2).getReg();
    Register ValReg = MI.getOperand(3).getReg();
    LLT SrcTy = MRI.getType(SrcReg);
    MachineOperand &IdxOperand = MI.getOperand(4);

    if (getImm(IdxOperand, &MRI)) {
      uint64_t IdxVal = foldImm(IdxOperand, &MRI);
      if (IdxVal < SrcTy.getNumElements()) {
        // Constant in-bounds index: split the vector into per-element
        // registers, each tagged with the element's SPIR-V type and register
        // class, then replace the selected lane and reassemble.
        SmallVector<Register, 8> Regs;
        SPIRVTypeInst ElementType =
            GR->getScalarOrVectorComponentType(GR->getSPIRVTypeForVReg(DstReg));
        LLT ElementLLTTy = GR->getRegType(ElementType);
        for (unsigned I = 0, E = SrcTy.getNumElements(); I < E; ++I) {
          Register Reg = MRI.createGenericVirtualRegister(ElementLLTTy);
          MRI.setRegClass(Reg, GR->getRegClass(ElementType));
          GR->assignSPIRVTypeToVReg(ElementType, Reg, *MI.getMF());
          Regs.push_back(Reg);
        }
        MIRBuilder.buildUnmerge(Regs, SrcReg);
        // The register unmerged into lane IdxVal is simply left unused.
        Regs[IdxVal] = ValReg;
        MIRBuilder.buildBuildVector(DstReg, Regs);
        MI.eraseFromParent();
        return true;
      }
    }

    // Dynamic index: go through memory. Spill the vector to a stack slot
    // typed as an array of the element type.
    LLT EltTy = SrcTy.getElementType();
    Align VecAlign;
    MachinePointerInfo PtrInfo;
    auto StackTemp = createStackTemporaryForVector(Helper, GR, SrcReg, SrcTy,
                                                   PtrInfo, VecAlign);

    MIRBuilder.buildStore(SrcReg, StackTemp, PtrInfo, VecAlign);

    // Address of the selected element: &slot[0][Idx] via an in-bounds
    // spv_gep.
    Register IdxReg = IdxOperand.getReg();
    LLT PtrTy = MRI.getType(StackTemp.getReg(0));
    Register EltPtr = MRI.createGenericVirtualRegister(PtrTy);
    auto Zero = MIRBuilder.buildConstant(LLT::scalar(32), 0);

    MIRBuilder.buildIntrinsic(Intrinsic::spv_gep, ArrayRef<Register>{EltPtr})
        .addImm(1) // InBounds
        .addUse(StackTemp.getReg(0))
        .addUse(Zero.getReg(0))
        .addUse(IdxReg);

    // Store the new element in place, then reload the updated vector.
    MachinePointerInfo EltPtrInfo = MachinePointerInfo(PtrTy.getAddressSpace());
    Align EltAlign = Helper.getStackTemporaryAlignment(EltTy);
    MIRBuilder.buildStore(ValReg, EltPtr, EltPtrInfo, EltAlign);

    MIRBuilder.buildLoad(DstReg, StackTemp, PtrInfo, VecAlign);
    MI.eraseFromParent();
    return true;
  }
  return true;
}
835
// Custom-lowers the spv_extractelt intrinsic when its source vector is a type
// that needs further vector legalization for this subtarget; otherwise the
// instruction is left untouched.
//
// Two strategies are used:
//  * Constant in-bounds index: unmerge the source vector so the selected lane
//    is defined directly into the destination register.
//  * Dynamic (or out-of-range constant) index: spill the vector to a stack
//    temporary typed as an array and load the element through an spv_gep.
//
// Always returns true (this legalization cannot fail).
static bool legalizeSpvExtractElt(LegalizerHelper &Helper, MachineInstr &MI,
                                  SPIRVGlobalRegistry *GR) {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  const SPIRVSubtarget &ST = MI.getMF()->getSubtarget<SPIRVSubtarget>();

  // Intrinsic operand layout: 0 = result, 1 = intrinsic ID, 2 = vector,
  // 3 = index.
  Register SrcReg = MI.getOperand(2).getReg();
  LLT SrcTy = MRI.getType(SrcReg);

  if (needsVectorLegalization(SrcTy, ST)) {
    Register DstReg = MI.getOperand(0).getReg();
    MachineOperand &IdxOperand = MI.getOperand(3);

    if (getImm(IdxOperand, &MRI)) {
      uint64_t IdxVal = foldImm(IdxOperand, &MRI);
      if (IdxVal < SrcTy.getNumElements()) {
        // Constant in-bounds index: unmerge the vector; lane IdxVal lands in
        // DstReg, every other lane goes to a fresh (unused) register tagged
        // with the destination's SPIR-V type and register class.
        LLT DstTy = MRI.getType(DstReg);
        SmallVector<Register, 8> Regs;
        SPIRVTypeInst DstSpvTy = GR->getSPIRVTypeForVReg(DstReg);
        for (unsigned I = 0, E = SrcTy.getNumElements(); I < E; ++I) {
          if (I == IdxVal) {
            Regs.push_back(DstReg);
          } else {
            Register Reg = MRI.createGenericVirtualRegister(DstTy);
            MRI.setRegClass(Reg, GR->getRegClass(DstSpvTy));
            GR->assignSPIRVTypeToVReg(DstSpvTy, Reg, *MI.getMF());
            Regs.push_back(Reg);
          }
        }
        MIRBuilder.buildUnmerge(Regs, SrcReg);
        MI.eraseFromParent();
        return true;
      }
    }

    // Dynamic index: go through memory. Spill the vector to a stack slot
    // typed as an array of the element type.
    LLT EltTy = SrcTy.getElementType();
    Align VecAlign;
    MachinePointerInfo PtrInfo;
    auto StackTemp = createStackTemporaryForVector(Helper, GR, SrcReg, SrcTy,
                                                   PtrInfo, VecAlign);

    MIRBuilder.buildStore(SrcReg, StackTemp, PtrInfo, VecAlign);

    // Address of the selected element: &slot[0][Idx] via an in-bounds
    // spv_gep.
    Register IdxReg = IdxOperand.getReg();
    LLT PtrTy = MRI.getType(StackTemp.getReg(0));
    Register EltPtr = MRI.createGenericVirtualRegister(PtrTy);
    auto Zero = MIRBuilder.buildConstant(LLT::scalar(32), 0);

    MIRBuilder.buildIntrinsic(Intrinsic::spv_gep, ArrayRef<Register>{EltPtr})
        .addImm(1) // InBounds
        .addUse(StackTemp.getReg(0))
        .addUse(Zero.getReg(0))
        .addUse(IdxReg);

    // Load only the selected element out of the spilled array.
    MachinePointerInfo EltPtrInfo = MachinePointerInfo(PtrTy.getAddressSpace());
    Align EltAlign = Helper.getStackTemporaryAlignment(EltTy);
    MIRBuilder.buildLoad(DstReg, EltPtr, EltPtrInfo, EltAlign);

    MI.eraseFromParent();
    return true;
  }
  return true;
}
899
900static bool legalizeSpvConstComposite(LegalizerHelper &Helper, MachineInstr &MI,
901 SPIRVGlobalRegistry *GR) {
902 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
903 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
904 const SPIRVSubtarget &ST = MI.getMF()->getSubtarget<SPIRVSubtarget>();
905
906 Register DstReg = MI.getOperand(i: 0).getReg();
907 LLT DstTy = MRI.getType(Reg: DstReg);
908
909 if (!needsVectorLegalization(Ty: DstTy, ST))
910 return true;
911
912 SmallVector<Register, 8> SrcRegs;
913 if (MI.getNumOperands() == 2) {
914 // The "null" case: no values are attached.
915 LLT EltTy = DstTy.getElementType();
916 auto Zero = MIRBuilder.buildConstant(Res: EltTy, Val: 0);
917 SPIRVTypeInst SpvDstTy = GR->getSPIRVTypeForVReg(VReg: DstReg);
918 SPIRVTypeInst SpvEltTy = GR->getScalarOrVectorComponentType(Type: SpvDstTy);
919 GR->assignSPIRVTypeToVReg(Type: SpvEltTy, VReg: Zero.getReg(Idx: 0), MF: MIRBuilder.getMF());
920 for (unsigned i = 0; i < DstTy.getNumElements(); ++i)
921 SrcRegs.push_back(Elt: Zero.getReg(Idx: 0));
922 } else {
923 for (unsigned i = 2; i < MI.getNumOperands(); ++i) {
924 SrcRegs.push_back(Elt: MI.getOperand(i).getReg());
925 }
926 }
927 MIRBuilder.buildBuildVector(Res: DstReg, Ops: SrcRegs);
928 MI.eraseFromParent();
929 return true;
930}
931
932bool SPIRVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
933 MachineInstr &MI) const {
934 LLVM_DEBUG(dbgs() << "legalizeIntrinsic: " << MI);
935 auto IntrinsicID = cast<GIntrinsic>(Val&: MI).getIntrinsicID();
936 switch (IntrinsicID) {
937 case Intrinsic::spv_bitcast:
938 return legalizeSpvBitcast(Helper, MI, GR);
939 case Intrinsic::spv_insertelt:
940 return legalizeSpvInsertElt(Helper, MI, GR);
941 case Intrinsic::spv_extractelt:
942 return legalizeSpvExtractElt(Helper, MI, GR);
943 case Intrinsic::spv_const_composite:
944 return legalizeSpvConstComposite(Helper, MI, GR);
945 }
946 return true;
947}
948
949bool SPIRVLegalizerInfo::legalizeBitcast(LegalizerHelper &Helper,
950 MachineInstr &MI) const {
951 // Once the G_BITCAST is using vectors that are allowed, we turn it back into
952 // an spv_bitcast to avoid verifier problems when the register types are the
953 // same for the source and the result. Note that the SPIR-V types associated
954 // with the bitcast can be different even if the register types are the same.
955 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
956 Register DstReg = MI.getOperand(i: 0).getReg();
957 Register SrcReg = MI.getOperand(i: 1).getReg();
958 SmallVector<Register, 1> DstRegs = {DstReg};
959 MIRBuilder.buildIntrinsic(ID: Intrinsic::spv_bitcast, Res: DstRegs).addUse(RegNo: SrcReg);
960 MI.eraseFromParent();
961 return true;
962}
963
// Note this code was copied from LegalizerHelper::lowerISFPCLASS and adjusted
// to ensure that all instructions created during the lowering have SPIR-V types
// assigned to them.
//
// The lowering bitcasts the FP source to an equally-wide integer and tests the
// classes requested by the FPClassTest mask with integer compares against bit
// patterns derived from the format's fltSemantics; partial results are OR'ed
// into the final result. Always returns true and erases MI.
bool SPIRVLegalizerInfo::legalizeIsFPClass(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
  FPClassTest Mask = static_cast<FPClassTest>(MI.getOperand(2).getImm());

  auto &MIRBuilder = Helper.MIRBuilder;
  auto &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // SPIR-V type mirroring the destination LLT (scalar or vector integer).
  Type *LLVMDstTy =
      IntegerType::get(MIRBuilder.getContext(), DstTy.getScalarSizeInBits());
  if (DstTy.isVector())
    LLVMDstTy = VectorType::get(LLVMDstTy, DstTy.getElementCount());
  SPIRVTypeInst SPIRVDstTy = GR->getOrCreateSPIRVType(
      LLVMDstTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
      /*EmitIR*/ true);

  unsigned BitSize = SrcTy.getScalarSizeInBits();
  const fltSemantics &Semantics = getFltSemanticForLLT(SrcTy.getScalarType());

  // Integer type of the same width as the FP source; all the bit tests below
  // are done on this type.
  LLT IntTy = LLT::scalar(BitSize);
  Type *LLVMIntTy = IntegerType::get(MIRBuilder.getContext(), BitSize);
  if (SrcTy.isVector()) {
    IntTy = LLT::vector(SrcTy.getElementCount(), IntTy);
    LLVMIntTy = VectorType::get(LLVMIntTy, SrcTy.getElementCount());
  }
  SPIRVTypeInst SPIRVIntTy = GR->getOrCreateSPIRVType(
      LLVMIntTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
      /*EmitIR*/ true);

  // Clang doesn't support capture of structured bindings:
  LLT DstTyCopy = DstTy;
  const auto assignSPIRVTy = [&](MachineInstrBuilder &&MI) {
    // Assign this MI's (assumed only) destination to one of the two types we
    // expect: either the G_IS_FPCLASS's destination type, or the integer type
    // bitcast from the source type.
    LLT MITy = MRI.getType(MI.getReg(0));
    assert((MITy == IntTy || MITy == DstTyCopy) &&
           "Unexpected LLT type while lowering G_IS_FPCLASS");
    SPIRVTypeInst SPVTy = MITy == IntTy ? SPIRVIntTy : SPIRVDstTy;
    GR->assignSPIRVTypeToVReg(SPVTy, MI.getReg(0), MF);
    return MI;
  };

  // Helper to build and assign a constant in one go
  const auto buildSPIRVConstant = [&](LLT Ty, auto &&C) -> MachineInstrBuilder {
    if (!Ty.isFixedVector())
      return assignSPIRVTy(MIRBuilder.buildConstant(Ty, C));
    // For vectors: build the scalar constant (typed with the vector's element
    // SPIR-V type), then splat it across the lanes.
    auto ScalarC = MIRBuilder.buildConstant(Ty.getScalarType(), C);
    assert((Ty == IntTy || Ty == DstTyCopy) &&
           "Unexpected LLT type while lowering constant for G_IS_FPCLASS");
    SPIRVTypeInst VecEltTy = GR->getOrCreateSPIRVType(
        (Ty == IntTy ? LLVMIntTy : LLVMDstTy)->getScalarType(), MIRBuilder,
        SPIRV::AccessQualifier::ReadWrite,
        /*EmitIR*/ true);
    GR->assignSPIRVTypeToVReg(VecEltTy, ScalarC.getReg(0), MF);
    return assignSPIRVTy(MIRBuilder.buildSplatBuildVector(Ty, ScalarC));
  };

  // Degenerate masks fold to a constant false/true result.
  if (Mask == fcNone) {
    MIRBuilder.buildCopy(DstReg, buildSPIRVConstant(DstTy, 0));
    MI.eraseFromParent();
    return true;
  }
  if (Mask == fcAllFlags) {
    MIRBuilder.buildCopy(DstReg, buildSPIRVConstant(DstTy, 1));
    MI.eraseFromParent();
    return true;
  }

  // Note that rather than creating a COPY here (between a floating-point and
  // integer type of the same size) we create a SPIR-V bitcast immediately. We
  // can't create a G_BITCAST because the LLTs are the same, and we can't seem
  // to correctly lower COPYs to SPIR-V bitcasts at this moment.
  Register ResVReg = MRI.createGenericVirtualRegister(IntTy);
  MRI.setRegClass(ResVReg, GR->getRegClass(SPIRVIntTy));
  GR->assignSPIRVTypeToVReg(SPIRVIntTy, ResVReg, Helper.MIRBuilder.getMF());
  auto AsInt = MIRBuilder.buildInstr(SPIRV::OpBitcast)
                   .addDef(ResVReg)
                   .addUse(GR->getSPIRVTypeID(SPIRVIntTy))
                   .addUse(SrcReg);
  AsInt = assignSPIRVTy(std::move(AsInt));

  // Various masks.
  APInt SignBit = APInt::getSignMask(BitSize);
  APInt ValueMask = APInt::getSignedMaxValue(BitSize);     // All bits but sign.
  APInt Inf = APFloat::getInf(Semantics).bitcastToAPInt(); // Exp and int bit.
  APInt ExpMask = Inf;
  APInt AllOneMantissa = APFloat::getLargest(Semantics).bitcastToAPInt() & ~Inf;
  APInt QNaNBitMask =
      APInt::getOneBitSet(BitSize, AllOneMantissa.getActiveBits() - 1);
  APInt InversionMask = APInt::getAllOnes(DstTy.getScalarSizeInBits());

  auto SignBitC = buildSPIRVConstant(IntTy, SignBit);
  auto ValueMaskC = buildSPIRVConstant(IntTy, ValueMask);
  auto InfC = buildSPIRVConstant(IntTy, Inf);
  auto ExpMaskC = buildSPIRVConstant(IntTy, ExpMask);
  auto ZeroC = buildSPIRVConstant(IntTy, 0);

  // Abs = source bits with the sign cleared; Sign = "sign bit was set".
  auto Abs = assignSPIRVTy(MIRBuilder.buildAnd(IntTy, AsInt, ValueMaskC));
  auto Sign = assignSPIRVTy(
      MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_NE, DstTy, AsInt, Abs));

  auto Res = buildSPIRVConstant(DstTy, 0);

  // OR each partial class test into the accumulated result.
  const auto appendToRes = [&](MachineInstrBuilder &&ToAppend) {
    Res = assignSPIRVTy(
        MIRBuilder.buildOr(DstTyCopy, Res, assignSPIRVTy(std::move(ToAppend))));
  };

  // Tests that involve more than one class should be processed first.
  if ((Mask & fcFinite) == fcFinite) {
    // finite(V) ==> abs(V) u< exp_mask
    appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, Abs,
                                     ExpMaskC));
    Mask &= ~fcFinite;
  } else if ((Mask & fcFinite) == fcPosFinite) {
    // finite(V) && V > 0 ==> V u< exp_mask
    appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, AsInt,
                                     ExpMaskC));
    Mask &= ~fcPosFinite;
  } else if ((Mask & fcFinite) == fcNegFinite) {
    // finite(V) && V < 0 ==> abs(V) u< exp_mask && signbit == 1
    auto Cmp = assignSPIRVTy(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT,
                                                  DstTy, Abs, ExpMaskC));
    appendToRes(MIRBuilder.buildAnd(DstTy, Cmp, Sign));
    Mask &= ~fcNegFinite;
  }

  if (FPClassTest PartialCheck = Mask & (fcZero | fcSubnormal)) {
    // fcZero | fcSubnormal => test all exponent bits are 0
    // TODO: Handle sign bit specific cases
    // TODO: Handle inverted case
    if (PartialCheck == (fcZero | fcSubnormal)) {
      auto ExpBits = assignSPIRVTy(MIRBuilder.buildAnd(IntTy, AsInt, ExpMaskC));
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
                                       ExpBits, ZeroC));
      Mask &= ~PartialCheck;
    }
  }

  // Check for individual classes.
  if (FPClassTest PartialCheck = Mask & fcZero) {
    if (PartialCheck == fcPosZero)
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
                                       AsInt, ZeroC));
    else if (PartialCheck == fcZero)
      appendToRes(
          MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy, Abs, ZeroC));
    else // fcNegZero
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
                                       AsInt, SignBitC));
  }

  if (FPClassTest PartialCheck = Mask & fcSubnormal) {
    // issubnormal(V) ==> unsigned(abs(V) - 1) u< (all mantissa bits set)
    // issubnormal(V) && V>0 ==> unsigned(V - 1) u< (all mantissa bits set)
    auto V = (PartialCheck == fcPosSubnormal) ? AsInt : Abs;
    auto OneC = buildSPIRVConstant(IntTy, 1);
    auto VMinusOne = MIRBuilder.buildSub(IntTy, V, OneC);
    auto SubnormalRes = assignSPIRVTy(
        MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, VMinusOne,
                             buildSPIRVConstant(IntTy, AllOneMantissa)));
    if (PartialCheck == fcNegSubnormal)
      SubnormalRes = MIRBuilder.buildAnd(DstTy, SubnormalRes, Sign);
    appendToRes(std::move(SubnormalRes));
  }

  if (FPClassTest PartialCheck = Mask & fcInf) {
    if (PartialCheck == fcPosInf)
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
                                       AsInt, InfC));
    else if (PartialCheck == fcInf)
      appendToRes(
          MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy, Abs, InfC));
    else { // fcNegInf
      APInt NegInf = APFloat::getInf(Semantics, true).bitcastToAPInt();
      auto NegInfC = buildSPIRVConstant(IntTy, NegInf);
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
                                       AsInt, NegInfC));
    }
  }

  if (FPClassTest PartialCheck = Mask & fcNan) {
    auto InfWithQnanBitC =
        buildSPIRVConstant(IntTy, std::move(Inf) | QNaNBitMask);
    if (PartialCheck == fcNan) {
      // isnan(V) ==> abs(V) u> int(inf)
      appendToRes(
          MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGT, DstTy, Abs, InfC));
    } else if (PartialCheck == fcQNan) {
      // isquiet(V) ==> abs(V) u>= (unsigned(Inf) | quiet_bit)
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGE, DstTy, Abs,
                                       InfWithQnanBitC));
    } else { // fcSNan
      // issignaling(V) ==> abs(V) u> unsigned(Inf) &&
      //                    abs(V) u< (unsigned(Inf) | quiet_bit)
      auto IsNan = assignSPIRVTy(
          MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGT, DstTy, Abs, InfC));
      auto IsNotQnan = assignSPIRVTy(MIRBuilder.buildICmp(
          CmpInst::Predicate::ICMP_ULT, DstTy, Abs, InfWithQnanBitC));
      appendToRes(MIRBuilder.buildAnd(DstTy, IsNan, IsNotQnan));
    }
  }

  if (FPClassTest PartialCheck = Mask & fcNormal) {
    // isnormal(V) ==> (0 u< exp u< max_exp) ==> (unsigned(exp-1) u<
    // (max_exp-1))
    APInt ExpLSB = ExpMask & ~(ExpMask.shl(1));
    auto ExpMinusOne = assignSPIRVTy(
        MIRBuilder.buildSub(IntTy, Abs, buildSPIRVConstant(IntTy, ExpLSB)));
    APInt MaxExpMinusOne = std::move(ExpMask) - ExpLSB;
    auto NormalRes = assignSPIRVTy(
        MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, ExpMinusOne,
                             buildSPIRVConstant(IntTy, MaxExpMinusOne)));
    if (PartialCheck == fcNegNormal)
      NormalRes = MIRBuilder.buildAnd(DstTy, NormalRes, Sign);
    else if (PartialCheck == fcPosNormal) {
      // Mask out negative values: AND with the inverted sign test.
      auto PosSign = assignSPIRVTy(MIRBuilder.buildXor(
          DstTy, Sign, buildSPIRVConstant(DstTy, InversionMask)));
      NormalRes = MIRBuilder.buildAnd(DstTy, NormalRes, PosSign);
    }
    appendToRes(std::move(NormalRes));
  }

  MIRBuilder.buildCopy(DstReg, Res);
  MI.eraseFromParent();
  return true;
}
1197