1 | //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This contains code to emit Builtin calls as LLVM code. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "ABIInfo.h" |
14 | #include "CGCUDARuntime.h" |
15 | #include "CGCXXABI.h" |
16 | #include "CGHLSLRuntime.h" |
17 | #include "CGObjCRuntime.h" |
18 | #include "CGOpenCLRuntime.h" |
19 | #include "CGRecordLayout.h" |
20 | #include "CodeGenFunction.h" |
21 | #include "CodeGenModule.h" |
22 | #include "ConstantEmitter.h" |
23 | #include "PatternInit.h" |
24 | #include "TargetInfo.h" |
25 | #include "clang/AST/ASTContext.h" |
26 | #include "clang/AST/Attr.h" |
27 | #include "clang/AST/Decl.h" |
28 | #include "clang/AST/OSLog.h" |
29 | #include "clang/AST/OperationKinds.h" |
30 | #include "clang/Basic/TargetBuiltins.h" |
31 | #include "clang/Basic/TargetInfo.h" |
32 | #include "clang/Basic/TargetOptions.h" |
33 | #include "clang/CodeGen/CGFunctionInfo.h" |
34 | #include "clang/Frontend/FrontendDiagnostic.h" |
35 | #include "llvm/ADT/APFloat.h" |
36 | #include "llvm/ADT/APInt.h" |
37 | #include "llvm/ADT/FloatingPointMode.h" |
38 | #include "llvm/ADT/SmallPtrSet.h" |
39 | #include "llvm/ADT/StringExtras.h" |
40 | #include "llvm/Analysis/ValueTracking.h" |
41 | #include "llvm/IR/DataLayout.h" |
42 | #include "llvm/IR/InlineAsm.h" |
43 | #include "llvm/IR/Intrinsics.h" |
44 | #include "llvm/IR/IntrinsicsAArch64.h" |
45 | #include "llvm/IR/IntrinsicsAMDGPU.h" |
46 | #include "llvm/IR/IntrinsicsARM.h" |
47 | #include "llvm/IR/IntrinsicsBPF.h" |
48 | #include "llvm/IR/IntrinsicsDirectX.h" |
49 | #include "llvm/IR/IntrinsicsHexagon.h" |
50 | #include "llvm/IR/IntrinsicsNVPTX.h" |
51 | #include "llvm/IR/IntrinsicsPowerPC.h" |
52 | #include "llvm/IR/IntrinsicsR600.h" |
53 | #include "llvm/IR/IntrinsicsRISCV.h" |
54 | #include "llvm/IR/IntrinsicsS390.h" |
55 | #include "llvm/IR/IntrinsicsVE.h" |
56 | #include "llvm/IR/IntrinsicsWebAssembly.h" |
57 | #include "llvm/IR/IntrinsicsX86.h" |
58 | #include "llvm/IR/MDBuilder.h" |
59 | #include "llvm/IR/MatrixBuilder.h" |
60 | #include "llvm/IR/MemoryModelRelaxationAnnotations.h" |
61 | #include "llvm/Support/ConvertUTF.h" |
62 | #include "llvm/Support/MathExtras.h" |
63 | #include "llvm/Support/ScopedPrinter.h" |
64 | #include "llvm/TargetParser/AArch64TargetParser.h" |
65 | #include "llvm/TargetParser/X86TargetParser.h" |
66 | #include <optional> |
67 | #include <sstream> |
68 | |
69 | using namespace clang; |
70 | using namespace CodeGen; |
71 | using namespace llvm; |
72 | |
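// Initialize the memory of a dynamically sized alloca (e.g. from an alloca
// builtin) according to -ftrivial-auto-var-init: emit a memset of zero or
// pattern bytes and annotate it as "auto-init".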
73 | static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, |
74 | Align AlignmentInBytes) { |
75 | ConstantInt *Byte; |
76 | switch (CGF.getLangOpts().getTrivialAutoVarInit()) { |
77 | case LangOptions::TrivialAutoVarInitKind::Uninitialized: |
78 | // Nothing to initialize. |
79 | return; |
80 | case LangOptions::TrivialAutoVarInitKind::Zero: |
81 | Byte = CGF.Builder.getInt8(C: 0x00); |
82 | break; |
83 | case LangOptions::TrivialAutoVarInitKind::Pattern: { |
84 | llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(C&: CGF.CGM.getLLVMContext()); |
85 | Byte = llvm::dyn_cast<llvm::ConstantInt>( |
86 | Val: initializationPatternFor(CGF.CGM, Int8)); |
87 | break; |
88 | } |
89 | } |
90 | if (CGF.CGM.stopAutoInit()) |
91 | return; |
92 | auto *I = CGF.Builder.CreateMemSet(Ptr: AI, Val: Byte, Size, Align: AlignmentInBytes); |
93 | I->addAnnotationMetadata(Annotation: "auto-init" ); |
94 | } |
95 | |
96 | /// getBuiltinLibFunction - Given a builtin id for a function like |
97 | /// "__builtin_fabsf", return a Function* for "fabsf". |
98 | llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD, |
99 | unsigned BuiltinID) { |
100 | assert(Context.BuiltinInfo.isLibFunction(BuiltinID)); |
101 | |
102 | // Get the name, skip over the __builtin_ prefix (if necessary). |
103 | StringRef Name; |
104 | GlobalDecl D(FD); |
105 | |
106 | // TODO: This list should be expanded or refactored after all GCC-compatible |
107 | // std libcall builtins are implemented. |
108 | static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{ |
109 | {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128" }, |
110 | {Builtin::BI__builtin___printf_chk, "__printf_chkieee128" }, |
111 | {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128" }, |
112 | {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128" }, |
113 | {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128" }, |
114 | {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128" }, |
115 | {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128" }, |
116 | {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128" }, |
117 | {Builtin::BI__builtin_fprintf, "__fprintfieee128" }, |
118 | {Builtin::BI__builtin_printf, "__printfieee128" }, |
119 | {Builtin::BI__builtin_snprintf, "__snprintfieee128" }, |
120 | {Builtin::BI__builtin_sprintf, "__sprintfieee128" }, |
121 | {Builtin::BI__builtin_vfprintf, "__vfprintfieee128" }, |
122 | {Builtin::BI__builtin_vprintf, "__vprintfieee128" }, |
123 | {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128" }, |
124 | {Builtin::BI__builtin_vsprintf, "__vsprintfieee128" }, |
125 | {Builtin::BI__builtin_fscanf, "__fscanfieee128" }, |
126 | {Builtin::BI__builtin_scanf, "__scanfieee128" }, |
127 | {Builtin::BI__builtin_sscanf, "__sscanfieee128" }, |
128 | {Builtin::BI__builtin_vfscanf, "__vfscanfieee128" }, |
129 | {Builtin::BI__builtin_vscanf, "__vscanfieee128" }, |
130 | {Builtin::BI__builtin_vsscanf, "__vsscanfieee128" }, |
131 | {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128" }, |
132 | }; |
133 | |
  // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
  // IBM 'long double' (i.e. __ibm128). Map them to the 'double' versions
  // when 'long double' is only 64 bits wide.
137 | static SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{ |
138 | {Builtin::BI__builtin_frexpl, "frexp" }, |
139 | {Builtin::BI__builtin_ldexpl, "ldexp" }, |
140 | {Builtin::BI__builtin_modfl, "modf" }, |
141 | }; |
142 | |
143 | // If the builtin has been declared explicitly with an assembler label, |
144 | // use the mangled name. This differs from the plain label on platforms |
145 | // that prefix labels. |
146 | if (FD->hasAttr<AsmLabelAttr>()) |
147 | Name = getMangledName(GD: D); |
148 | else { |
    // TODO: This mutation should also be applied to targets other than PPC
    // once their backends support IEEE 128-bit style libcalls.
151 | if (getTriple().isPPC64() && |
152 | &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() && |
153 | F128Builtins.contains(Val: BuiltinID)) |
154 | Name = F128Builtins[BuiltinID]; |
155 | else if (getTriple().isOSAIX() && |
156 | &getTarget().getLongDoubleFormat() == |
157 | &llvm::APFloat::IEEEdouble() && |
158 | AIXLongDouble64Builtins.contains(Val: BuiltinID)) |
159 | Name = AIXLongDouble64Builtins[BuiltinID]; |
160 | else |
161 | Name = Context.BuiltinInfo.getName(ID: BuiltinID).substr(Start: 10); |
162 | } |
163 | |
164 | llvm::FunctionType *Ty = |
165 | cast<llvm::FunctionType>(Val: getTypes().ConvertType(T: FD->getType())); |
166 | |
167 | return GetOrCreateLLVMFunction(MangledName: Name, Ty, D, /*ForVTable=*/false); |
168 | } |
169 | |
170 | /// Emit the conversions required to turn the given value into an |
171 | /// integer of the given size. |
172 | static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, |
173 | QualType T, llvm::IntegerType *IntType) { |
174 | V = CGF.EmitToMemory(Value: V, Ty: T); |
175 | |
176 | if (V->getType()->isPointerTy()) |
177 | return CGF.Builder.CreatePtrToInt(V, DestTy: IntType); |
178 | |
179 | assert(V->getType() == IntType); |
180 | return V; |
181 | } |
182 | |
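/// Emit the conversions required to turn an integer back into a value of the
/// given result type, converting back to a pointer if needed.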
183 | static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, |
184 | QualType T, llvm::Type *ResultType) { |
185 | V = CGF.EmitFromMemory(Value: V, Ty: T); |
186 | |
187 | if (ResultType->isPointerTy()) |
188 | return CGF.Builder.CreateIntToPtr(V, DestTy: ResultType); |
189 | |
190 | assert(V->getType() == ResultType); |
191 | return V; |
192 | } |
193 | |
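/// Check that the pointer operand (arg 0) of an atomic builtin is suitably
/// aligned; warn on a misaligned operand and force at least the operation's
/// natural alignment.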
194 | static Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E) { |
195 | ASTContext &Ctx = CGF.getContext(); |
196 | Address Ptr = CGF.EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
197 | unsigned Bytes = Ptr.getElementType()->isPointerTy() |
198 | ? Ctx.getTypeSizeInChars(T: Ctx.VoidPtrTy).getQuantity() |
199 | : Ptr.getElementType()->getScalarSizeInBits() / 8; |
200 | unsigned Align = Ptr.getAlignment().getQuantity(); |
201 | if (Align % Bytes != 0) { |
202 | DiagnosticsEngine &Diags = CGF.CGM.getDiags(); |
203 | Diags.Report(Loc: E->getBeginLoc(), DiagID: diag::warn_sync_op_misaligned); |
204 | // Force address to be at least naturally-aligned. |
205 | return Ptr.withAlignment(NewAlignment: CharUnits::fromQuantity(Quantity: Bytes)); |
206 | } |
207 | return Ptr; |
208 | } |
209 | |
210 | /// Utility to insert an atomic instruction based on Intrinsic::ID |
211 | /// and the expression node. |
212 | static Value *MakeBinaryAtomicValue( |
213 | CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, |
214 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
215 | |
216 | QualType T = E->getType(); |
217 | assert(E->getArg(0)->getType()->isPointerType()); |
218 | assert(CGF.getContext().hasSameUnqualifiedType(T, |
219 | E->getArg(0)->getType()->getPointeeType())); |
220 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
221 | |
222 | Address DestAddr = CheckAtomicAlignment(CGF, E); |
223 | |
224 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
225 | C&: CGF.getLLVMContext(), NumBits: CGF.getContext().getTypeSize(T)); |
226 | |
227 | llvm::Value *Val = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
228 | llvm::Type *ValueType = Val->getType(); |
229 | Val = EmitToInt(CGF, V: Val, T, IntType); |
230 | |
231 | llvm::Value *Result = |
232 | CGF.Builder.CreateAtomicRMW(Op: Kind, Addr: DestAddr, Val, Ordering); |
233 | return EmitFromInt(CGF, V: Result, T, ResultType: ValueType); |
234 | } |
235 | |
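// Emit a nontemporal store of the value (arg 0) through the pointer (arg 1),
// e.g. for __builtin_nontemporal_store. The builtin produces no value, so
// return nullptr.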
236 | static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) { |
237 | Value *Val = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
238 | Address Addr = CGF.EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
239 | |
240 | Val = CGF.EmitToMemory(Value: Val, Ty: E->getArg(Arg: 0)->getType()); |
241 | LValue LV = CGF.MakeAddrLValue(Addr, T: E->getArg(Arg: 0)->getType()); |
242 | LV.setNontemporal(true); |
243 | CGF.EmitStoreOfScalar(value: Val, lvalue: LV, isInit: false); |
244 | return nullptr; |
245 | } |
246 | |
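// Emit a nontemporal load through the pointer (arg 0), e.g. for
// __builtin_nontemporal_load.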
247 | static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) { |
248 | Address Addr = CGF.EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
249 | |
250 | LValue LV = CGF.MakeAddrLValue(Addr, T: E->getType()); |
251 | LV.setNontemporal(true); |
252 | return CGF.EmitLoadOfScalar(lvalue: LV, Loc: E->getExprLoc()); |
253 | } |
254 | |
255 | static RValue EmitBinaryAtomic(CodeGenFunction &CGF, |
256 | llvm::AtomicRMWInst::BinOp Kind, |
257 | const CallExpr *E) { |
258 | return RValue::get(V: MakeBinaryAtomicValue(CGF, Kind, E)); |
259 | } |
260 | |
/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
264 | static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, |
265 | llvm::AtomicRMWInst::BinOp Kind, |
266 | const CallExpr *E, |
267 | Instruction::BinaryOps Op, |
268 | bool Invert = false) { |
269 | QualType T = E->getType(); |
270 | assert(E->getArg(0)->getType()->isPointerType()); |
271 | assert(CGF.getContext().hasSameUnqualifiedType(T, |
272 | E->getArg(0)->getType()->getPointeeType())); |
273 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
274 | |
275 | Address DestAddr = CheckAtomicAlignment(CGF, E); |
276 | |
277 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
278 | C&: CGF.getLLVMContext(), NumBits: CGF.getContext().getTypeSize(T)); |
279 | |
280 | llvm::Value *Val = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
281 | llvm::Type *ValueType = Val->getType(); |
282 | Val = EmitToInt(CGF, V: Val, T, IntType); |
283 | |
284 | llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
285 | Op: Kind, Addr: DestAddr, Val, Ordering: llvm::AtomicOrdering::SequentiallyConsistent); |
286 | Result = CGF.Builder.CreateBinOp(Opc: Op, LHS: Result, RHS: Val); |
287 | if (Invert) |
288 | Result = |
289 | CGF.Builder.CreateBinOp(Opc: llvm::Instruction::Xor, LHS: Result, |
290 | RHS: llvm::ConstantInt::getAllOnesValue(Ty: IntType)); |
291 | Result = EmitFromInt(CGF, V: Result, T, ResultType: ValueType); |
292 | return RValue::get(V: Result); |
293 | } |
294 | |
295 | /// Utility to insert an atomic cmpxchg instruction. |
296 | /// |
297 | /// @param CGF The current codegen function. |
298 | /// @param E Builtin call expression to convert to cmpxchg. |
299 | /// arg0 - address to operate on |
300 | /// arg1 - value to compare with |
301 | /// arg2 - new value |
302 | /// @param ReturnBool Specifies whether to return success flag of |
303 | /// cmpxchg result or the old value. |
304 | /// |
305 | /// @returns result of cmpxchg, according to ReturnBool |
306 | /// |
/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics,
/// invoke the function EmitAtomicCmpXchgForMSIntrin instead.
309 | static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, |
310 | bool ReturnBool) { |
311 | QualType T = ReturnBool ? E->getArg(Arg: 1)->getType() : E->getType(); |
312 | Address DestAddr = CheckAtomicAlignment(CGF, E); |
313 | |
314 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
315 | C&: CGF.getLLVMContext(), NumBits: CGF.getContext().getTypeSize(T)); |
316 | |
317 | Value *Cmp = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
318 | llvm::Type *ValueType = Cmp->getType(); |
319 | Cmp = EmitToInt(CGF, V: Cmp, T, IntType); |
320 | Value *New = EmitToInt(CGF, V: CGF.EmitScalarExpr(E: E->getArg(Arg: 2)), T, IntType); |
321 | |
322 | Value *Pair = CGF.Builder.CreateAtomicCmpXchg( |
323 | Addr: DestAddr, Cmp, New, SuccessOrdering: llvm::AtomicOrdering::SequentiallyConsistent, |
324 | FailureOrdering: llvm::AtomicOrdering::SequentiallyConsistent); |
325 | if (ReturnBool) |
326 | // Extract boolean success flag and zext it to int. |
327 | return CGF.Builder.CreateZExt(V: CGF.Builder.CreateExtractValue(Agg: Pair, Idxs: 1), |
328 | DestTy: CGF.ConvertType(T: E->getType())); |
329 | else |
330 | // Extract old value and emit it using the same type as compare value. |
331 | return EmitFromInt(CGF, V: CGF.Builder.CreateExtractValue(Agg: Pair, Idxs: 0), T, |
332 | ResultType: ValueType); |
333 | } |
334 | |
335 | /// This function should be invoked to emit atomic cmpxchg for Microsoft's |
336 | /// _InterlockedCompareExchange* intrinsics which have the following signature: |
337 | /// T _InterlockedCompareExchange(T volatile *Destination, |
338 | /// T Exchange, |
339 | /// T Comparand); |
340 | /// |
341 | /// Whereas the llvm 'cmpxchg' instruction has the following syntax: |
342 | /// cmpxchg *Destination, Comparand, Exchange. |
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is why we cannot reuse the utility function
/// MakeAtomicCmpXchgValue above: it expects its arguments in the
/// already-swapped order.
347 | |
348 | static |
349 | Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, |
350 | AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) { |
351 | assert(E->getArg(0)->getType()->isPointerType()); |
352 | assert(CGF.getContext().hasSameUnqualifiedType( |
353 | E->getType(), E->getArg(0)->getType()->getPointeeType())); |
354 | assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
355 | E->getArg(1)->getType())); |
356 | assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
357 | E->getArg(2)->getType())); |
358 | |
359 | Address DestAddr = CheckAtomicAlignment(CGF, E); |
360 | |
361 | auto *Comparand = CGF.EmitScalarExpr(E: E->getArg(Arg: 2)); |
362 | auto *Exchange = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
363 | |
364 | // For Release ordering, the failure ordering should be Monotonic. |
365 | auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ? |
366 | AtomicOrdering::Monotonic : |
367 | SuccessOrdering; |
368 | |
369 | // The atomic instruction is marked volatile for consistency with MSVC. This |
370 | // blocks the few atomics optimizations that LLVM has. If we want to optimize |
371 | // _Interlocked* operations in the future, we will have to remove the volatile |
372 | // marker. |
373 | auto *Result = CGF.Builder.CreateAtomicCmpXchg( |
374 | Addr: DestAddr, Cmp: Comparand, New: Exchange, SuccessOrdering, FailureOrdering); |
375 | Result->setVolatile(true); |
376 | return CGF.Builder.CreateExtractValue(Agg: Result, Idxs: 0); |
377 | } |
378 | |
// 64-bit Microsoft platforms support 128-bit cmpxchg operations. They are
380 | // prototyped like this: |
381 | // |
382 | // unsigned char _InterlockedCompareExchange128...( |
383 | // __int64 volatile * _Destination, |
384 | // __int64 _ExchangeHigh, |
385 | // __int64 _ExchangeLow, |
386 | // __int64 * _ComparandResult); |
387 | // |
// Note that Destination is assumed to be at least 16-byte aligned, despite
// being typed as __int64.
390 | |
391 | static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, |
392 | const CallExpr *E, |
393 | AtomicOrdering SuccessOrdering) { |
394 | assert(E->getNumArgs() == 4); |
395 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
396 | llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
397 | llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E: E->getArg(Arg: 2)); |
398 | Address ComparandAddr = CGF.EmitPointerWithAlignment(Addr: E->getArg(Arg: 3)); |
399 | |
400 | assert(DestPtr->getType()->isPointerTy()); |
401 | assert(!ExchangeHigh->getType()->isPointerTy()); |
402 | assert(!ExchangeLow->getType()->isPointerTy()); |
403 | |
404 | // For Release ordering, the failure ordering should be Monotonic. |
405 | auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release |
406 | ? AtomicOrdering::Monotonic |
407 | : SuccessOrdering; |
408 | |
409 | // Convert to i128 pointers and values. Alignment is also overridden for |
410 | // destination pointer. |
411 | llvm::Type *Int128Ty = llvm::IntegerType::get(C&: CGF.getLLVMContext(), NumBits: 128); |
412 | Address DestAddr(DestPtr, Int128Ty, |
413 | CGF.getContext().toCharUnitsFromBits(BitSize: 128)); |
414 | ComparandAddr = ComparandAddr.withElementType(ElemTy: Int128Ty); |
415 | |
416 | // (((i128)hi) << 64) | ((i128)lo) |
417 | ExchangeHigh = CGF.Builder.CreateZExt(V: ExchangeHigh, DestTy: Int128Ty); |
418 | ExchangeLow = CGF.Builder.CreateZExt(V: ExchangeLow, DestTy: Int128Ty); |
419 | ExchangeHigh = |
420 | CGF.Builder.CreateShl(LHS: ExchangeHigh, RHS: llvm::ConstantInt::get(Ty: Int128Ty, V: 64)); |
421 | llvm::Value *Exchange = CGF.Builder.CreateOr(LHS: ExchangeHigh, RHS: ExchangeLow); |
422 | |
423 | // Load the comparand for the instruction. |
424 | llvm::Value *Comparand = CGF.Builder.CreateLoad(Addr: ComparandAddr); |
425 | |
426 | auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Addr: DestAddr, Cmp: Comparand, New: Exchange, |
427 | SuccessOrdering, FailureOrdering); |
428 | |
429 | // The atomic instruction is marked volatile for consistency with MSVC. This |
430 | // blocks the few atomics optimizations that LLVM has. If we want to optimize |
431 | // _Interlocked* operations in the future, we will have to remove the volatile |
432 | // marker. |
433 | CXI->setVolatile(true); |
434 | |
435 | // Store the result as an outparameter. |
436 | CGF.Builder.CreateStore(Val: CGF.Builder.CreateExtractValue(Agg: CXI, Idxs: 0), |
437 | Addr: ComparandAddr); |
438 | |
439 | // Get the success boolean and zero extend it to i8. |
440 | Value *Success = CGF.Builder.CreateExtractValue(Agg: CXI, Idxs: 1); |
441 | return CGF.Builder.CreateZExt(V: Success, DestTy: CGF.Int8Ty); |
442 | } |
443 | |
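// Emit an atomic add of 1 to *arg0 and return the incremented value, e.g. for
// the _InterlockedIncrement family.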
444 | static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, |
445 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
446 | assert(E->getArg(0)->getType()->isPointerType()); |
447 | |
448 | auto *IntTy = CGF.ConvertType(T: E->getType()); |
449 | Address DestAddr = CheckAtomicAlignment(CGF, E); |
450 | auto *Result = CGF.Builder.CreateAtomicRMW( |
451 | Op: AtomicRMWInst::Add, Addr: DestAddr, Val: ConstantInt::get(Ty: IntTy, V: 1), Ordering); |
452 | return CGF.Builder.CreateAdd(LHS: Result, RHS: ConstantInt::get(Ty: IntTy, V: 1)); |
453 | } |
454 | |
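// Emit an atomic subtract of 1 from *arg0 and return the decremented value,
// e.g. for the _InterlockedDecrement family.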
455 | static Value *EmitAtomicDecrementValue( |
456 | CodeGenFunction &CGF, const CallExpr *E, |
457 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
458 | assert(E->getArg(0)->getType()->isPointerType()); |
459 | |
460 | auto *IntTy = CGF.ConvertType(T: E->getType()); |
461 | Address DestAddr = CheckAtomicAlignment(CGF, E); |
462 | auto *Result = CGF.Builder.CreateAtomicRMW( |
463 | Op: AtomicRMWInst::Sub, Addr: DestAddr, Val: ConstantInt::get(Ty: IntTy, V: 1), Ordering); |
464 | return CGF.Builder.CreateSub(LHS: Result, RHS: ConstantInt::get(Ty: IntTy, V: 1)); |
465 | } |
466 | |
467 | // Build a plain volatile load. |
468 | static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) { |
469 | Value *Ptr = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
470 | QualType ElTy = E->getArg(Arg: 0)->getType()->getPointeeType(); |
471 | CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(T: ElTy); |
472 | llvm::Type *ITy = |
473 | llvm::IntegerType::get(C&: CGF.getLLVMContext(), NumBits: LoadSize.getQuantity() * 8); |
474 | llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ty: ITy, Addr: Ptr, Align: LoadSize); |
475 | Load->setVolatile(true); |
476 | return Load; |
477 | } |
478 | |
479 | // Build a plain volatile store. |
480 | static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) { |
481 | Value *Ptr = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
482 | Value *Value = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
483 | QualType ElTy = E->getArg(Arg: 0)->getType()->getPointeeType(); |
484 | CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(T: ElTy); |
485 | llvm::StoreInst *Store = |
486 | CGF.Builder.CreateAlignedStore(Val: Value, Addr: Ptr, Align: StoreSize); |
487 | Store->setVolatile(true); |
488 | return Store; |
489 | } |
490 | |
491 | // Emit a simple mangled intrinsic that has 1 argument and a return type |
492 | // matching the argument type. Depending on mode, this may be a constrained |
493 | // floating-point intrinsic. |
494 | static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
495 | const CallExpr *E, unsigned IntrinsicID, |
496 | unsigned ConstrainedIntrinsicID) { |
497 | llvm::Value *Src0 = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
498 | |
499 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
500 | if (CGF.Builder.getIsFPConstrained()) { |
501 | Function *F = CGF.CGM.getIntrinsic(IID: ConstrainedIntrinsicID, Tys: Src0->getType()); |
502 | return CGF.Builder.CreateConstrainedFPCall(Callee: F, Args: { Src0 }); |
503 | } else { |
504 | Function *F = CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: Src0->getType()); |
505 | return CGF.Builder.CreateCall(Callee: F, Args: Src0); |
506 | } |
507 | } |
508 | |
509 | // Emit an intrinsic that has 2 operands of the same type as its result. |
510 | // Depending on mode, this may be a constrained floating-point intrinsic. |
511 | static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
512 | const CallExpr *E, unsigned IntrinsicID, |
513 | unsigned ConstrainedIntrinsicID) { |
514 | llvm::Value *Src0 = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
515 | llvm::Value *Src1 = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
516 | |
517 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
518 | if (CGF.Builder.getIsFPConstrained()) { |
519 | Function *F = CGF.CGM.getIntrinsic(IID: ConstrainedIntrinsicID, Tys: Src0->getType()); |
520 | return CGF.Builder.CreateConstrainedFPCall(Callee: F, Args: { Src0, Src1 }); |
521 | } else { |
522 | Function *F = CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: Src0->getType()); |
523 | return CGF.Builder.CreateCall(Callee: F, Args: { Src0, Src1 }); |
524 | } |
525 | } |
526 | |
// Like emitBinaryMaybeConstrainedFPBuiltin, but the intrinsic is also mangled
// on its second argument's type.
528 | static Value *emitBinaryExpMaybeConstrainedFPBuiltin( |
529 | CodeGenFunction &CGF, const CallExpr *E, llvm::Intrinsic::ID IntrinsicID, |
530 | llvm::Intrinsic::ID ConstrainedIntrinsicID) { |
531 | llvm::Value *Src0 = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
532 | llvm::Value *Src1 = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
533 | |
534 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
535 | if (CGF.Builder.getIsFPConstrained()) { |
536 | Function *F = CGF.CGM.getIntrinsic(IID: ConstrainedIntrinsicID, |
537 | Tys: {Src0->getType(), Src1->getType()}); |
538 | return CGF.Builder.CreateConstrainedFPCall(Callee: F, Args: {Src0, Src1}); |
539 | } |
540 | |
541 | Function *F = |
542 | CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: {Src0->getType(), Src1->getType()}); |
543 | return CGF.Builder.CreateCall(Callee: F, Args: {Src0, Src1}); |
544 | } |
545 | |
546 | // Emit an intrinsic that has 3 operands of the same type as its result. |
547 | // Depending on mode, this may be a constrained floating-point intrinsic. |
548 | static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
549 | const CallExpr *E, unsigned IntrinsicID, |
550 | unsigned ConstrainedIntrinsicID) { |
551 | llvm::Value *Src0 = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
552 | llvm::Value *Src1 = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
553 | llvm::Value *Src2 = CGF.EmitScalarExpr(E: E->getArg(Arg: 2)); |
554 | |
555 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
556 | if (CGF.Builder.getIsFPConstrained()) { |
557 | Function *F = CGF.CGM.getIntrinsic(IID: ConstrainedIntrinsicID, Tys: Src0->getType()); |
558 | return CGF.Builder.CreateConstrainedFPCall(Callee: F, Args: { Src0, Src1, Src2 }); |
559 | } else { |
560 | Function *F = CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: Src0->getType()); |
561 | return CGF.Builder.CreateCall(Callee: F, Args: { Src0, Src1, Src2 }); |
562 | } |
563 | } |
564 | |
565 | // Emit an intrinsic where all operands are of the same type as the result. |
566 | // Depending on mode, this may be a constrained floating-point intrinsic. |
567 | static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
568 | unsigned IntrinsicID, |
569 | unsigned ConstrainedIntrinsicID, |
570 | llvm::Type *Ty, |
571 | ArrayRef<Value *> Args) { |
572 | Function *F; |
573 | if (CGF.Builder.getIsFPConstrained()) |
574 | F = CGF.CGM.getIntrinsic(IID: ConstrainedIntrinsicID, Tys: Ty); |
575 | else |
576 | F = CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: Ty); |
577 | |
578 | if (CGF.Builder.getIsFPConstrained()) |
579 | return CGF.Builder.CreateConstrainedFPCall(Callee: F, Args); |
580 | else |
581 | return CGF.Builder.CreateCall(Callee: F, Args); |
582 | } |
583 | |
584 | // Emit a simple intrinsic that has N scalar arguments and a return type |
585 | // matching the argument type. It is assumed that only the first argument is |
586 | // overloaded. |
587 | template <unsigned N> |
588 | Value *emitBuiltinWithOneOverloadedType(CodeGenFunction &CGF, const CallExpr *E, |
589 | unsigned IntrinsicID, |
590 | llvm::StringRef Name = "" ) { |
591 | static_assert(N, "expect non-empty argument" ); |
592 | SmallVector<Value *, N> Args; |
593 | for (unsigned I = 0; I < N; ++I) |
594 | Args.push_back(CGF.EmitScalarExpr(E: E->getArg(Arg: I))); |
595 | Function *F = CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: Args[0]->getType()); |
596 | return CGF.Builder.CreateCall(F, Args, Name); |
597 | } |
598 | |
599 | // Emit an intrinsic that has 1 float or double operand, and 1 integer. |
600 | static Value *emitFPIntBuiltin(CodeGenFunction &CGF, |
601 | const CallExpr *E, |
602 | unsigned IntrinsicID) { |
603 | llvm::Value *Src0 = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
604 | llvm::Value *Src1 = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
605 | |
606 | Function *F = CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: Src0->getType()); |
607 | return CGF.Builder.CreateCall(Callee: F, Args: {Src0, Src1}); |
608 | } |
609 | |
610 | // Emit an intrinsic that has overloaded integer result and fp operand. |
611 | static Value * |
612 | emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, |
613 | unsigned IntrinsicID, |
614 | unsigned ConstrainedIntrinsicID) { |
615 | llvm::Type *ResultType = CGF.ConvertType(T: E->getType()); |
616 | llvm::Value *Src0 = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
617 | |
618 | if (CGF.Builder.getIsFPConstrained()) { |
619 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
620 | Function *F = CGF.CGM.getIntrinsic(IID: ConstrainedIntrinsicID, |
621 | Tys: {ResultType, Src0->getType()}); |
622 | return CGF.Builder.CreateConstrainedFPCall(Callee: F, Args: {Src0}); |
623 | } else { |
624 | Function *F = |
625 | CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: {ResultType, Src0->getType()}); |
626 | return CGF.Builder.CreateCall(Callee: F, Args: Src0); |
627 | } |
628 | } |
629 | |
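// Emit a frexp-style builtin: call the intrinsic, store the extracted exponent
// through the pointer given as the second argument, and return the fraction.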
630 | static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E, |
631 | llvm::Intrinsic::ID IntrinsicID) { |
632 | llvm::Value *Src0 = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
633 | llvm::Value *Src1 = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
634 | |
635 | QualType IntPtrTy = E->getArg(Arg: 1)->getType()->getPointeeType(); |
636 | llvm::Type *IntTy = CGF.ConvertType(T: IntPtrTy); |
637 | llvm::Function *F = |
638 | CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: {Src0->getType(), IntTy}); |
639 | llvm::Value *Call = CGF.Builder.CreateCall(Callee: F, Args: Src0); |
640 | |
641 | llvm::Value *Exp = CGF.Builder.CreateExtractValue(Agg: Call, Idxs: 1); |
642 | LValue LV = CGF.MakeNaturalAlignAddrLValue(V: Src1, T: IntPtrTy); |
643 | CGF.EmitStoreOfScalar(value: Exp, lvalue: LV); |
644 | |
645 | return CGF.Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
646 | } |
647 | |
648 | /// EmitFAbs - Emit a call to @llvm.fabs(). |
649 | static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { |
650 | Function *F = CGF.CGM.getIntrinsic(IID: Intrinsic::fabs, Tys: V->getType()); |
651 | llvm::CallInst *Call = CGF.Builder.CreateCall(Callee: F, Args: V); |
652 | Call->setDoesNotAccessMemory(); |
653 | return Call; |
654 | } |
655 | |
656 | /// Emit the computation of the sign bit for a floating point value. Returns |
657 | /// the i1 sign bit value. |
658 | static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { |
659 | LLVMContext &C = CGF.CGM.getLLVMContext(); |
660 | |
661 | llvm::Type *Ty = V->getType(); |
662 | int Width = Ty->getPrimitiveSizeInBits(); |
663 | llvm::Type *IntTy = llvm::IntegerType::get(C, NumBits: Width); |
664 | V = CGF.Builder.CreateBitCast(V, DestTy: IntTy); |
665 | if (Ty->isPPC_FP128Ty()) { |
666 | // We want the sign bit of the higher-order double. The bitcast we just |
667 | // did works as if the double-double was stored to memory and then |
668 | // read as an i128. The "store" will put the higher-order double in the |
669 | // lower address in both little- and big-Endian modes, but the "load" |
670 | // will treat those bits as a different part of the i128: the low bits in |
671 | // little-Endian, the high bits in big-Endian. Therefore, on big-Endian |
672 | // we need to shift the high bits down to the low before truncating. |
673 | Width >>= 1; |
674 | if (CGF.getTarget().isBigEndian()) { |
675 | Value *ShiftCst = llvm::ConstantInt::get(Ty: IntTy, V: Width); |
676 | V = CGF.Builder.CreateLShr(LHS: V, RHS: ShiftCst); |
677 | } |
678 | // We are truncating value in order to extract the higher-order |
679 | // double, which we will be using to extract the sign from. |
680 | IntTy = llvm::IntegerType::get(C, NumBits: Width); |
681 | V = CGF.Builder.CreateTrunc(V, DestTy: IntTy); |
682 | } |
683 | Value *Zero = llvm::Constant::getNullValue(Ty: IntTy); |
684 | return CGF.Builder.CreateICmpSLT(LHS: V, RHS: Zero); |
685 | } |
686 | |
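// Emit a direct call to the library function that implements a builtin,
// applying the call-site FP options and, for known errno-setting math
// libcalls, attaching "int" TBAA metadata to the call.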
687 | static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD, |
688 | const CallExpr *E, llvm::Constant *calleeValue) { |
689 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
690 | CGCallee callee = CGCallee::forDirect(functionPtr: calleeValue, abstractInfo: GlobalDecl(FD)); |
691 | RValue Call = |
692 | CGF.EmitCall(FnType: E->getCallee()->getType(), Callee: callee, E, ReturnValue: ReturnValueSlot()); |
693 | |
  // Check whether this builtin is one of the errno-setting FP math libcalls
  // that we know how to annotate.
695 | if (unsigned BuiltinID = FD->getBuiltinID()) { |
    auto IsErrnoIntrinsic = [&]() -> bool {
697 | switch (BuiltinID) { |
698 | case Builtin::BIexpf: |
699 | case Builtin::BI__builtin_expf: |
700 | case Builtin::BI__builtin_expf128: |
701 | return true; |
702 | } |
703 | // TODO: support more FP math libcalls |
704 | return false; |
705 | }(); |
706 | |
    // Restrict to targets with errno; for example, macOS does not set errno.
708 | if (IsErrnoIntrinsic && CGF.CGM.getLangOpts().MathErrno && |
709 | !CGF.Builder.getIsFPConstrained()) { |
710 | ASTContext &Context = CGF.getContext(); |
711 | // Emit "int" TBAA metadata on FP math libcalls. |
712 | clang::QualType IntTy = Context.IntTy; |
713 | TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(AccessType: IntTy); |
714 | Instruction *Inst = cast<llvm::Instruction>(Val: Call.getScalarVal()); |
715 | CGF.CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo); |
716 | } |
717 | } |
718 | return Call; |
719 | } |
720 | |
721 | /// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.* |
722 | /// depending on IntrinsicID. |
723 | /// |
724 | /// \arg CGF The current codegen function. |
725 | /// \arg IntrinsicID The ID for the Intrinsic we wish to generate. |
726 | /// \arg X The first argument to the llvm.*.with.overflow.*. |
727 | /// \arg Y The second argument to the llvm.*.with.overflow.*. |
728 | /// \arg Carry The carry returned by the llvm.*.with.overflow.*. |
729 | /// \returns The result (i.e. sum/product) returned by the intrinsic. |
730 | static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF, |
731 | const llvm::Intrinsic::ID IntrinsicID, |
732 | llvm::Value *X, llvm::Value *Y, |
733 | llvm::Value *&Carry) { |
734 | // Make sure we have integers of the same width. |
735 | assert(X->getType() == Y->getType() && |
736 | "Arguments must be the same type. (Did you forget to make sure both " |
737 | "arguments have the same integer width?)" ); |
738 | |
739 | Function *Callee = CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: X->getType()); |
740 | llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, Args: {X, Y}); |
741 | Carry = CGF.Builder.CreateExtractValue(Agg: Tmp, Idxs: 1); |
742 | return CGF.Builder.CreateExtractValue(Agg: Tmp, Idxs: 0); |
743 | } |
744 | |
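// Emit a call to a zero-argument intrinsic whose result is known to lie in the
// half-open range [low, high); attach the range and noundef as return
// attributes.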
745 | static Value *emitRangedBuiltin(CodeGenFunction &CGF, unsigned IntrinsicID, |
746 | int low, int high) { |
747 | Function *F = CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: {}); |
748 | llvm::CallInst *Call = CGF.Builder.CreateCall(Callee: F); |
749 | llvm::ConstantRange CR(APInt(32, low), APInt(32, high)); |
750 | Call->addRangeRetAttr(CR); |
751 | Call->addRetAttr(Kind: llvm::Attribute::AttrKind::NoUndef); |
752 | return Call; |
753 | } |
754 | |
755 | namespace { |
756 | struct WidthAndSignedness { |
757 | unsigned Width; |
758 | bool Signed; |
759 | }; |
760 | } |
761 | |
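// Compute the width in bits and the signedness of the given integer type;
// bool is treated as one bit wide and _BitInt uses its specified width.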
762 | static WidthAndSignedness |
763 | getIntegerWidthAndSignedness(const clang::ASTContext &context, |
764 | const clang::QualType Type) { |
765 | assert(Type->isIntegerType() && "Given type is not an integer." ); |
766 | unsigned Width = Type->isBooleanType() ? 1 |
767 | : Type->isBitIntType() ? context.getIntWidth(T: Type) |
768 | : context.getTypeInfo(T: Type).Width; |
769 | bool Signed = Type->isSignedIntegerType(); |
770 | return {.Width: Width, .Signed: Signed}; |
771 | } |
772 | |
773 | // Given one or more integer types, this function produces an integer type that |
774 | // encompasses them: any value in one of the given types could be expressed in |
775 | // the encompassing type. |
776 | static struct WidthAndSignedness |
777 | EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) { |
778 | assert(Types.size() > 0 && "Empty list of types." ); |
779 | |
780 | // If any of the given types is signed, we must return a signed type. |
781 | bool Signed = false; |
782 | for (const auto &Type : Types) { |
783 | Signed |= Type.Signed; |
784 | } |
785 | |
786 | // The encompassing type must have a width greater than or equal to the width |
787 | // of the specified types. Additionally, if the encompassing type is signed, |
788 | // its width must be strictly greater than the width of any unsigned types |
789 | // given. |
790 | unsigned Width = 0; |
791 | for (const auto &Type : Types) { |
792 | unsigned MinWidth = Type.Width + (Signed && !Type.Signed); |
793 | if (Width < MinWidth) { |
794 | Width = MinWidth; |
795 | } |
796 | } |
797 | |
798 | return {.Width: Width, .Signed: Signed}; |
799 | } |
800 | |
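/// Emit a call to llvm.va_start or llvm.va_end on the given va_list pointer,
/// depending on \p IsStart.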
801 | Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) { |
802 | Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend; |
803 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: inst, Tys: {ArgValue->getType()}), |
804 | Args: ArgValue); |
805 | } |
806 | |
807 | /// Checks if using the result of __builtin_object_size(p, @p From) in place of |
808 | /// __builtin_object_size(p, @p To) is correct |
809 | static bool areBOSTypesCompatible(int From, int To) { |
810 | // Note: Our __builtin_object_size implementation currently treats Type=0 and |
811 | // Type=2 identically. Encoding this implementation detail here may make |
812 | // improving __builtin_object_size difficult in the future, so it's omitted. |
813 | return From == To || (From == 0 && To == 1) || (From == 3 && To == 2); |
814 | } |
815 | |
816 | static llvm::Value * |
817 | getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) { |
818 | return ConstantInt::get(Ty: ResType, V: (Type & 2) ? 0 : -1, /*isSigned=*/IsSigned: true); |
819 | } |
820 | |
821 | llvm::Value * |
822 | CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, |
823 | llvm::IntegerType *ResType, |
824 | llvm::Value *EmittedE, |
825 | bool IsDynamic) { |
826 | uint64_t ObjectSize; |
827 | if (!E->tryEvaluateObjectSize(Result&: ObjectSize, Ctx&: getContext(), Type)) |
828 | return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic); |
829 | return ConstantInt::get(Ty: ResType, V: ObjectSize, /*isSigned=*/IsSigned: true); |
830 | } |
831 | |
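/// Search \p RD (recursing into nested records) for a flexible array member,
/// optionally restricted to \p FAMDecl, accumulating its offset in bits into
/// \p Offset. Returns the field if found.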
832 | const FieldDecl *CodeGenFunction::FindFlexibleArrayMemberFieldAndOffset( |
833 | ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FAMDecl, |
834 | uint64_t &Offset) { |
835 | const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel = |
836 | getLangOpts().getStrictFlexArraysLevel(); |
837 | uint32_t FieldNo = 0; |
838 | |
839 | if (RD->isImplicit()) |
840 | return nullptr; |
841 | |
842 | for (const FieldDecl *FD : RD->fields()) { |
843 | if ((!FAMDecl || FD == FAMDecl) && |
844 | Decl::isFlexibleArrayMemberLike( |
845 | Context&: Ctx, D: FD, Ty: FD->getType(), StrictFlexArraysLevel, |
846 | /*IgnoreTemplateOrMacroSubstitution=*/true)) { |
847 | const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(D: RD); |
848 | Offset += Layout.getFieldOffset(FieldNo); |
849 | return FD; |
850 | } |
851 | |
852 | QualType Ty = FD->getType(); |
853 | if (Ty->isRecordType()) { |
854 | if (const FieldDecl *Field = FindFlexibleArrayMemberFieldAndOffset( |
855 | Ctx, RD: Ty->getAsRecordDecl(), FAMDecl, Offset)) { |
856 | const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(D: RD); |
857 | Offset += Layout.getFieldOffset(FieldNo); |
858 | return Field; |
859 | } |
860 | } |
861 | |
862 | if (!RD->isUnion()) |
863 | ++FieldNo; |
864 | } |
865 | |
866 | return nullptr; |
867 | } |
868 | |
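// Count fields carrying the counted_by attribute, descending into nested
// record types; callers only need to know whether more than one such field
// exists.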
869 | static unsigned CountCountedByAttrs(const RecordDecl *RD) { |
870 | unsigned Num = 0; |
871 | |
872 | for (const FieldDecl *FD : RD->fields()) { |
873 | if (FD->getType()->isCountAttributedType()) |
874 | return ++Num; |
875 | |
876 | QualType Ty = FD->getType(); |
877 | if (Ty->isRecordType()) |
878 | Num += CountCountedByAttrs(RD: Ty->getAsRecordDecl()); |
879 | } |
880 | |
881 | return Num; |
882 | } |
883 | |
884 | llvm::Value * |
885 | CodeGenFunction::emitFlexibleArrayMemberSize(const Expr *E, unsigned Type, |
886 | llvm::IntegerType *ResType) { |
887 | // The code generated here calculates the size of a struct with a flexible |
888 | // array member that uses the counted_by attribute. There are two instances |
889 | // we handle: |
890 | // |
891 | // struct s { |
892 | // unsigned long flags; |
893 | // int count; |
894 | // int array[] __attribute__((counted_by(count))); |
895 | // } |
896 | // |
897 | // 1) bdos of the flexible array itself: |
898 | // |
899 | // __builtin_dynamic_object_size(p->array, 1) == |
900 | // p->count * sizeof(*p->array) |
901 | // |
902 | // 2) bdos of a pointer into the flexible array: |
903 | // |
904 | // __builtin_dynamic_object_size(&p->array[42], 1) == |
905 | // (p->count - 42) * sizeof(*p->array) |
906 | // |
  // 3) bdos of the whole struct, including the flexible array:
908 | // |
909 | // __builtin_dynamic_object_size(p, 1) == |
910 | // max(sizeof(struct s), |
911 | // offsetof(struct s, array) + p->count * sizeof(*p->array)) |
912 | // |
913 | ASTContext &Ctx = getContext(); |
914 | const Expr *Base = E->IgnoreParenImpCasts(); |
915 | const Expr *Idx = nullptr; |
916 | |
917 | if (const auto *UO = dyn_cast<UnaryOperator>(Val: Base); |
918 | UO && UO->getOpcode() == UO_AddrOf) { |
919 | Expr *SubExpr = UO->getSubExpr()->IgnoreParenImpCasts(); |
920 | if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: SubExpr)) { |
921 | Base = ASE->getBase()->IgnoreParenImpCasts(); |
922 | Idx = ASE->getIdx()->IgnoreParenImpCasts(); |
923 | |
924 | if (const auto *IL = dyn_cast<IntegerLiteral>(Val: Idx)) { |
925 | int64_t Val = IL->getValue().getSExtValue(); |
926 | if (Val < 0) |
927 | return getDefaultBuiltinObjectSizeResult(Type, ResType); |
928 | |
929 | if (Val == 0) |
930 | // The index is 0, so we don't need to take it into account. |
931 | Idx = nullptr; |
932 | } |
933 | } else { |
934 | // Potential pointer to another element in the struct. |
935 | Base = SubExpr; |
936 | } |
937 | } |
938 | |
939 | // Get the flexible array member Decl. |
940 | const RecordDecl *OuterRD = nullptr; |
941 | const FieldDecl *FAMDecl = nullptr; |
942 | if (const auto *ME = dyn_cast<MemberExpr>(Val: Base)) { |
943 | // Check if \p Base is referencing the FAM itself. |
944 | const ValueDecl *VD = ME->getMemberDecl(); |
945 | OuterRD = VD->getDeclContext()->getOuterLexicalRecordContext(); |
946 | FAMDecl = dyn_cast<FieldDecl>(Val: VD); |
947 | if (!FAMDecl) |
948 | return nullptr; |
949 | } else if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Base)) { |
950 | // Check if we're pointing to the whole struct. |
951 | QualType Ty = DRE->getDecl()->getType(); |
952 | if (Ty->isPointerType()) |
953 | Ty = Ty->getPointeeType(); |
954 | OuterRD = Ty->getAsRecordDecl(); |
955 | |
956 | // If we have a situation like this: |
957 | // |
958 | // struct union_of_fams { |
959 | // int flags; |
960 | // union { |
961 | // signed char normal_field; |
962 | // struct { |
963 | // int count1; |
964 | // int arr1[] __counted_by(count1); |
965 | // }; |
966 | // struct { |
967 | // signed char count2; |
968 | // int arr2[] __counted_by(count2); |
969 | // }; |
970 | // }; |
971 | // }; |
972 | // |
973 | // We don't know which 'count' to use in this scenario: |
974 | // |
975 | // size_t get_size(struct union_of_fams *p) { |
976 | // return __builtin_dynamic_object_size(p, 1); |
977 | // } |
978 | // |
979 | // Instead of calculating a wrong number, we give up. |
980 | if (OuterRD && CountCountedByAttrs(RD: OuterRD) > 1) |
981 | return nullptr; |
982 | } |
983 | |
984 | if (!OuterRD) |
985 | return nullptr; |
986 | |
  // We call FindFlexibleArrayMemberFieldAndOffset even if FAMDecl is non-null
  // to get its offset.
989 | uint64_t Offset = 0; |
990 | FAMDecl = |
991 | FindFlexibleArrayMemberFieldAndOffset(Ctx, RD: OuterRD, FAMDecl, Offset); |
992 | Offset = Ctx.toCharUnitsFromBits(BitSize: Offset).getQuantity(); |
993 | |
994 | if (!FAMDecl || !FAMDecl->getType()->isCountAttributedType()) |
995 | // No flexible array member found or it doesn't have the "counted_by" |
996 | // attribute. |
997 | return nullptr; |
998 | |
999 | const FieldDecl *CountedByFD = FindCountedByField(FD: FAMDecl); |
1000 | if (!CountedByFD) |
1001 | // Can't find the field referenced by the "counted_by" attribute. |
1002 | return nullptr; |
1003 | |
1004 | // Build a load of the counted_by field. |
1005 | bool IsSigned = CountedByFD->getType()->isSignedIntegerType(); |
1006 | Value *CountedByInst = EmitCountedByFieldExpr(Base, FAMDecl, CountDecl: CountedByFD); |
1007 | if (!CountedByInst) |
1008 | return getDefaultBuiltinObjectSizeResult(Type, ResType); |
1009 | |
1010 | CountedByInst = Builder.CreateIntCast(V: CountedByInst, DestTy: ResType, isSigned: IsSigned); |
1011 | |
1012 | // Build a load of the index and subtract it from the count. |
1013 | Value *IdxInst = nullptr; |
1014 | if (Idx) { |
1015 | if (Idx->HasSideEffects(Ctx: getContext())) |
1016 | // We can't have side-effects. |
1017 | return getDefaultBuiltinObjectSizeResult(Type, ResType); |
1018 | |
1019 | bool IdxSigned = Idx->getType()->isSignedIntegerType(); |
1020 | IdxInst = EmitAnyExprToTemp(E: Idx).getScalarVal(); |
1021 | IdxInst = Builder.CreateIntCast(V: IdxInst, DestTy: ResType, isSigned: IdxSigned); |
1022 | |
1023 | // We go ahead with the calculation here. If the index turns out to be |
1024 | // negative, we'll catch it at the end. |
1025 | CountedByInst = |
1026 | Builder.CreateSub(LHS: CountedByInst, RHS: IdxInst, Name: "" , HasNUW: !IsSigned, HasNSW: IsSigned); |
1027 | } |
1028 | |
1029 | // Calculate how large the flexible array member is in bytes. |
1030 | const ArrayType *ArrayTy = Ctx.getAsArrayType(T: FAMDecl->getType()); |
1031 | CharUnits Size = Ctx.getTypeSizeInChars(T: ArrayTy->getElementType()); |
1032 | llvm::Constant *ElemSize = |
1033 | llvm::ConstantInt::get(Ty: ResType, V: Size.getQuantity(), IsSigned); |
1034 | Value *FAMSize = |
1035 | Builder.CreateMul(LHS: CountedByInst, RHS: ElemSize, Name: "" , HasNUW: !IsSigned, HasNSW: IsSigned); |
1036 | FAMSize = Builder.CreateIntCast(V: FAMSize, DestTy: ResType, isSigned: IsSigned); |
1037 | Value *Res = FAMSize; |
1038 | |
1039 | if (isa<DeclRefExpr>(Val: Base)) { |
    // The whole struct is specified in the __bdos.
1041 | const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(D: OuterRD); |
1042 | |
1043 | // Get the offset of the FAM. |
1044 | llvm::Constant *FAMOffset = ConstantInt::get(Ty: ResType, V: Offset, IsSigned); |
1045 | Value *OffsetAndFAMSize = |
1046 | Builder.CreateAdd(LHS: FAMOffset, RHS: Res, Name: "" , HasNUW: !IsSigned, HasNSW: IsSigned); |
1047 | |
1048 | // Get the full size of the struct. |
1049 | llvm::Constant *SizeofStruct = |
1050 | ConstantInt::get(Ty: ResType, V: Layout.getSize().getQuantity(), IsSigned); |
1051 | |
1052 | // max(sizeof(struct s), |
1053 | // offsetof(struct s, array) + p->count * sizeof(*p->array)) |
1054 | Res = IsSigned |
1055 | ? Builder.CreateBinaryIntrinsic(ID: llvm::Intrinsic::smax, |
1056 | LHS: OffsetAndFAMSize, RHS: SizeofStruct) |
1057 | : Builder.CreateBinaryIntrinsic(ID: llvm::Intrinsic::umax, |
1058 | LHS: OffsetAndFAMSize, RHS: SizeofStruct); |
1059 | } |
1060 | |
1061 | // A negative \p IdxInst or \p CountedByInst means that the index lands |
1062 | // outside of the flexible array member. If that's the case, we want to |
1063 | // return 0. |
1064 | Value *Cmp = Builder.CreateIsNotNeg(Arg: CountedByInst); |
1065 | if (IdxInst) |
1066 | Cmp = Builder.CreateAnd(LHS: Builder.CreateIsNotNeg(Arg: IdxInst), RHS: Cmp); |
1067 | |
1068 | return Builder.CreateSelect(C: Cmp, True: Res, False: ConstantInt::get(Ty: ResType, V: 0, IsSigned)); |
1069 | } |
1070 | |
1071 | /// Returns a Value corresponding to the size of the given expression. |
1072 | /// This Value may be either of the following: |
1073 | /// - A llvm::Argument (if E is a param with the pass_object_size attribute on |
1074 | /// it) |
1075 | /// - A call to the @llvm.objectsize intrinsic |
1076 | /// |
1077 | /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null |
1078 | /// and we wouldn't otherwise try to reference a pass_object_size parameter, |
1079 | /// we'll call @llvm.objectsize on EmittedE, rather than emitting E. |
1080 | llvm::Value * |
1081 | CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, |
1082 | llvm::IntegerType *ResType, |
1083 | llvm::Value *EmittedE, bool IsDynamic) { |
1084 | // We need to reference an argument if the pointer is a parameter with the |
1085 | // pass_object_size attribute. |
1086 | if (auto *D = dyn_cast<DeclRefExpr>(Val: E->IgnoreParenImpCasts())) { |
1087 | auto *Param = dyn_cast<ParmVarDecl>(Val: D->getDecl()); |
1088 | auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>(); |
1089 | if (Param != nullptr && PS != nullptr && |
1090 | areBOSTypesCompatible(From: PS->getType(), To: Type)) { |
1091 | auto Iter = SizeArguments.find(Val: Param); |
1092 | assert(Iter != SizeArguments.end()); |
1093 | |
1094 | const ImplicitParamDecl *D = Iter->second; |
1095 | auto DIter = LocalDeclMap.find(Val: D); |
1096 | assert(DIter != LocalDeclMap.end()); |
1097 | |
1098 | return EmitLoadOfScalar(Addr: DIter->second, /*Volatile=*/false, |
1099 | Ty: getContext().getSizeType(), Loc: E->getBeginLoc()); |
1100 | } |
1101 | } |
1102 | |
1103 | if (IsDynamic) { |
1104 | // Emit special code for a flexible array member with the "counted_by" |
1105 | // attribute. |
1106 | if (Value *V = emitFlexibleArrayMemberSize(E, Type, ResType)) |
1107 | return V; |
1108 | } |
1109 | |
1110 | // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't |
1111 | // evaluate E for side-effects. In either case, we shouldn't lower to |
1112 | // @llvm.objectsize. |
1113 | if (Type == 3 || (!EmittedE && E->HasSideEffects(Ctx: getContext()))) |
1114 | return getDefaultBuiltinObjectSizeResult(Type, ResType); |
1115 | |
1116 | Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E); |
1117 | assert(Ptr->getType()->isPointerTy() && |
1118 | "Non-pointer passed to __builtin_object_size?" ); |
1119 | |
1120 | Function *F = |
1121 | CGM.getIntrinsic(IID: Intrinsic::objectsize, Tys: {ResType, Ptr->getType()}); |
1122 | |
  // LLVM only supports 0 and 2; pass that distinction along as a boolean.
1124 | Value *Min = Builder.getInt1(V: (Type & 2) != 0); |
  // For GCC compatibility, __builtin_object_size treats NULL as having an
  // unknown size.
1126 | Value *NullIsUnknown = Builder.getTrue(); |
1127 | Value *Dynamic = Builder.getInt1(V: IsDynamic); |
1128 | return Builder.CreateCall(Callee: F, Args: {Ptr, Min, NullIsUnknown, Dynamic}); |
1129 | } |
1130 | |
1131 | namespace { |
1132 | /// A struct to generically describe a bit test intrinsic. |
1133 | struct BitTest { |
1134 | enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set }; |
1135 | enum InterlockingKind : uint8_t { |
1136 | Unlocked, |
1137 | Sequential, |
1138 | Acquire, |
1139 | Release, |
1140 | NoFence |
1141 | }; |
1142 | |
1143 | ActionKind Action; |
1144 | InterlockingKind Interlocking; |
1145 | bool Is64Bit; |
1146 | |
1147 | static BitTest decodeBitTestBuiltin(unsigned BuiltinID); |
1148 | }; |
1149 | |
1150 | } // namespace |
1151 | |
1152 | BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) { |
1153 | switch (BuiltinID) { |
1154 | // Main portable variants. |
1155 | case Builtin::BI_bittest: |
1156 | return {.Action: TestOnly, .Interlocking: Unlocked, .Is64Bit: false}; |
1157 | case Builtin::BI_bittestandcomplement: |
1158 | return {.Action: Complement, .Interlocking: Unlocked, .Is64Bit: false}; |
1159 | case Builtin::BI_bittestandreset: |
1160 | return {.Action: Reset, .Interlocking: Unlocked, .Is64Bit: false}; |
1161 | case Builtin::BI_bittestandset: |
1162 | return {.Action: Set, .Interlocking: Unlocked, .Is64Bit: false}; |
1163 | case Builtin::BI_interlockedbittestandreset: |
1164 | return {.Action: Reset, .Interlocking: Sequential, .Is64Bit: false}; |
1165 | case Builtin::BI_interlockedbittestandset: |
1166 | return {.Action: Set, .Interlocking: Sequential, .Is64Bit: false}; |
1167 | |
1168 | // X86-specific 64-bit variants. |
1169 | case Builtin::BI_bittest64: |
1170 | return {.Action: TestOnly, .Interlocking: Unlocked, .Is64Bit: true}; |
1171 | case Builtin::BI_bittestandcomplement64: |
1172 | return {.Action: Complement, .Interlocking: Unlocked, .Is64Bit: true}; |
1173 | case Builtin::BI_bittestandreset64: |
1174 | return {.Action: Reset, .Interlocking: Unlocked, .Is64Bit: true}; |
1175 | case Builtin::BI_bittestandset64: |
1176 | return {.Action: Set, .Interlocking: Unlocked, .Is64Bit: true}; |
1177 | case Builtin::BI_interlockedbittestandreset64: |
1178 | return {.Action: Reset, .Interlocking: Sequential, .Is64Bit: true}; |
1179 | case Builtin::BI_interlockedbittestandset64: |
1180 | return {.Action: Set, .Interlocking: Sequential, .Is64Bit: true}; |
1181 | |
1182 | // ARM/AArch64-specific ordering variants. |
1183 | case Builtin::BI_interlockedbittestandset_acq: |
1184 | return {.Action: Set, .Interlocking: Acquire, .Is64Bit: false}; |
1185 | case Builtin::BI_interlockedbittestandset_rel: |
1186 | return {.Action: Set, .Interlocking: Release, .Is64Bit: false}; |
1187 | case Builtin::BI_interlockedbittestandset_nf: |
1188 | return {.Action: Set, .Interlocking: NoFence, .Is64Bit: false}; |
1189 | case Builtin::BI_interlockedbittestandreset_acq: |
1190 | return {.Action: Reset, .Interlocking: Acquire, .Is64Bit: false}; |
1191 | case Builtin::BI_interlockedbittestandreset_rel: |
1192 | return {.Action: Reset, .Interlocking: Release, .Is64Bit: false}; |
1193 | case Builtin::BI_interlockedbittestandreset_nf: |
1194 | return {.Action: Reset, .Interlocking: NoFence, .Is64Bit: false}; |
1195 | } |
1196 | llvm_unreachable("expected only bittest intrinsics" ); |
1197 | } |
1198 | |
1199 | static char bitActionToX86BTCode(BitTest::ActionKind A) { |
1200 | switch (A) { |
1201 | case BitTest::TestOnly: return '\0'; |
1202 | case BitTest::Complement: return 'c'; |
1203 | case BitTest::Reset: return 'r'; |
1204 | case BitTest::Set: return 's'; |
1205 | } |
1206 | llvm_unreachable("invalid action" ); |
1207 | } |
1208 | |
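// Emit an x86 bt/btc/btr/bts instruction (with a lock prefix for the
// interlocked forms) as inline assembly. The value of the tested bit is
// returned in an i8 via the carry flag.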
1209 | static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF, |
1210 | BitTest BT, |
1211 | const CallExpr *E, Value *BitBase, |
1212 | Value *BitPos) { |
1213 | char Action = bitActionToX86BTCode(A: BT.Action); |
1214 | char SizeSuffix = BT.Is64Bit ? 'q' : 'l'; |
1215 | |
1216 | // Build the assembly. |
1217 | SmallString<64> Asm; |
1218 | raw_svector_ostream AsmOS(Asm); |
1219 | if (BT.Interlocking != BitTest::Unlocked) |
1220 | AsmOS << "lock " ; |
1221 | AsmOS << "bt" ; |
1222 | if (Action) |
1223 | AsmOS << Action; |
1224 | AsmOS << SizeSuffix << " $2, ($1)" ; |
1225 | |
1226 | // Build the constraints. FIXME: We should support immediates when possible. |
1227 | std::string Constraints = "={@ccc},r,r,~{cc},~{memory}" ; |
1228 | std::string_view MachineClobbers = CGF.getTarget().getClobbers(); |
1229 | if (!MachineClobbers.empty()) { |
1230 | Constraints += ','; |
1231 | Constraints += MachineClobbers; |
1232 | } |
1233 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
1234 | C&: CGF.getLLVMContext(), |
1235 | NumBits: CGF.getContext().getTypeSize(T: E->getArg(Arg: 1)->getType())); |
1236 | llvm::FunctionType *FTy = |
1237 | llvm::FunctionType::get(Result: CGF.Int8Ty, Params: {CGF.UnqualPtrTy, IntType}, isVarArg: false); |
1238 | |
1239 | llvm::InlineAsm *IA = |
1240 | llvm::InlineAsm::get(Ty: FTy, AsmString: Asm, Constraints, /*hasSideEffects=*/true); |
1241 | return CGF.Builder.CreateCall(Callee: IA, Args: {BitBase, BitPos}); |
1242 | } |
1243 | |
1244 | static llvm::AtomicOrdering |
1245 | getBitTestAtomicOrdering(BitTest::InterlockingKind I) { |
1246 | switch (I) { |
1247 | case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic; |
1248 | case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent; |
1249 | case BitTest::Acquire: return llvm::AtomicOrdering::Acquire; |
1250 | case BitTest::Release: return llvm::AtomicOrdering::Release; |
1251 | case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic; |
1252 | } |
1253 | llvm_unreachable("invalid interlocking" ); |
1254 | } |
1255 | |
1256 | /// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of |
1257 | /// bits and a bit position and read and optionally modify the bit at that |
1258 | /// position. The position index can be arbitrarily large, i.e. it can be larger |
1259 | /// than 31 or 63, so we need an indexed load in the general case. |
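/// For example, a bit position of 100 refers to bit 4 (100 % 8) of byte 12
/// (100 / 8) of the array, regardless of the element type of the base pointer.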
1260 | static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF, |
1261 | unsigned BuiltinID, |
1262 | const CallExpr *E) { |
1263 | Value *BitBase = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
1264 | Value *BitPos = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
1265 | |
1266 | BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID); |
1267 | |
1268 | // X86 has special BT, BTC, BTR, and BTS instructions that handle the array |
1269 | // indexing operation internally. Use them if possible. |
1270 | if (CGF.getTarget().getTriple().isX86()) |
1271 | return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos); |
1272 | |
1273 | // Otherwise, use generic code to load one byte and test the bit. Use all but |
1274 | // the bottom three bits as the array index, and the bottom three bits to form |
1275 | // a mask. |
1276 | // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0; |
1277 | Value *ByteIndex = CGF.Builder.CreateAShr( |
1278 | LHS: BitPos, RHS: llvm::ConstantInt::get(Ty: BitPos->getType(), V: 3), Name: "bittest.byteidx" ); |
1279 | Value *BitBaseI8 = CGF.Builder.CreatePointerCast(V: BitBase, DestTy: CGF.Int8PtrTy); |
1280 | Address ByteAddr(CGF.Builder.CreateInBoundsGEP(Ty: CGF.Int8Ty, Ptr: BitBaseI8, |
1281 | IdxList: ByteIndex, Name: "bittest.byteaddr" ), |
1282 | CGF.Int8Ty, CharUnits::One()); |
1283 | Value *PosLow = |
1284 | CGF.Builder.CreateAnd(LHS: CGF.Builder.CreateTrunc(V: BitPos, DestTy: CGF.Int8Ty), |
1285 | RHS: llvm::ConstantInt::get(Ty: CGF.Int8Ty, V: 0x7)); |
1286 | |
1287 | // The updating instructions will need a mask. |
1288 | Value *Mask = nullptr; |
1289 | if (BT.Action != BitTest::TestOnly) { |
1290 | Mask = CGF.Builder.CreateShl(LHS: llvm::ConstantInt::get(Ty: CGF.Int8Ty, V: 1), RHS: PosLow, |
1291 | Name: "bittest.mask" ); |
1292 | } |
1293 | |
  // Map the interlocking kind of the intrinsic to an atomic ordering.
1295 | llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(I: BT.Interlocking); |
1296 | |
1297 | Value *OldByte = nullptr; |
1298 | if (Ordering != llvm::AtomicOrdering::NotAtomic) { |
1299 | // Emit a combined atomicrmw load/store operation for the interlocked |
1300 | // intrinsics. |
1301 | llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or; |
1302 | if (BT.Action == BitTest::Reset) { |
1303 | Mask = CGF.Builder.CreateNot(V: Mask); |
1304 | RMWOp = llvm::AtomicRMWInst::And; |
1305 | } |
1306 | OldByte = CGF.Builder.CreateAtomicRMW(Op: RMWOp, Addr: ByteAddr, Val: Mask, Ordering); |
1307 | } else { |
1308 | // Emit a plain load for the non-interlocked intrinsics. |
1309 | OldByte = CGF.Builder.CreateLoad(Addr: ByteAddr, Name: "bittest.byte" ); |
1310 | Value *NewByte = nullptr; |
1311 | switch (BT.Action) { |
1312 | case BitTest::TestOnly: |
1313 | // Don't store anything. |
1314 | break; |
1315 | case BitTest::Complement: |
1316 | NewByte = CGF.Builder.CreateXor(LHS: OldByte, RHS: Mask); |
1317 | break; |
1318 | case BitTest::Reset: |
1319 | NewByte = CGF.Builder.CreateAnd(LHS: OldByte, RHS: CGF.Builder.CreateNot(V: Mask)); |
1320 | break; |
1321 | case BitTest::Set: |
1322 | NewByte = CGF.Builder.CreateOr(LHS: OldByte, RHS: Mask); |
1323 | break; |
1324 | } |
1325 | if (NewByte) |
1326 | CGF.Builder.CreateStore(Val: NewByte, Addr: ByteAddr); |
1327 | } |
1328 | |
1329 | // However we loaded the old byte, either by plain load or atomicrmw, shift |
1330 | // the bit into the low position and mask it to 0 or 1. |
1331 | Value *ShiftedByte = CGF.Builder.CreateLShr(LHS: OldByte, RHS: PosLow, Name: "bittest.shr" ); |
1332 | return CGF.Builder.CreateAnd( |
1333 | LHS: ShiftedByte, RHS: llvm::ConstantInt::get(Ty: CGF.Int8Ty, V: 1), Name: "bittest.res" ); |
1334 | } |
1335 | |
1336 | static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF, |
1337 | unsigned BuiltinID, |
1338 | const CallExpr *E) { |
1339 | Value *Addr = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
1340 | |
1341 | SmallString<64> Asm; |
1342 | raw_svector_ostream AsmOS(Asm); |
1343 | llvm::IntegerType *RetType = CGF.Int32Ty; |
1344 | |
1345 | switch (BuiltinID) { |
1346 | case clang::PPC::BI__builtin_ppc_ldarx: |
1347 | AsmOS << "ldarx " ; |
1348 | RetType = CGF.Int64Ty; |
1349 | break; |
1350 | case clang::PPC::BI__builtin_ppc_lwarx: |
1351 | AsmOS << "lwarx " ; |
1352 | RetType = CGF.Int32Ty; |
1353 | break; |
1354 | case clang::PPC::BI__builtin_ppc_lharx: |
1355 | AsmOS << "lharx " ; |
1356 | RetType = CGF.Int16Ty; |
1357 | break; |
1358 | case clang::PPC::BI__builtin_ppc_lbarx: |
1359 | AsmOS << "lbarx " ; |
1360 | RetType = CGF.Int8Ty; |
1361 | break; |
1362 | default: |
1363 | llvm_unreachable("Expected only PowerPC load reserve intrinsics" ); |
1364 | } |
1365 | |
1366 | AsmOS << "$0, ${1:y}" ; |
1367 | |
1368 | std::string Constraints = "=r,*Z,~{memory}" ; |
1369 | std::string_view MachineClobbers = CGF.getTarget().getClobbers(); |
1370 | if (!MachineClobbers.empty()) { |
1371 | Constraints += ','; |
1372 | Constraints += MachineClobbers; |
1373 | } |
1374 | |
1375 | llvm::Type *PtrType = CGF.UnqualPtrTy; |
1376 | llvm::FunctionType *FTy = llvm::FunctionType::get(Result: RetType, Params: {PtrType}, isVarArg: false); |
1377 | |
1378 | llvm::InlineAsm *IA = |
1379 | llvm::InlineAsm::get(Ty: FTy, AsmString: Asm, Constraints, /*hasSideEffects=*/true); |
1380 | llvm::CallInst *CI = CGF.Builder.CreateCall(Callee: IA, Args: {Addr}); |
1381 | CI->addParamAttr( |
1382 | ArgNo: 0, Attr: Attribute::get(Context&: CGF.getLLVMContext(), Kind: Attribute::ElementType, Ty: RetType)); |
1383 | return CI; |
1384 | } |
1385 | |
1386 | namespace { |
1387 | enum class MSVCSetJmpKind { |
1388 | _setjmpex, |
1389 | _setjmp3, |
1390 | _setjmp |
1391 | }; |
1392 | } |
1393 | |
1394 | /// MSVC handles setjmp a bit differently on different platforms. On every |
1395 | /// architecture except 32-bit x86, the frame address is passed. On x86, extra |
1396 | /// parameters can be passed as variadic arguments, but we always pass none. |
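/// Concretely, _setjmp3 is called as "_setjmp3(Buf, 0)", while _setjmp and
/// _setjmpex receive the buffer plus llvm.frameaddress(0), or llvm.sponentry
/// on AArch64, as the frame address argument.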
1397 | static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, |
1398 | const CallExpr *E) { |
1399 | llvm::Value *Arg1 = nullptr; |
1400 | llvm::Type *Arg1Ty = nullptr; |
1401 | StringRef Name; |
1402 | bool IsVarArg = false; |
1403 | if (SJKind == MSVCSetJmpKind::_setjmp3) { |
1404 | Name = "_setjmp3" ; |
1405 | Arg1Ty = CGF.Int32Ty; |
1406 | Arg1 = llvm::ConstantInt::get(Ty: CGF.IntTy, V: 0); |
1407 | IsVarArg = true; |
1408 | } else { |
1409 | Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex" ; |
1410 | Arg1Ty = CGF.Int8PtrTy; |
1411 | if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) { |
1412 | Arg1 = CGF.Builder.CreateCall( |
1413 | Callee: CGF.CGM.getIntrinsic(IID: Intrinsic::sponentry, Tys: CGF.AllocaInt8PtrTy)); |
1414 | } else |
1415 | Arg1 = CGF.Builder.CreateCall( |
1416 | Callee: CGF.CGM.getIntrinsic(IID: Intrinsic::frameaddress, Tys: CGF.AllocaInt8PtrTy), |
1417 | Args: llvm::ConstantInt::get(Ty: CGF.Int32Ty, V: 0)); |
1418 | } |
1419 | |
1420 | // Mark the call site and declaration with ReturnsTwice. |
1421 | llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty}; |
1422 | llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get( |
1423 | C&: CGF.getLLVMContext(), Index: llvm::AttributeList::FunctionIndex, |
1424 | Kinds: llvm::Attribute::ReturnsTwice); |
1425 | llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction( |
1426 | Ty: llvm::FunctionType::get(Result: CGF.IntTy, Params: ArgTypes, isVarArg: IsVarArg), Name, |
1427 | ExtraAttrs: ReturnsTwiceAttr, /*Local=*/true); |
1428 | |
1429 | llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast( |
1430 | V: CGF.EmitScalarExpr(E: E->getArg(Arg: 0)), DestTy: CGF.Int8PtrTy); |
1431 | llvm::Value *Args[] = {Buf, Arg1}; |
1432 | llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(callee: SetJmpFn, args: Args); |
1433 | CB->setAttributes(ReturnsTwiceAttr); |
1434 | return RValue::get(V: CB); |
1435 | } |
1436 | |
// Many of the MSVC builtins are available on x64, ARM and AArch64; to avoid
// repeating code, we handle them here.
1439 | enum class CodeGenFunction::MSVCIntrin { |
1440 | _BitScanForward, |
1441 | _BitScanReverse, |
1442 | _InterlockedAnd, |
1443 | _InterlockedDecrement, |
1444 | _InterlockedExchange, |
1445 | _InterlockedExchangeAdd, |
1446 | _InterlockedExchangeSub, |
1447 | _InterlockedIncrement, |
1448 | _InterlockedOr, |
1449 | _InterlockedXor, |
1450 | _InterlockedExchangeAdd_acq, |
1451 | _InterlockedExchangeAdd_rel, |
1452 | _InterlockedExchangeAdd_nf, |
1453 | _InterlockedExchange_acq, |
1454 | _InterlockedExchange_rel, |
1455 | _InterlockedExchange_nf, |
1456 | _InterlockedCompareExchange_acq, |
1457 | _InterlockedCompareExchange_rel, |
1458 | _InterlockedCompareExchange_nf, |
1459 | _InterlockedCompareExchange128, |
1460 | _InterlockedCompareExchange128_acq, |
1461 | _InterlockedCompareExchange128_rel, |
1462 | _InterlockedCompareExchange128_nf, |
1463 | _InterlockedOr_acq, |
1464 | _InterlockedOr_rel, |
1465 | _InterlockedOr_nf, |
1466 | _InterlockedXor_acq, |
1467 | _InterlockedXor_rel, |
1468 | _InterlockedXor_nf, |
1469 | _InterlockedAnd_acq, |
1470 | _InterlockedAnd_rel, |
1471 | _InterlockedAnd_nf, |
1472 | _InterlockedIncrement_acq, |
1473 | _InterlockedIncrement_rel, |
1474 | _InterlockedIncrement_nf, |
1475 | _InterlockedDecrement_acq, |
1476 | _InterlockedDecrement_rel, |
1477 | _InterlockedDecrement_nf, |
1478 | __fastfail, |
1479 | }; |
1480 | |
1481 | static std::optional<CodeGenFunction::MSVCIntrin> |
1482 | translateArmToMsvcIntrin(unsigned BuiltinID) { |
1483 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1484 | switch (BuiltinID) { |
1485 | default: |
1486 | return std::nullopt; |
1487 | case clang::ARM::BI_BitScanForward: |
1488 | case clang::ARM::BI_BitScanForward64: |
1489 | return MSVCIntrin::_BitScanForward; |
1490 | case clang::ARM::BI_BitScanReverse: |
1491 | case clang::ARM::BI_BitScanReverse64: |
1492 | return MSVCIntrin::_BitScanReverse; |
1493 | case clang::ARM::BI_InterlockedAnd64: |
1494 | return MSVCIntrin::_InterlockedAnd; |
1495 | case clang::ARM::BI_InterlockedExchange64: |
1496 | return MSVCIntrin::_InterlockedExchange; |
1497 | case clang::ARM::BI_InterlockedExchangeAdd64: |
1498 | return MSVCIntrin::_InterlockedExchangeAdd; |
1499 | case clang::ARM::BI_InterlockedExchangeSub64: |
1500 | return MSVCIntrin::_InterlockedExchangeSub; |
1501 | case clang::ARM::BI_InterlockedOr64: |
1502 | return MSVCIntrin::_InterlockedOr; |
1503 | case clang::ARM::BI_InterlockedXor64: |
1504 | return MSVCIntrin::_InterlockedXor; |
1505 | case clang::ARM::BI_InterlockedDecrement64: |
1506 | return MSVCIntrin::_InterlockedDecrement; |
1507 | case clang::ARM::BI_InterlockedIncrement64: |
1508 | return MSVCIntrin::_InterlockedIncrement; |
1509 | case clang::ARM::BI_InterlockedExchangeAdd8_acq: |
1510 | case clang::ARM::BI_InterlockedExchangeAdd16_acq: |
1511 | case clang::ARM::BI_InterlockedExchangeAdd_acq: |
1512 | case clang::ARM::BI_InterlockedExchangeAdd64_acq: |
1513 | return MSVCIntrin::_InterlockedExchangeAdd_acq; |
1514 | case clang::ARM::BI_InterlockedExchangeAdd8_rel: |
1515 | case clang::ARM::BI_InterlockedExchangeAdd16_rel: |
1516 | case clang::ARM::BI_InterlockedExchangeAdd_rel: |
1517 | case clang::ARM::BI_InterlockedExchangeAdd64_rel: |
1518 | return MSVCIntrin::_InterlockedExchangeAdd_rel; |
1519 | case clang::ARM::BI_InterlockedExchangeAdd8_nf: |
1520 | case clang::ARM::BI_InterlockedExchangeAdd16_nf: |
1521 | case clang::ARM::BI_InterlockedExchangeAdd_nf: |
1522 | case clang::ARM::BI_InterlockedExchangeAdd64_nf: |
1523 | return MSVCIntrin::_InterlockedExchangeAdd_nf; |
1524 | case clang::ARM::BI_InterlockedExchange8_acq: |
1525 | case clang::ARM::BI_InterlockedExchange16_acq: |
1526 | case clang::ARM::BI_InterlockedExchange_acq: |
1527 | case clang::ARM::BI_InterlockedExchange64_acq: |
1528 | return MSVCIntrin::_InterlockedExchange_acq; |
1529 | case clang::ARM::BI_InterlockedExchange8_rel: |
1530 | case clang::ARM::BI_InterlockedExchange16_rel: |
1531 | case clang::ARM::BI_InterlockedExchange_rel: |
1532 | case clang::ARM::BI_InterlockedExchange64_rel: |
1533 | return MSVCIntrin::_InterlockedExchange_rel; |
1534 | case clang::ARM::BI_InterlockedExchange8_nf: |
1535 | case clang::ARM::BI_InterlockedExchange16_nf: |
1536 | case clang::ARM::BI_InterlockedExchange_nf: |
1537 | case clang::ARM::BI_InterlockedExchange64_nf: |
1538 | return MSVCIntrin::_InterlockedExchange_nf; |
1539 | case clang::ARM::BI_InterlockedCompareExchange8_acq: |
1540 | case clang::ARM::BI_InterlockedCompareExchange16_acq: |
1541 | case clang::ARM::BI_InterlockedCompareExchange_acq: |
1542 | case clang::ARM::BI_InterlockedCompareExchange64_acq: |
1543 | return MSVCIntrin::_InterlockedCompareExchange_acq; |
1544 | case clang::ARM::BI_InterlockedCompareExchange8_rel: |
1545 | case clang::ARM::BI_InterlockedCompareExchange16_rel: |
1546 | case clang::ARM::BI_InterlockedCompareExchange_rel: |
1547 | case clang::ARM::BI_InterlockedCompareExchange64_rel: |
1548 | return MSVCIntrin::_InterlockedCompareExchange_rel; |
1549 | case clang::ARM::BI_InterlockedCompareExchange8_nf: |
1550 | case clang::ARM::BI_InterlockedCompareExchange16_nf: |
1551 | case clang::ARM::BI_InterlockedCompareExchange_nf: |
1552 | case clang::ARM::BI_InterlockedCompareExchange64_nf: |
1553 | return MSVCIntrin::_InterlockedCompareExchange_nf; |
1554 | case clang::ARM::BI_InterlockedOr8_acq: |
1555 | case clang::ARM::BI_InterlockedOr16_acq: |
1556 | case clang::ARM::BI_InterlockedOr_acq: |
1557 | case clang::ARM::BI_InterlockedOr64_acq: |
1558 | return MSVCIntrin::_InterlockedOr_acq; |
1559 | case clang::ARM::BI_InterlockedOr8_rel: |
1560 | case clang::ARM::BI_InterlockedOr16_rel: |
1561 | case clang::ARM::BI_InterlockedOr_rel: |
1562 | case clang::ARM::BI_InterlockedOr64_rel: |
1563 | return MSVCIntrin::_InterlockedOr_rel; |
1564 | case clang::ARM::BI_InterlockedOr8_nf: |
1565 | case clang::ARM::BI_InterlockedOr16_nf: |
1566 | case clang::ARM::BI_InterlockedOr_nf: |
1567 | case clang::ARM::BI_InterlockedOr64_nf: |
1568 | return MSVCIntrin::_InterlockedOr_nf; |
1569 | case clang::ARM::BI_InterlockedXor8_acq: |
1570 | case clang::ARM::BI_InterlockedXor16_acq: |
1571 | case clang::ARM::BI_InterlockedXor_acq: |
1572 | case clang::ARM::BI_InterlockedXor64_acq: |
1573 | return MSVCIntrin::_InterlockedXor_acq; |
1574 | case clang::ARM::BI_InterlockedXor8_rel: |
1575 | case clang::ARM::BI_InterlockedXor16_rel: |
1576 | case clang::ARM::BI_InterlockedXor_rel: |
1577 | case clang::ARM::BI_InterlockedXor64_rel: |
1578 | return MSVCIntrin::_InterlockedXor_rel; |
1579 | case clang::ARM::BI_InterlockedXor8_nf: |
1580 | case clang::ARM::BI_InterlockedXor16_nf: |
1581 | case clang::ARM::BI_InterlockedXor_nf: |
1582 | case clang::ARM::BI_InterlockedXor64_nf: |
1583 | return MSVCIntrin::_InterlockedXor_nf; |
1584 | case clang::ARM::BI_InterlockedAnd8_acq: |
1585 | case clang::ARM::BI_InterlockedAnd16_acq: |
1586 | case clang::ARM::BI_InterlockedAnd_acq: |
1587 | case clang::ARM::BI_InterlockedAnd64_acq: |
1588 | return MSVCIntrin::_InterlockedAnd_acq; |
1589 | case clang::ARM::BI_InterlockedAnd8_rel: |
1590 | case clang::ARM::BI_InterlockedAnd16_rel: |
1591 | case clang::ARM::BI_InterlockedAnd_rel: |
1592 | case clang::ARM::BI_InterlockedAnd64_rel: |
1593 | return MSVCIntrin::_InterlockedAnd_rel; |
1594 | case clang::ARM::BI_InterlockedAnd8_nf: |
1595 | case clang::ARM::BI_InterlockedAnd16_nf: |
1596 | case clang::ARM::BI_InterlockedAnd_nf: |
1597 | case clang::ARM::BI_InterlockedAnd64_nf: |
1598 | return MSVCIntrin::_InterlockedAnd_nf; |
1599 | case clang::ARM::BI_InterlockedIncrement16_acq: |
1600 | case clang::ARM::BI_InterlockedIncrement_acq: |
1601 | case clang::ARM::BI_InterlockedIncrement64_acq: |
1602 | return MSVCIntrin::_InterlockedIncrement_acq; |
1603 | case clang::ARM::BI_InterlockedIncrement16_rel: |
1604 | case clang::ARM::BI_InterlockedIncrement_rel: |
1605 | case clang::ARM::BI_InterlockedIncrement64_rel: |
1606 | return MSVCIntrin::_InterlockedIncrement_rel; |
1607 | case clang::ARM::BI_InterlockedIncrement16_nf: |
1608 | case clang::ARM::BI_InterlockedIncrement_nf: |
1609 | case clang::ARM::BI_InterlockedIncrement64_nf: |
1610 | return MSVCIntrin::_InterlockedIncrement_nf; |
1611 | case clang::ARM::BI_InterlockedDecrement16_acq: |
1612 | case clang::ARM::BI_InterlockedDecrement_acq: |
1613 | case clang::ARM::BI_InterlockedDecrement64_acq: |
1614 | return MSVCIntrin::_InterlockedDecrement_acq; |
1615 | case clang::ARM::BI_InterlockedDecrement16_rel: |
1616 | case clang::ARM::BI_InterlockedDecrement_rel: |
1617 | case clang::ARM::BI_InterlockedDecrement64_rel: |
1618 | return MSVCIntrin::_InterlockedDecrement_rel; |
1619 | case clang::ARM::BI_InterlockedDecrement16_nf: |
1620 | case clang::ARM::BI_InterlockedDecrement_nf: |
1621 | case clang::ARM::BI_InterlockedDecrement64_nf: |
1622 | return MSVCIntrin::_InterlockedDecrement_nf; |
1623 | } |
1624 | llvm_unreachable("must return from switch" ); |
1625 | } |
1626 | |
1627 | static std::optional<CodeGenFunction::MSVCIntrin> |
1628 | translateAarch64ToMsvcIntrin(unsigned BuiltinID) { |
1629 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1630 | switch (BuiltinID) { |
1631 | default: |
1632 | return std::nullopt; |
1633 | case clang::AArch64::BI_BitScanForward: |
1634 | case clang::AArch64::BI_BitScanForward64: |
1635 | return MSVCIntrin::_BitScanForward; |
1636 | case clang::AArch64::BI_BitScanReverse: |
1637 | case clang::AArch64::BI_BitScanReverse64: |
1638 | return MSVCIntrin::_BitScanReverse; |
1639 | case clang::AArch64::BI_InterlockedAnd64: |
1640 | return MSVCIntrin::_InterlockedAnd; |
1641 | case clang::AArch64::BI_InterlockedExchange64: |
1642 | return MSVCIntrin::_InterlockedExchange; |
1643 | case clang::AArch64::BI_InterlockedExchangeAdd64: |
1644 | return MSVCIntrin::_InterlockedExchangeAdd; |
1645 | case clang::AArch64::BI_InterlockedExchangeSub64: |
1646 | return MSVCIntrin::_InterlockedExchangeSub; |
1647 | case clang::AArch64::BI_InterlockedOr64: |
1648 | return MSVCIntrin::_InterlockedOr; |
1649 | case clang::AArch64::BI_InterlockedXor64: |
1650 | return MSVCIntrin::_InterlockedXor; |
1651 | case clang::AArch64::BI_InterlockedDecrement64: |
1652 | return MSVCIntrin::_InterlockedDecrement; |
1653 | case clang::AArch64::BI_InterlockedIncrement64: |
1654 | return MSVCIntrin::_InterlockedIncrement; |
1655 | case clang::AArch64::BI_InterlockedExchangeAdd8_acq: |
1656 | case clang::AArch64::BI_InterlockedExchangeAdd16_acq: |
1657 | case clang::AArch64::BI_InterlockedExchangeAdd_acq: |
1658 | case clang::AArch64::BI_InterlockedExchangeAdd64_acq: |
1659 | return MSVCIntrin::_InterlockedExchangeAdd_acq; |
1660 | case clang::AArch64::BI_InterlockedExchangeAdd8_rel: |
1661 | case clang::AArch64::BI_InterlockedExchangeAdd16_rel: |
1662 | case clang::AArch64::BI_InterlockedExchangeAdd_rel: |
1663 | case clang::AArch64::BI_InterlockedExchangeAdd64_rel: |
1664 | return MSVCIntrin::_InterlockedExchangeAdd_rel; |
1665 | case clang::AArch64::BI_InterlockedExchangeAdd8_nf: |
1666 | case clang::AArch64::BI_InterlockedExchangeAdd16_nf: |
1667 | case clang::AArch64::BI_InterlockedExchangeAdd_nf: |
1668 | case clang::AArch64::BI_InterlockedExchangeAdd64_nf: |
1669 | return MSVCIntrin::_InterlockedExchangeAdd_nf; |
1670 | case clang::AArch64::BI_InterlockedExchange8_acq: |
1671 | case clang::AArch64::BI_InterlockedExchange16_acq: |
1672 | case clang::AArch64::BI_InterlockedExchange_acq: |
1673 | case clang::AArch64::BI_InterlockedExchange64_acq: |
1674 | return MSVCIntrin::_InterlockedExchange_acq; |
1675 | case clang::AArch64::BI_InterlockedExchange8_rel: |
1676 | case clang::AArch64::BI_InterlockedExchange16_rel: |
1677 | case clang::AArch64::BI_InterlockedExchange_rel: |
1678 | case clang::AArch64::BI_InterlockedExchange64_rel: |
1679 | return MSVCIntrin::_InterlockedExchange_rel; |
1680 | case clang::AArch64::BI_InterlockedExchange8_nf: |
1681 | case clang::AArch64::BI_InterlockedExchange16_nf: |
1682 | case clang::AArch64::BI_InterlockedExchange_nf: |
1683 | case clang::AArch64::BI_InterlockedExchange64_nf: |
1684 | return MSVCIntrin::_InterlockedExchange_nf; |
1685 | case clang::AArch64::BI_InterlockedCompareExchange8_acq: |
1686 | case clang::AArch64::BI_InterlockedCompareExchange16_acq: |
1687 | case clang::AArch64::BI_InterlockedCompareExchange_acq: |
1688 | case clang::AArch64::BI_InterlockedCompareExchange64_acq: |
1689 | return MSVCIntrin::_InterlockedCompareExchange_acq; |
1690 | case clang::AArch64::BI_InterlockedCompareExchange8_rel: |
1691 | case clang::AArch64::BI_InterlockedCompareExchange16_rel: |
1692 | case clang::AArch64::BI_InterlockedCompareExchange_rel: |
1693 | case clang::AArch64::BI_InterlockedCompareExchange64_rel: |
1694 | return MSVCIntrin::_InterlockedCompareExchange_rel; |
1695 | case clang::AArch64::BI_InterlockedCompareExchange8_nf: |
1696 | case clang::AArch64::BI_InterlockedCompareExchange16_nf: |
1697 | case clang::AArch64::BI_InterlockedCompareExchange_nf: |
1698 | case clang::AArch64::BI_InterlockedCompareExchange64_nf: |
1699 | return MSVCIntrin::_InterlockedCompareExchange_nf; |
1700 | case clang::AArch64::BI_InterlockedCompareExchange128: |
1701 | return MSVCIntrin::_InterlockedCompareExchange128; |
1702 | case clang::AArch64::BI_InterlockedCompareExchange128_acq: |
1703 | return MSVCIntrin::_InterlockedCompareExchange128_acq; |
1704 | case clang::AArch64::BI_InterlockedCompareExchange128_nf: |
1705 | return MSVCIntrin::_InterlockedCompareExchange128_nf; |
1706 | case clang::AArch64::BI_InterlockedCompareExchange128_rel: |
1707 | return MSVCIntrin::_InterlockedCompareExchange128_rel; |
1708 | case clang::AArch64::BI_InterlockedOr8_acq: |
1709 | case clang::AArch64::BI_InterlockedOr16_acq: |
1710 | case clang::AArch64::BI_InterlockedOr_acq: |
1711 | case clang::AArch64::BI_InterlockedOr64_acq: |
1712 | return MSVCIntrin::_InterlockedOr_acq; |
1713 | case clang::AArch64::BI_InterlockedOr8_rel: |
1714 | case clang::AArch64::BI_InterlockedOr16_rel: |
1715 | case clang::AArch64::BI_InterlockedOr_rel: |
1716 | case clang::AArch64::BI_InterlockedOr64_rel: |
1717 | return MSVCIntrin::_InterlockedOr_rel; |
1718 | case clang::AArch64::BI_InterlockedOr8_nf: |
1719 | case clang::AArch64::BI_InterlockedOr16_nf: |
1720 | case clang::AArch64::BI_InterlockedOr_nf: |
1721 | case clang::AArch64::BI_InterlockedOr64_nf: |
1722 | return MSVCIntrin::_InterlockedOr_nf; |
1723 | case clang::AArch64::BI_InterlockedXor8_acq: |
1724 | case clang::AArch64::BI_InterlockedXor16_acq: |
1725 | case clang::AArch64::BI_InterlockedXor_acq: |
1726 | case clang::AArch64::BI_InterlockedXor64_acq: |
1727 | return MSVCIntrin::_InterlockedXor_acq; |
1728 | case clang::AArch64::BI_InterlockedXor8_rel: |
1729 | case clang::AArch64::BI_InterlockedXor16_rel: |
1730 | case clang::AArch64::BI_InterlockedXor_rel: |
1731 | case clang::AArch64::BI_InterlockedXor64_rel: |
1732 | return MSVCIntrin::_InterlockedXor_rel; |
1733 | case clang::AArch64::BI_InterlockedXor8_nf: |
1734 | case clang::AArch64::BI_InterlockedXor16_nf: |
1735 | case clang::AArch64::BI_InterlockedXor_nf: |
1736 | case clang::AArch64::BI_InterlockedXor64_nf: |
1737 | return MSVCIntrin::_InterlockedXor_nf; |
1738 | case clang::AArch64::BI_InterlockedAnd8_acq: |
1739 | case clang::AArch64::BI_InterlockedAnd16_acq: |
1740 | case clang::AArch64::BI_InterlockedAnd_acq: |
1741 | case clang::AArch64::BI_InterlockedAnd64_acq: |
1742 | return MSVCIntrin::_InterlockedAnd_acq; |
1743 | case clang::AArch64::BI_InterlockedAnd8_rel: |
1744 | case clang::AArch64::BI_InterlockedAnd16_rel: |
1745 | case clang::AArch64::BI_InterlockedAnd_rel: |
1746 | case clang::AArch64::BI_InterlockedAnd64_rel: |
1747 | return MSVCIntrin::_InterlockedAnd_rel; |
1748 | case clang::AArch64::BI_InterlockedAnd8_nf: |
1749 | case clang::AArch64::BI_InterlockedAnd16_nf: |
1750 | case clang::AArch64::BI_InterlockedAnd_nf: |
1751 | case clang::AArch64::BI_InterlockedAnd64_nf: |
1752 | return MSVCIntrin::_InterlockedAnd_nf; |
1753 | case clang::AArch64::BI_InterlockedIncrement16_acq: |
1754 | case clang::AArch64::BI_InterlockedIncrement_acq: |
1755 | case clang::AArch64::BI_InterlockedIncrement64_acq: |
1756 | return MSVCIntrin::_InterlockedIncrement_acq; |
1757 | case clang::AArch64::BI_InterlockedIncrement16_rel: |
1758 | case clang::AArch64::BI_InterlockedIncrement_rel: |
1759 | case clang::AArch64::BI_InterlockedIncrement64_rel: |
1760 | return MSVCIntrin::_InterlockedIncrement_rel; |
1761 | case clang::AArch64::BI_InterlockedIncrement16_nf: |
1762 | case clang::AArch64::BI_InterlockedIncrement_nf: |
1763 | case clang::AArch64::BI_InterlockedIncrement64_nf: |
1764 | return MSVCIntrin::_InterlockedIncrement_nf; |
1765 | case clang::AArch64::BI_InterlockedDecrement16_acq: |
1766 | case clang::AArch64::BI_InterlockedDecrement_acq: |
1767 | case clang::AArch64::BI_InterlockedDecrement64_acq: |
1768 | return MSVCIntrin::_InterlockedDecrement_acq; |
1769 | case clang::AArch64::BI_InterlockedDecrement16_rel: |
1770 | case clang::AArch64::BI_InterlockedDecrement_rel: |
1771 | case clang::AArch64::BI_InterlockedDecrement64_rel: |
1772 | return MSVCIntrin::_InterlockedDecrement_rel; |
1773 | case clang::AArch64::BI_InterlockedDecrement16_nf: |
1774 | case clang::AArch64::BI_InterlockedDecrement_nf: |
1775 | case clang::AArch64::BI_InterlockedDecrement64_nf: |
1776 | return MSVCIntrin::_InterlockedDecrement_nf; |
1777 | } |
1778 | llvm_unreachable("must return from switch" ); |
1779 | } |
1780 | |
1781 | static std::optional<CodeGenFunction::MSVCIntrin> |
1782 | translateX86ToMsvcIntrin(unsigned BuiltinID) { |
1783 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1784 | switch (BuiltinID) { |
1785 | default: |
1786 | return std::nullopt; |
1787 | case clang::X86::BI_BitScanForward: |
1788 | case clang::X86::BI_BitScanForward64: |
1789 | return MSVCIntrin::_BitScanForward; |
1790 | case clang::X86::BI_BitScanReverse: |
1791 | case clang::X86::BI_BitScanReverse64: |
1792 | return MSVCIntrin::_BitScanReverse; |
1793 | case clang::X86::BI_InterlockedAnd64: |
1794 | return MSVCIntrin::_InterlockedAnd; |
1795 | case clang::X86::BI_InterlockedCompareExchange128: |
1796 | return MSVCIntrin::_InterlockedCompareExchange128; |
1797 | case clang::X86::BI_InterlockedExchange64: |
1798 | return MSVCIntrin::_InterlockedExchange; |
1799 | case clang::X86::BI_InterlockedExchangeAdd64: |
1800 | return MSVCIntrin::_InterlockedExchangeAdd; |
1801 | case clang::X86::BI_InterlockedExchangeSub64: |
1802 | return MSVCIntrin::_InterlockedExchangeSub; |
1803 | case clang::X86::BI_InterlockedOr64: |
1804 | return MSVCIntrin::_InterlockedOr; |
1805 | case clang::X86::BI_InterlockedXor64: |
1806 | return MSVCIntrin::_InterlockedXor; |
1807 | case clang::X86::BI_InterlockedDecrement64: |
1808 | return MSVCIntrin::_InterlockedDecrement; |
1809 | case clang::X86::BI_InterlockedIncrement64: |
1810 | return MSVCIntrin::_InterlockedIncrement; |
1811 | } |
1812 | llvm_unreachable("must return from switch" ); |
1813 | } |
1814 | |
1815 | // Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated. |
1816 | Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, |
1817 | const CallExpr *E) { |
1818 | switch (BuiltinID) { |
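  // _BitScanForward/_BitScanReverse store the index of the lowest/highest set
  // bit and return 1, or return 0 without storing when the input is zero. We
  // emit an explicit zero check around cttz/ctlz to model that.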
1819 | case MSVCIntrin::_BitScanForward: |
1820 | case MSVCIntrin::_BitScanReverse: { |
1821 | Address IndexAddress(EmitPointerWithAlignment(Addr: E->getArg(Arg: 0))); |
1822 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 1)); |
1823 | |
1824 | llvm::Type *ArgType = ArgValue->getType(); |
1825 | llvm::Type *IndexType = IndexAddress.getElementType(); |
1826 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
1827 | |
1828 | Value *ArgZero = llvm::Constant::getNullValue(Ty: ArgType); |
1829 | Value *ResZero = llvm::Constant::getNullValue(Ty: ResultType); |
1830 | Value *ResOne = llvm::ConstantInt::get(Ty: ResultType, V: 1); |
1831 | |
1832 | BasicBlock *Begin = Builder.GetInsertBlock(); |
1833 | BasicBlock *End = createBasicBlock(name: "bitscan_end" , parent: this->CurFn); |
1834 | Builder.SetInsertPoint(End); |
1835 | PHINode *Result = Builder.CreatePHI(Ty: ResultType, NumReservedValues: 2, Name: "bitscan_result" ); |
1836 | |
1837 | Builder.SetInsertPoint(Begin); |
1838 | Value *IsZero = Builder.CreateICmpEQ(LHS: ArgValue, RHS: ArgZero); |
1839 | BasicBlock *NotZero = createBasicBlock(name: "bitscan_not_zero" , parent: this->CurFn); |
1840 | Builder.CreateCondBr(Cond: IsZero, True: End, False: NotZero); |
1841 | Result->addIncoming(V: ResZero, BB: Begin); |
1842 | |
1843 | Builder.SetInsertPoint(NotZero); |
1844 | |
1845 | if (BuiltinID == MSVCIntrin::_BitScanForward) { |
1846 | Function *F = CGM.getIntrinsic(IID: Intrinsic::cttz, Tys: ArgType); |
1847 | Value *ZeroCount = Builder.CreateCall(Callee: F, Args: {ArgValue, Builder.getTrue()}); |
1848 | ZeroCount = Builder.CreateIntCast(V: ZeroCount, DestTy: IndexType, isSigned: false); |
1849 | Builder.CreateStore(Val: ZeroCount, Addr: IndexAddress, IsVolatile: false); |
1850 | } else { |
1851 | unsigned ArgWidth = cast<llvm::IntegerType>(Val: ArgType)->getBitWidth(); |
1852 | Value *ArgTypeLastIndex = llvm::ConstantInt::get(Ty: IndexType, V: ArgWidth - 1); |
1853 | |
1854 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: ArgType); |
1855 | Value *ZeroCount = Builder.CreateCall(Callee: F, Args: {ArgValue, Builder.getTrue()}); |
1856 | ZeroCount = Builder.CreateIntCast(V: ZeroCount, DestTy: IndexType, isSigned: false); |
1857 | Value *Index = Builder.CreateNSWSub(LHS: ArgTypeLastIndex, RHS: ZeroCount); |
1858 | Builder.CreateStore(Val: Index, Addr: IndexAddress, IsVolatile: false); |
1859 | } |
1860 | Builder.CreateBr(Dest: End); |
1861 | Result->addIncoming(V: ResOne, BB: NotZero); |
1862 | |
1863 | Builder.SetInsertPoint(End); |
1864 | return Result; |
1865 | } |
1866 | case MSVCIntrin::_InterlockedAnd: |
1867 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::And, E); |
1868 | case MSVCIntrin::_InterlockedExchange: |
1869 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Xchg, E); |
1870 | case MSVCIntrin::_InterlockedExchangeAdd: |
1871 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Add, E); |
1872 | case MSVCIntrin::_InterlockedExchangeSub: |
1873 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Sub, E); |
1874 | case MSVCIntrin::_InterlockedOr: |
1875 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Or, E); |
1876 | case MSVCIntrin::_InterlockedXor: |
1877 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Xor, E); |
1878 | case MSVCIntrin::_InterlockedExchangeAdd_acq: |
1879 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Add, E, |
1880 | Ordering: AtomicOrdering::Acquire); |
1881 | case MSVCIntrin::_InterlockedExchangeAdd_rel: |
1882 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Add, E, |
1883 | Ordering: AtomicOrdering::Release); |
1884 | case MSVCIntrin::_InterlockedExchangeAdd_nf: |
1885 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Add, E, |
1886 | Ordering: AtomicOrdering::Monotonic); |
1887 | case MSVCIntrin::_InterlockedExchange_acq: |
1888 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Xchg, E, |
1889 | Ordering: AtomicOrdering::Acquire); |
1890 | case MSVCIntrin::_InterlockedExchange_rel: |
1891 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Xchg, E, |
1892 | Ordering: AtomicOrdering::Release); |
1893 | case MSVCIntrin::_InterlockedExchange_nf: |
1894 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Xchg, E, |
1895 | Ordering: AtomicOrdering::Monotonic); |
1896 | case MSVCIntrin::_InterlockedCompareExchange_acq: |
1897 | return EmitAtomicCmpXchgForMSIntrin(CGF&: *this, E, SuccessOrdering: AtomicOrdering::Acquire); |
1898 | case MSVCIntrin::_InterlockedCompareExchange_rel: |
1899 | return EmitAtomicCmpXchgForMSIntrin(CGF&: *this, E, SuccessOrdering: AtomicOrdering::Release); |
1900 | case MSVCIntrin::_InterlockedCompareExchange_nf: |
1901 | return EmitAtomicCmpXchgForMSIntrin(CGF&: *this, E, SuccessOrdering: AtomicOrdering::Monotonic); |
1902 | case MSVCIntrin::_InterlockedCompareExchange128: |
1903 | return EmitAtomicCmpXchg128ForMSIntrin( |
1904 | CGF&: *this, E, SuccessOrdering: AtomicOrdering::SequentiallyConsistent); |
1905 | case MSVCIntrin::_InterlockedCompareExchange128_acq: |
1906 | return EmitAtomicCmpXchg128ForMSIntrin(CGF&: *this, E, SuccessOrdering: AtomicOrdering::Acquire); |
1907 | case MSVCIntrin::_InterlockedCompareExchange128_rel: |
1908 | return EmitAtomicCmpXchg128ForMSIntrin(CGF&: *this, E, SuccessOrdering: AtomicOrdering::Release); |
1909 | case MSVCIntrin::_InterlockedCompareExchange128_nf: |
1910 | return EmitAtomicCmpXchg128ForMSIntrin(CGF&: *this, E, SuccessOrdering: AtomicOrdering::Monotonic); |
1911 | case MSVCIntrin::_InterlockedOr_acq: |
1912 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Or, E, |
1913 | Ordering: AtomicOrdering::Acquire); |
1914 | case MSVCIntrin::_InterlockedOr_rel: |
1915 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Or, E, |
1916 | Ordering: AtomicOrdering::Release); |
1917 | case MSVCIntrin::_InterlockedOr_nf: |
1918 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Or, E, |
1919 | Ordering: AtomicOrdering::Monotonic); |
1920 | case MSVCIntrin::_InterlockedXor_acq: |
1921 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Xor, E, |
1922 | Ordering: AtomicOrdering::Acquire); |
1923 | case MSVCIntrin::_InterlockedXor_rel: |
1924 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Xor, E, |
1925 | Ordering: AtomicOrdering::Release); |
1926 | case MSVCIntrin::_InterlockedXor_nf: |
1927 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Xor, E, |
1928 | Ordering: AtomicOrdering::Monotonic); |
1929 | case MSVCIntrin::_InterlockedAnd_acq: |
1930 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::And, E, |
1931 | Ordering: AtomicOrdering::Acquire); |
1932 | case MSVCIntrin::_InterlockedAnd_rel: |
1933 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::And, E, |
1934 | Ordering: AtomicOrdering::Release); |
1935 | case MSVCIntrin::_InterlockedAnd_nf: |
1936 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::And, E, |
1937 | Ordering: AtomicOrdering::Monotonic); |
1938 | case MSVCIntrin::_InterlockedIncrement_acq: |
1939 | return EmitAtomicIncrementValue(CGF&: *this, E, Ordering: AtomicOrdering::Acquire); |
1940 | case MSVCIntrin::_InterlockedIncrement_rel: |
1941 | return EmitAtomicIncrementValue(CGF&: *this, E, Ordering: AtomicOrdering::Release); |
1942 | case MSVCIntrin::_InterlockedIncrement_nf: |
1943 | return EmitAtomicIncrementValue(CGF&: *this, E, Ordering: AtomicOrdering::Monotonic); |
1944 | case MSVCIntrin::_InterlockedDecrement_acq: |
1945 | return EmitAtomicDecrementValue(CGF&: *this, E, Ordering: AtomicOrdering::Acquire); |
1946 | case MSVCIntrin::_InterlockedDecrement_rel: |
1947 | return EmitAtomicDecrementValue(CGF&: *this, E, Ordering: AtomicOrdering::Release); |
1948 | case MSVCIntrin::_InterlockedDecrement_nf: |
1949 | return EmitAtomicDecrementValue(CGF&: *this, E, Ordering: AtomicOrdering::Monotonic); |
1950 | |
1951 | case MSVCIntrin::_InterlockedDecrement: |
1952 | return EmitAtomicDecrementValue(CGF&: *this, E); |
1953 | case MSVCIntrin::_InterlockedIncrement: |
1954 | return EmitAtomicIncrementValue(CGF&: *this, E); |
1955 | |
1956 | case MSVCIntrin::__fastfail: { |
1957 | // Request immediate process termination from the kernel. The instruction |
1958 | // sequences to do this are documented on MSDN: |
1959 | // https://msdn.microsoft.com/en-us/library/dn774154.aspx |
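    // The fast-fail code (the builtin's single argument) is passed in a fixed
    // register selected by the per-architecture constraint below ({cx}, {r0},
    // or {w0}).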
1960 | llvm::Triple::ArchType ISA = getTarget().getTriple().getArch(); |
1961 | StringRef Asm, Constraints; |
1962 | switch (ISA) { |
1963 | default: |
1964 | ErrorUnsupported(S: E, Type: "__fastfail call for this architecture" ); |
1965 | break; |
1966 | case llvm::Triple::x86: |
1967 | case llvm::Triple::x86_64: |
1968 | Asm = "int $$0x29" ; |
1969 | Constraints = "{cx}" ; |
1970 | break; |
1971 | case llvm::Triple::thumb: |
1972 | Asm = "udf #251" ; |
1973 | Constraints = "{r0}" ; |
1974 | break; |
1975 | case llvm::Triple::aarch64: |
1976 | Asm = "brk #0xF003" ; |
1977 | Constraints = "{w0}" ; |
1978 | } |
1979 | llvm::FunctionType *FTy = llvm::FunctionType::get(Result: VoidTy, Params: {Int32Ty}, isVarArg: false); |
1980 | llvm::InlineAsm *IA = |
1981 | llvm::InlineAsm::get(Ty: FTy, AsmString: Asm, Constraints, /*hasSideEffects=*/true); |
1982 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
1983 | C&: getLLVMContext(), Index: llvm::AttributeList::FunctionIndex, |
1984 | Kinds: llvm::Attribute::NoReturn); |
1985 | llvm::CallInst *CI = Builder.CreateCall(Callee: IA, Args: EmitScalarExpr(E: E->getArg(Arg: 0))); |
1986 | CI->setAttributes(NoReturnAttr); |
1987 | return CI; |
1988 | } |
1989 | } |
1990 | llvm_unreachable("Incorrect MSVC intrinsic!" ); |
1991 | } |
1992 | |
1993 | namespace { |
1994 | // ARC cleanup for __builtin_os_log_format |
1995 | struct CallObjCArcUse final : EHScopeStack::Cleanup { |
1996 | CallObjCArcUse(llvm::Value *object) : object(object) {} |
1997 | llvm::Value *object; |
1998 | |
1999 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2000 | CGF.EmitARCIntrinsicUse(values: object); |
2001 | } |
2002 | }; |
2003 | } |
2004 | |
2005 | Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E, |
2006 | BuiltinCheckKind Kind) { |
2007 | assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) |
2008 | && "Unsupported builtin check kind" ); |
2009 | |
2010 | Value *ArgValue = EmitScalarExpr(E); |
2011 | if (!SanOpts.has(K: SanitizerKind::Builtin)) |
2012 | return ArgValue; |
2013 | |
2014 | SanitizerScope SanScope(this); |
2015 | Value *Cond = Builder.CreateICmpNE( |
2016 | LHS: ArgValue, RHS: llvm::Constant::getNullValue(Ty: ArgValue->getType())); |
2017 | EmitCheck(Checked: std::make_pair(x&: Cond, y: SanitizerKind::Builtin), |
2018 | Check: SanitizerHandler::InvalidBuiltin, |
2019 | StaticArgs: {EmitCheckSourceLocation(Loc: E->getExprLoc()), |
2020 | llvm::ConstantInt::get(Ty: Builder.getInt8Ty(), V: Kind)}, |
2021 | DynamicArgs: std::nullopt); |
2022 | return ArgValue; |
2023 | } |
2024 | |
2025 | static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) { |
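  // The i1 operand of llvm.abs is its "is_int_min_poison" flag; reusing HasNSW
  // here means abs(INT_MIN) is poison exactly when the no-signed-wrap form is
  // requested.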
2026 | return CGF.Builder.CreateBinaryIntrinsic( |
2027 | ID: Intrinsic::abs, LHS: ArgValue, |
2028 | RHS: ConstantInt::get(Ty: CGF.Builder.getInt1Ty(), V: HasNSW)); |
2029 | } |
2030 | |
2031 | static Value *EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E, |
2032 | bool SanitizeOverflow) { |
2033 | Value *ArgValue = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
2034 | |
  // Try to eliminate the overflow check for constant arguments.
2036 | if (const auto *VCI = dyn_cast<llvm::ConstantInt>(Val: ArgValue)) { |
2037 | if (!VCI->isMinSignedValue()) |
2038 | return EmitAbs(CGF, ArgValue, HasNSW: true); |
2039 | } |
2040 | |
2041 | CodeGenFunction::SanitizerScope SanScope(&CGF); |
2042 | |
2043 | Constant *Zero = Constant::getNullValue(Ty: ArgValue->getType()); |
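  // abs(x) is computed below as "x < 0 ? 0 - x : x". The only input for which
  // 0 - x overflows is INT_MIN, which is also the only value whose absolute
  // value cannot be represented, so ssub.with.overflow doubles as the check.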
2044 | Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic( |
2045 | ID: Intrinsic::ssub_with_overflow, LHS: Zero, RHS: ArgValue); |
2046 | Value *Result = CGF.Builder.CreateExtractValue(Agg: ResultAndOverflow, Idxs: 0); |
2047 | Value *NotOverflow = CGF.Builder.CreateNot( |
2048 | V: CGF.Builder.CreateExtractValue(Agg: ResultAndOverflow, Idxs: 1)); |
2049 | |
2050 | // TODO: support -ftrapv-handler. |
2051 | if (SanitizeOverflow) { |
2052 | CGF.EmitCheck(Checked: {{NotOverflow, SanitizerKind::SignedIntegerOverflow}}, |
2053 | Check: SanitizerHandler::NegateOverflow, |
2054 | StaticArgs: {CGF.EmitCheckSourceLocation(Loc: E->getArg(Arg: 0)->getExprLoc()), |
2055 | CGF.EmitCheckTypeDescriptor(T: E->getType())}, |
2056 | DynamicArgs: {ArgValue}); |
2057 | } else |
2058 | CGF.EmitTrapCheck(Checked: NotOverflow, CheckHandlerID: SanitizerHandler::SubOverflow); |
2059 | |
2060 | Value *CmpResult = CGF.Builder.CreateICmpSLT(LHS: ArgValue, RHS: Zero, Name: "abscond" ); |
2061 | return CGF.Builder.CreateSelect(C: CmpResult, True: Result, False: ArgValue, Name: "abs" ); |
2062 | } |
2063 | |
2064 | /// Get the argument type for arguments to os_log_helper. |
2065 | static CanQualType getOSLogArgType(ASTContext &C, int Size) { |
2066 | QualType UnsignedTy = C.getIntTypeForBitwidth(DestWidth: Size * 8, /*Signed=*/false); |
2067 | return C.getCanonicalType(T: UnsignedTy); |
2068 | } |
2069 | |
2070 | llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction( |
2071 | const analyze_os_log::OSLogBufferLayout &Layout, |
2072 | CharUnits BufferAlignment) { |
2073 | ASTContext &Ctx = getContext(); |
2074 | |
2075 | llvm::SmallString<64> Name; |
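  // The mangled helper name encodes the buffer alignment, the summary and
  // argument-count bytes, and a (size, descriptor) pair per item, e.g. a name
  // of the form "__os_log_helper_1_2_1_8_34" (illustrative values only), so
  // identical layouts share one helper.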
2076 | { |
2077 | raw_svector_ostream OS(Name); |
2078 | OS << "__os_log_helper" ; |
2079 | OS << "_" << BufferAlignment.getQuantity(); |
2080 | OS << "_" << int(Layout.getSummaryByte()); |
2081 | OS << "_" << int(Layout.getNumArgsByte()); |
2082 | for (const auto &Item : Layout.Items) |
2083 | OS << "_" << int(Item.getSizeByte()) << "_" |
2084 | << int(Item.getDescriptorByte()); |
2085 | } |
2086 | |
2087 | if (llvm::Function *F = CGM.getModule().getFunction(Name)) |
2088 | return F; |
2089 | |
2090 | llvm::SmallVector<QualType, 4> ArgTys; |
2091 | FunctionArgList Args; |
2092 | Args.push_back(Elt: ImplicitParamDecl::Create( |
2093 | C&: Ctx, DC: nullptr, IdLoc: SourceLocation(), Id: &Ctx.Idents.get(Name: "buffer" ), T: Ctx.VoidPtrTy, |
2094 | ParamKind: ImplicitParamKind::Other)); |
2095 | ArgTys.emplace_back(Args&: Ctx.VoidPtrTy); |
2096 | |
2097 | for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) { |
2098 | char Size = Layout.Items[I].getSizeByte(); |
2099 | if (!Size) |
2100 | continue; |
2101 | |
2102 | QualType ArgTy = getOSLogArgType(C&: Ctx, Size); |
2103 | Args.push_back(Elt: ImplicitParamDecl::Create( |
2104 | C&: Ctx, DC: nullptr, IdLoc: SourceLocation(), |
2105 | Id: &Ctx.Idents.get(Name: std::string("arg" ) + llvm::to_string(Value: I)), T: ArgTy, |
2106 | ParamKind: ImplicitParamKind::Other)); |
2107 | ArgTys.emplace_back(Args&: ArgTy); |
2108 | } |
2109 | |
2110 | QualType ReturnTy = Ctx.VoidTy; |
2111 | |
2112 | // The helper function has linkonce_odr linkage to enable the linker to merge |
2113 | // identical functions. To ensure the merging always happens, 'noinline' is |
2114 | // attached to the function when compiling with -Oz. |
2115 | const CGFunctionInfo &FI = |
2116 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: ReturnTy, args: Args); |
2117 | llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(Info: FI); |
2118 | llvm::Function *Fn = llvm::Function::Create( |
2119 | Ty: FuncTy, Linkage: llvm::GlobalValue::LinkOnceODRLinkage, N: Name, M: &CGM.getModule()); |
2120 | Fn->setVisibility(llvm::GlobalValue::HiddenVisibility); |
2121 | CGM.SetLLVMFunctionAttributes(GD: GlobalDecl(), Info: FI, F: Fn, /*IsThunk=*/false); |
2122 | CGM.SetLLVMFunctionAttributesForDefinition(D: nullptr, F: Fn); |
2123 | Fn->setDoesNotThrow(); |
2124 | |
2125 | // Attach 'noinline' at -Oz. |
2126 | if (CGM.getCodeGenOpts().OptimizeSize == 2) |
2127 | Fn->addFnAttr(Kind: llvm::Attribute::NoInline); |
2128 | |
2129 | auto NL = ApplyDebugLocation::CreateEmpty(CGF&: *this); |
2130 | StartFunction(GD: GlobalDecl(), RetTy: ReturnTy, Fn, FnInfo: FI, Args); |
2131 | |
2132 | // Create a scope with an artificial location for the body of this function. |
2133 | auto AL = ApplyDebugLocation::CreateArtificial(CGF&: *this); |
2134 | |
2135 | CharUnits Offset; |
2136 | Address BufAddr = makeNaturalAddressForPointer( |
2137 | Ptr: Builder.CreateLoad(Addr: GetAddrOfLocalVar(VD: Args[0]), Name: "buf" ), T: Ctx.VoidTy, |
2138 | Alignment: BufferAlignment); |
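  // The serialized buffer layout written below is: a summary byte, an argument
  // count byte, then for each argument a descriptor byte, a size byte, and the
  // argument data itself.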
2139 | Builder.CreateStore(Val: Builder.getInt8(C: Layout.getSummaryByte()), |
2140 | Addr: Builder.CreateConstByteGEP(Addr: BufAddr, Offset: Offset++, Name: "summary" )); |
2141 | Builder.CreateStore(Val: Builder.getInt8(C: Layout.getNumArgsByte()), |
2142 | Addr: Builder.CreateConstByteGEP(Addr: BufAddr, Offset: Offset++, Name: "numArgs" )); |
2143 | |
2144 | unsigned I = 1; |
2145 | for (const auto &Item : Layout.Items) { |
2146 | Builder.CreateStore( |
2147 | Val: Builder.getInt8(C: Item.getDescriptorByte()), |
2148 | Addr: Builder.CreateConstByteGEP(Addr: BufAddr, Offset: Offset++, Name: "argDescriptor" )); |
2149 | Builder.CreateStore( |
2150 | Val: Builder.getInt8(C: Item.getSizeByte()), |
2151 | Addr: Builder.CreateConstByteGEP(Addr: BufAddr, Offset: Offset++, Name: "argSize" )); |
2152 | |
2153 | CharUnits Size = Item.size(); |
2154 | if (!Size.getQuantity()) |
2155 | continue; |
2156 | |
2157 | Address Arg = GetAddrOfLocalVar(VD: Args[I]); |
2158 | Address Addr = Builder.CreateConstByteGEP(Addr: BufAddr, Offset, Name: "argData" ); |
2159 | Addr = Addr.withElementType(ElemTy: Arg.getElementType()); |
2160 | Builder.CreateStore(Val: Builder.CreateLoad(Addr: Arg), Addr); |
2161 | Offset += Size; |
2162 | ++I; |
2163 | } |
2164 | |
2165 | FinishFunction(); |
2166 | |
2167 | return Fn; |
2168 | } |
2169 | |
2170 | RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { |
2171 | assert(E.getNumArgs() >= 2 && |
2172 | "__builtin_os_log_format takes at least 2 arguments" ); |
2173 | ASTContext &Ctx = getContext(); |
2174 | analyze_os_log::OSLogBufferLayout Layout; |
2175 | analyze_os_log::computeOSLogBufferLayout(Ctx, E: &E, layout&: Layout); |
2176 | Address BufAddr = EmitPointerWithAlignment(Addr: E.getArg(Arg: 0)); |
2177 | llvm::SmallVector<llvm::Value *, 4> RetainableOperands; |
2178 | |
2179 | // Ignore argument 1, the format string. It is not currently used. |
2180 | CallArgList Args; |
2181 | Args.add(rvalue: RValue::get(V: BufAddr.emitRawPointer(CGF&: *this)), type: Ctx.VoidPtrTy); |
2182 | |
2183 | for (const auto &Item : Layout.Items) { |
2184 | int Size = Item.getSizeByte(); |
2185 | if (!Size) |
2186 | continue; |
2187 | |
2188 | llvm::Value *ArgVal; |
2189 | |
2190 | if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) { |
2191 | uint64_t Val = 0; |
2192 | for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I) |
2193 | Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8; |
2194 | ArgVal = llvm::Constant::getIntegerValue(Ty: Int64Ty, V: llvm::APInt(64, Val)); |
2195 | } else if (const Expr *TheExpr = Item.getExpr()) { |
2196 | ArgVal = EmitScalarExpr(E: TheExpr, /*Ignore*/ IgnoreResultAssign: false); |
2197 | |
2198 | // If a temporary object that requires destruction after the full |
2199 | // expression is passed, push a lifetime-extended cleanup to extend its |
2200 | // lifetime to the end of the enclosing block scope. |
2201 | auto LifetimeExtendObject = [&](const Expr *E) { |
2202 | E = E->IgnoreParenCasts(); |
2203 | // Extend lifetimes of objects returned by function calls and message |
2204 | // sends. |
2205 | |
2206 | // FIXME: We should do this in other cases in which temporaries are |
2207 | // created including arguments of non-ARC types (e.g., C++ |
2208 | // temporaries). |
2209 | if (isa<CallExpr>(Val: E) || isa<ObjCMessageExpr>(Val: E)) |
2210 | return true; |
2211 | return false; |
2212 | }; |
2213 | |
2214 | if (TheExpr->getType()->isObjCRetainableType() && |
2215 | getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) { |
2216 | assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar && |
2217 | "Only scalar can be a ObjC retainable type" ); |
2218 | if (!isa<Constant>(Val: ArgVal)) { |
2219 | CleanupKind Cleanup = getARCCleanupKind(); |
2220 | QualType Ty = TheExpr->getType(); |
2221 | RawAddress Alloca = RawAddress::invalid(); |
2222 | RawAddress Addr = CreateMemTemp(T: Ty, Name: "os.log.arg" , Alloca: &Alloca); |
2223 | ArgVal = EmitARCRetain(type: Ty, value: ArgVal); |
2224 | Builder.CreateStore(Val: ArgVal, Addr); |
2225 | pushLifetimeExtendedDestroy(kind: Cleanup, addr: Alloca, type: Ty, |
2226 | destroyer: CodeGenFunction::destroyARCStrongPrecise, |
2227 | useEHCleanupForArray: Cleanup & EHCleanup); |
2228 | |
          // Push a clang.arc.use call to ensure the ARC optimizer knows that
          // the argument has to be kept alive.
2231 | if (CGM.getCodeGenOpts().OptimizationLevel != 0) |
2232 | pushCleanupAfterFullExpr<CallObjCArcUse>(Kind: Cleanup, A: ArgVal); |
2233 | } |
2234 | } |
2235 | } else { |
2236 | ArgVal = Builder.getInt32(C: Item.getConstValue().getQuantity()); |
2237 | } |
2238 | |
2239 | unsigned ArgValSize = |
2240 | CGM.getDataLayout().getTypeSizeInBits(Ty: ArgVal->getType()); |
2241 | llvm::IntegerType *IntTy = llvm::Type::getIntNTy(C&: getLLVMContext(), |
2242 | N: ArgValSize); |
2243 | ArgVal = Builder.CreateBitOrPointerCast(V: ArgVal, DestTy: IntTy); |
2244 | CanQualType ArgTy = getOSLogArgType(C&: Ctx, Size); |
2245 | // If ArgVal has type x86_fp80, zero-extend ArgVal. |
2246 | ArgVal = Builder.CreateZExtOrBitCast(V: ArgVal, DestTy: ConvertType(T: ArgTy)); |
2247 | Args.add(rvalue: RValue::get(V: ArgVal), type: ArgTy); |
2248 | } |
2249 | |
2250 | const CGFunctionInfo &FI = |
2251 | CGM.getTypes().arrangeBuiltinFunctionCall(resultType: Ctx.VoidTy, args: Args); |
2252 | llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction( |
2253 | Layout, BufferAlignment: BufAddr.getAlignment()); |
2254 | EmitCall(CallInfo: FI, Callee: CGCallee::forDirect(functionPtr: F), ReturnValue: ReturnValueSlot(), Args); |
2255 | return RValue::get(Addr: BufAddr, CGF&: *this); |
2256 | } |
2257 | |
2258 | static bool isSpecialUnsignedMultiplySignedResult( |
2259 | unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, |
2260 | WidthAndSignedness ResultInfo) { |
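  // For example, this matches a call such as
  //   unsigned a, b; int res;
  //   __builtin_mul_overflow(a, b, &res);
  // where both operands are unsigned and the result type is signed, all of the
  // same width.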
2261 | return BuiltinID == Builtin::BI__builtin_mul_overflow && |
2262 | Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width && |
2263 | !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed; |
2264 | } |
2265 | |
2266 | static RValue EmitCheckedUnsignedMultiplySignedResult( |
2267 | CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, |
2268 | const clang::Expr *Op2, WidthAndSignedness Op2Info, |
2269 | const clang::Expr *ResultArg, QualType ResultQTy, |
2270 | WidthAndSignedness ResultInfo) { |
2271 | assert(isSpecialUnsignedMultiplySignedResult( |
2272 | Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) && |
2273 | "Cannot specialize this multiply" ); |
2274 | |
2275 | llvm::Value *V1 = CGF.EmitScalarExpr(E: Op1); |
2276 | llvm::Value *V2 = CGF.EmitScalarExpr(E: Op2); |
2277 | |
2278 | llvm::Value *HasOverflow; |
2279 | llvm::Value *Result = EmitOverflowIntrinsic( |
2280 | CGF, IntrinsicID: llvm::Intrinsic::umul_with_overflow, X: V1, Y: V2, Carry&: HasOverflow); |
2281 | |
  // The intrinsic call will detect overflow when the value is > UINT_MAX;
  // however, since the original builtin had a signed result, we also need to
  // report an overflow when the result is greater than INT_MAX.
2285 | auto IntMax = llvm::APInt::getSignedMaxValue(numBits: ResultInfo.Width); |
2286 | llvm::Value *IntMaxValue = llvm::ConstantInt::get(Ty: Result->getType(), V: IntMax); |
2287 | |
2288 | llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(LHS: Result, RHS: IntMaxValue); |
2289 | HasOverflow = CGF.Builder.CreateOr(LHS: HasOverflow, RHS: IntMaxOverflow); |
2290 | |
2291 | bool isVolatile = |
2292 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
2293 | Address ResultPtr = CGF.EmitPointerWithAlignment(Addr: ResultArg); |
2294 | CGF.Builder.CreateStore(Val: CGF.EmitToMemory(Value: Result, Ty: ResultQTy), Addr: ResultPtr, |
2295 | IsVolatile: isVolatile); |
2296 | return RValue::get(V: HasOverflow); |
2297 | } |
2298 | |
2299 | /// Determine if a binop is a checked mixed-sign multiply we can specialize. |
2300 | static bool isSpecialMixedSignMultiply(unsigned BuiltinID, |
2301 | WidthAndSignedness Op1Info, |
2302 | WidthAndSignedness Op2Info, |
2303 | WidthAndSignedness ResultInfo) { |
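  // For example (assuming an LP64 target), this matches a call such as
  //   int a; unsigned long b; long res;
  //   __builtin_mul_overflow(a, b, &res);
  // where the operands differ in signedness and the wider operand is at least
  // as wide as the result.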
2304 | return BuiltinID == Builtin::BI__builtin_mul_overflow && |
2305 | std::max(a: Op1Info.Width, b: Op2Info.Width) >= ResultInfo.Width && |
2306 | Op1Info.Signed != Op2Info.Signed; |
2307 | } |
2308 | |
2309 | /// Emit a checked mixed-sign multiply. This is a cheaper specialization of |
2310 | /// the generic checked-binop irgen. |
2311 | static RValue |
2312 | EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, |
2313 | WidthAndSignedness Op1Info, const clang::Expr *Op2, |
2314 | WidthAndSignedness Op2Info, |
2315 | const clang::Expr *ResultArg, QualType ResultQTy, |
2316 | WidthAndSignedness ResultInfo) { |
2317 | assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info, |
2318 | Op2Info, ResultInfo) && |
2319 | "Not a mixed-sign multipliction we can specialize" ); |
2320 | |
2321 | // Emit the signed and unsigned operands. |
2322 | const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2; |
2323 | const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1; |
2324 | llvm::Value *Signed = CGF.EmitScalarExpr(E: SignedOp); |
2325 | llvm::Value *Unsigned = CGF.EmitScalarExpr(E: UnsignedOp); |
2326 | unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width; |
2327 | unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width; |
2328 | |
2329 | // One of the operands may be smaller than the other. If so, [s|z]ext it. |
2330 | if (SignedOpWidth < UnsignedOpWidth) |
2331 | Signed = CGF.Builder.CreateSExt(V: Signed, DestTy: Unsigned->getType(), Name: "op.sext" ); |
2332 | if (UnsignedOpWidth < SignedOpWidth) |
2333 | Unsigned = CGF.Builder.CreateZExt(V: Unsigned, DestTy: Signed->getType(), Name: "op.zext" ); |
2334 | |
2335 | llvm::Type *OpTy = Signed->getType(); |
2336 | llvm::Value *Zero = llvm::Constant::getNullValue(Ty: OpTy); |
2337 | Address ResultPtr = CGF.EmitPointerWithAlignment(Addr: ResultArg); |
2338 | llvm::Type *ResTy = ResultPtr.getElementType(); |
2339 | unsigned OpWidth = std::max(a: Op1Info.Width, b: Op2Info.Width); |
2340 | |
2341 | // Take the absolute value of the signed operand. |
2342 | llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(LHS: Signed, RHS: Zero); |
2343 | llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(LHS: Zero, RHS: Signed); |
2344 | llvm::Value *AbsSigned = |
2345 | CGF.Builder.CreateSelect(C: IsNegative, True: AbsOfNegative, False: Signed); |
2346 | |
2347 | // Perform a checked unsigned multiplication. |
2348 | llvm::Value *UnsignedOverflow; |
2349 | llvm::Value *UnsignedResult = |
2350 | EmitOverflowIntrinsic(CGF, IntrinsicID: llvm::Intrinsic::umul_with_overflow, X: AbsSigned, |
2351 | Y: Unsigned, Carry&: UnsignedOverflow); |
2352 | |
2353 | llvm::Value *Overflow, *Result; |
2354 | if (ResultInfo.Signed) { |
    // Signed overflow occurs if the result is greater than INT_MAX or less
    // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
2357 | auto IntMax = |
2358 | llvm::APInt::getSignedMaxValue(numBits: ResultInfo.Width).zext(width: OpWidth); |
2359 | llvm::Value *MaxResult = |
2360 | CGF.Builder.CreateAdd(LHS: llvm::ConstantInt::get(Ty: OpTy, V: IntMax), |
2361 | RHS: CGF.Builder.CreateZExt(V: IsNegative, DestTy: OpTy)); |
2362 | llvm::Value *SignedOverflow = |
2363 | CGF.Builder.CreateICmpUGT(LHS: UnsignedResult, RHS: MaxResult); |
2364 | Overflow = CGF.Builder.CreateOr(LHS: UnsignedOverflow, RHS: SignedOverflow); |
2365 | |
2366 | // Prepare the signed result (possibly by negating it). |
2367 | llvm::Value *NegativeResult = CGF.Builder.CreateNeg(V: UnsignedResult); |
2368 | llvm::Value *SignedResult = |
2369 | CGF.Builder.CreateSelect(C: IsNegative, True: NegativeResult, False: UnsignedResult); |
2370 | Result = CGF.Builder.CreateTrunc(V: SignedResult, DestTy: ResTy); |
2371 | } else { |
2372 | // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX. |
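// E.g. (-3) * 5 stored into an unsigned result overflows, since the
// infinitely precise product is negative.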
2373 | llvm::Value *Underflow = CGF.Builder.CreateAnd( |
2374 | LHS: IsNegative, RHS: CGF.Builder.CreateIsNotNull(Arg: UnsignedResult)); |
2375 | Overflow = CGF.Builder.CreateOr(LHS: UnsignedOverflow, RHS: Underflow); |
2376 | if (ResultInfo.Width < OpWidth) { |
2377 | auto IntMax = |
2378 | llvm::APInt::getMaxValue(numBits: ResultInfo.Width).zext(width: OpWidth); |
2379 | llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT( |
2380 | LHS: UnsignedResult, RHS: llvm::ConstantInt::get(Ty: OpTy, V: IntMax)); |
2381 | Overflow = CGF.Builder.CreateOr(LHS: Overflow, RHS: TruncOverflow); |
2382 | } |
2383 | |
2384 | // Negate the product if it would be negative in infinite precision. |
2385 | Result = CGF.Builder.CreateSelect( |
2386 | C: IsNegative, True: CGF.Builder.CreateNeg(V: UnsignedResult), False: UnsignedResult); |
2387 | |
2388 | Result = CGF.Builder.CreateTrunc(V: Result, DestTy: ResTy); |
2389 | } |
2390 | assert(Overflow && Result && "Missing overflow or result" ); |
2391 | |
2392 | bool isVolatile = |
2393 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
2394 | CGF.Builder.CreateStore(Val: CGF.EmitToMemory(Value: Result, Ty: ResultQTy), Addr: ResultPtr, |
2395 | IsVolatile: isVolatile); |
2396 | return RValue::get(V: Overflow); |
2397 | } |
2398 | |
2399 | static bool |
2400 | TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, |
2401 | llvm::SmallPtrSetImpl<const Decl *> &Seen) { |
2402 | if (const auto *Arr = Ctx.getAsArrayType(T: Ty)) |
2403 | Ty = Ctx.getBaseElementType(VAT: Arr); |
2404 | |
2405 | const auto *Record = Ty->getAsCXXRecordDecl(); |
2406 | if (!Record) |
2407 | return false; |
2408 | |
2409 | // We've already checked this type, or are in the process of checking it. |
2410 | if (!Seen.insert(Ptr: Record).second) |
2411 | return false; |
2412 | |
2413 | assert(Record->hasDefinition() && |
2414 | "Incomplete types should already be diagnosed" ); |
2415 | |
2416 | if (Record->isDynamicClass()) |
2417 | return true; |
2418 | |
2419 | for (FieldDecl *F : Record->fields()) { |
2420 | if (TypeRequiresBuiltinLaunderImp(Ctx, Ty: F->getType(), Seen)) |
2421 | return true; |
2422 | } |
2423 | return false; |
2424 | } |
2425 | |
2426 | /// Determine if the specified type requires laundering by checking if it is a |
2427 | /// dynamic class type or contains a subobject which is a dynamic class type. |
2428 | static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) { |
2429 | if (!CGM.getCodeGenOpts().StrictVTablePointers) |
2430 | return false; |
2431 | llvm::SmallPtrSet<const Decl *, 16> Seen; |
2432 | return TypeRequiresBuiltinLaunderImp(Ctx: CGM.getContext(), Ty, Seen); |
2433 | } |
2434 | |
2435 | RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) { |
2436 | llvm::Value *Src = EmitScalarExpr(E: E->getArg(Arg: 0)); |
2437 | llvm::Value *ShiftAmt = EmitScalarExpr(E: E->getArg(Arg: 1)); |
2438 | |
2439 | // The builtin's shift arg may have a different type than the source arg and |
2440 | // result, but the LLVM intrinsic uses the same type for all values. |
2441 | llvm::Type *Ty = Src->getType(); |
2442 | ShiftAmt = Builder.CreateIntCast(V: ShiftAmt, DestTy: Ty, isSigned: false); |
2443 | |
2444 | // Rotate is a special case of LLVM funnel shift - the first two args are the same. |
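// E.g. rotl(x, n) lowers to fshl(x, x, n) and rotr(x, n) lowers to fshr(x, x, n).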
2445 | unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl; |
2446 | Function *F = CGM.getIntrinsic(IID, Tys: Ty); |
2447 | return RValue::get(V: Builder.CreateCall(Callee: F, Args: { Src, Src, ShiftAmt })); |
2448 | } |
2449 | |
2450 | // Map long-double math builtins to their f128 versions. |
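// E.g. __builtin_sqrtl becomes __builtin_sqrtf128 when long double uses the
// IEEE binary128 format (see the PPC64 check in EmitBuiltinExpr below).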
2451 | static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) { |
2452 | switch (BuiltinID) { |
2453 | #define MUTATE_LDBL(func) \ |
2454 | case Builtin::BI__builtin_##func##l: \ |
2455 | return Builtin::BI__builtin_##func##f128; |
2456 | MUTATE_LDBL(sqrt) |
2457 | MUTATE_LDBL(cbrt) |
2458 | MUTATE_LDBL(fabs) |
2459 | MUTATE_LDBL(log) |
2460 | MUTATE_LDBL(log2) |
2461 | MUTATE_LDBL(log10) |
2462 | MUTATE_LDBL(log1p) |
2463 | MUTATE_LDBL(logb) |
2464 | MUTATE_LDBL(exp) |
2465 | MUTATE_LDBL(exp2) |
2466 | MUTATE_LDBL(expm1) |
2467 | MUTATE_LDBL(fdim) |
2468 | MUTATE_LDBL(hypot) |
2469 | MUTATE_LDBL(ilogb) |
2470 | MUTATE_LDBL(pow) |
2471 | MUTATE_LDBL(fmin) |
2472 | MUTATE_LDBL(fmax) |
2473 | MUTATE_LDBL(ceil) |
2474 | MUTATE_LDBL(trunc) |
2475 | MUTATE_LDBL(rint) |
2476 | MUTATE_LDBL(nearbyint) |
2477 | MUTATE_LDBL(round) |
2478 | MUTATE_LDBL(floor) |
2479 | MUTATE_LDBL(lround) |
2480 | MUTATE_LDBL(llround) |
2481 | MUTATE_LDBL(lrint) |
2482 | MUTATE_LDBL(llrint) |
2483 | MUTATE_LDBL(fmod) |
2484 | MUTATE_LDBL(modf) |
2485 | MUTATE_LDBL(nan) |
2486 | MUTATE_LDBL(nans) |
2487 | MUTATE_LDBL(inf) |
2488 | MUTATE_LDBL(fma) |
2489 | MUTATE_LDBL(sin) |
2490 | MUTATE_LDBL(cos) |
2491 | MUTATE_LDBL(tan) |
2492 | MUTATE_LDBL(sinh) |
2493 | MUTATE_LDBL(cosh) |
2494 | MUTATE_LDBL(tanh) |
2495 | MUTATE_LDBL(asin) |
2496 | MUTATE_LDBL(acos) |
2497 | MUTATE_LDBL(atan) |
2498 | MUTATE_LDBL(asinh) |
2499 | MUTATE_LDBL(acosh) |
2500 | MUTATE_LDBL(atanh) |
2501 | MUTATE_LDBL(atan2) |
2502 | MUTATE_LDBL(erf) |
2503 | MUTATE_LDBL(erfc) |
2504 | MUTATE_LDBL(ldexp) |
2505 | MUTATE_LDBL(frexp) |
2506 | MUTATE_LDBL(huge_val) |
2507 | MUTATE_LDBL(copysign) |
2508 | MUTATE_LDBL(nextafter) |
2509 | MUTATE_LDBL(nexttoward) |
2510 | MUTATE_LDBL(remainder) |
2511 | MUTATE_LDBL(remquo) |
2512 | MUTATE_LDBL(scalbln) |
2513 | MUTATE_LDBL(scalbn) |
2514 | MUTATE_LDBL(tgamma) |
2515 | MUTATE_LDBL(lgamma) |
2516 | #undef MUTATE_LDBL |
2517 | default: |
2518 | return BuiltinID; |
2519 | } |
2520 | } |
2521 | |
2522 | static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID, |
2523 | Value *V) { |
2524 | if (CGF.Builder.getIsFPConstrained() && |
2525 | CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) { |
2526 | if (Value *Result = |
2527 | CGF.getTargetHooks().testFPKind(V, BuiltinID, Builder&: CGF.Builder, CGM&: CGF.CGM)) |
2528 | return Result; |
2529 | } |
2530 | return nullptr; |
2531 | } |
2532 | |
2533 | static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF, |
2534 | const FunctionDecl *FD) { |
2535 | auto Name = FD->getNameAsString() + "__hipstdpar_unsupported" ; |
2536 | auto FnTy = CGF->CGM.getTypes().GetFunctionType(GD: FD); |
2537 | auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, T: FnTy); |
2538 | |
2539 | SmallVector<Value *, 16> Args; |
2540 | for (auto &&FormalTy : FnTy->params()) |
2541 | Args.push_back(Elt: llvm::PoisonValue::get(T: FormalTy)); |
2542 | |
2543 | return RValue::get(V: CGF->Builder.CreateCall(Callee: UBF, Args)); |
2544 | } |
2545 | |
2546 | RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, |
2547 | const CallExpr *E, |
2548 | ReturnValueSlot ReturnValue) { |
2549 | const FunctionDecl *FD = GD.getDecl()->getAsFunction(); |
2550 | // See if we can constant fold this builtin. If so, don't emit it at all. |
2551 | // TODO: Extend this handling to all builtin calls that we can constant-fold. |
2552 | Expr::EvalResult Result; |
2553 | if (E->isPRValue() && E->EvaluateAsRValue(Result, Ctx: CGM.getContext()) && |
2554 | !Result.hasSideEffects()) { |
2555 | if (Result.Val.isInt()) |
2556 | return RValue::get(V: llvm::ConstantInt::get(Context&: getLLVMContext(), |
2557 | V: Result.Val.getInt())); |
2558 | if (Result.Val.isFloat()) |
2559 | return RValue::get(V: llvm::ConstantFP::get(Context&: getLLVMContext(), |
2560 | V: Result.Val.getFloat())); |
2561 | } |
2562 | |
2563 | // If the current long-double semantics is IEEE 128-bit, replace long-double |
2564 | // math builtins with their f128 equivalents. |
2565 | // TODO: This mutation should also be applied to targets other than PPC, once |
2566 | // the backend supports IEEE 128-bit style libcalls. |
2567 | if (getTarget().getTriple().isPPC64() && |
2568 | &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad()) |
2569 | BuiltinID = mutateLongDoubleBuiltin(BuiltinID); |
2570 | |
2571 | // If the builtin has been declared explicitly with an assembler label, |
2572 | // disable the specialized emitting below. Ideally we should communicate the |
2573 | // rename in IR, or at least avoid generating the intrinsic calls that are |
2574 | // likely to get lowered to the renamed library functions. |
2575 | const unsigned BuiltinIDIfNoAsmLabel = |
2576 | FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID; |
2577 | |
2578 | std::optional<bool> ErrnoOverriden; |
2579 | // ErrnoOverriden is true if math-errno is overridden via |
2580 | // '#pragma float_control(precise, on)'. This pragma disables fast-math, |
2581 | // which implies math-errno. |
2582 | if (E->hasStoredFPFeatures()) { |
2583 | FPOptionsOverride OP = E->getFPFeatures(); |
2584 | if (OP.hasMathErrnoOverride()) |
2585 | ErrnoOverriden = OP.getMathErrnoOverride(); |
2586 | } |
2587 | // True if '__attribute__((optnone))' is used. This attribute overrides |
2588 | // fast-math, which implies math-errno. |
2589 | bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>(); |
2590 | |
2591 | // True if we are compiling with optimization and errno has been disabled |
2592 | // using '#pragma float_control(precise, off)', and |
2593 | // '__attribute__((optnone))' hasn't been seen. |
2594 | bool ErrnoOverridenToFalseWithOpt = |
2595 | ErrnoOverriden.has_value() && !ErrnoOverriden.value() && !OptNone && |
2596 | CGM.getCodeGenOpts().OptimizationLevel != 0; |
2597 | |
2598 | // There are LLVM math intrinsics/instructions corresponding to math library |
2599 | // functions, except that the LLVM op will never set errno while the math library |
2600 | // might. Also, math builtins have the same semantics as their math library |
2601 | // twins. Thus, we can transform math library and builtin calls to their |
2602 | // LLVM counterparts if the call is marked 'const' (known to never set errno). |
2603 | // In case FP exceptions are enabled, the experimental versions of the |
2604 | // intrinsics model those. |
2605 | bool ConstAlways = |
2606 | getContext().BuiltinInfo.isConst(ID: BuiltinID); |
2607 | |
2608 | // There's a special case with the fma builtins where they are always const |
2609 | // if the target environment is GNU or the target OS is Windows and we're |
2610 | // targeting the MSVCRT.dll environment. |
2611 | // FIXME: This list can become outdated. Need to find a way to get this |
2612 | // information some other way. |
2613 | switch (BuiltinID) { |
2614 | case Builtin::BI__builtin_fma: |
2615 | case Builtin::BI__builtin_fmaf: |
2616 | case Builtin::BI__builtin_fmal: |
2617 | case Builtin::BI__builtin_fmaf16: |
2618 | case Builtin::BIfma: |
2619 | case Builtin::BIfmaf: |
2620 | case Builtin::BIfmal: { |
2621 | auto &Trip = CGM.getTriple(); |
2622 | if (Trip.isGNUEnvironment() || Trip.isOSMSVCRT()) |
2623 | ConstAlways = true; |
2624 | break; |
2625 | } |
2626 | default: |
2627 | break; |
2628 | } |
2629 | |
2630 | bool ConstWithoutErrnoAndExceptions = |
2631 | getContext().BuiltinInfo.isConstWithoutErrnoAndExceptions(ID: BuiltinID); |
2632 | bool ConstWithoutExceptions = |
2633 | getContext().BuiltinInfo.isConstWithoutExceptions(ID: BuiltinID); |
2634 | |
2635 | // ConstAttr is enabled in fast-math mode. In fast-math mode, math-errno is |
2636 | // disabled. |
2637 | // Math intrinsics are generated only when math-errno is disabled. Any pragmas |
2638 | // or attributes that affect math-errno should prevent or allow math |
2639 | // intrinsics to be generated. Intrinsics are generated: |
2640 | // 1- In fast-math mode, unless math-errno is overridden |
2641 | //    via '#pragma float_control(precise, on)', or via |
2642 | //    '__attribute__((optnone))'. |
2643 | // 2- If math-errno was enabled on the command line but overridden |
2644 | //    to false via '#pragma float_control(precise, off)' and |
2645 | //    '__attribute__((optnone))' hasn't been used. |
2646 | // 3- If we are compiling with optimization and errno has been disabled |
2647 | //    via '#pragma float_control(precise, off)', and |
2648 | //    '__attribute__((optnone))' hasn't been used. |
2649 | |
2650 | bool ConstWithoutErrnoOrExceptions = |
2651 | ConstWithoutErrnoAndExceptions || ConstWithoutExceptions; |
2652 | bool GenerateIntrinsics = |
2653 | (ConstAlways && !OptNone) || |
2654 | (!getLangOpts().MathErrno && |
2655 | !(ErrnoOverriden.has_value() && ErrnoOverriden.value()) && !OptNone); |
2656 | if (!GenerateIntrinsics) { |
2657 | GenerateIntrinsics = |
2658 | ConstWithoutErrnoOrExceptions && !ConstWithoutErrnoAndExceptions; |
2659 | if (!GenerateIntrinsics) |
2660 | GenerateIntrinsics = |
2661 | ConstWithoutErrnoOrExceptions && |
2662 | (!getLangOpts().MathErrno && |
2663 | !(ErrnoOverriden.has_value() && ErrnoOverriden.value()) && !OptNone); |
2664 | if (!GenerateIntrinsics) |
2665 | GenerateIntrinsics = |
2666 | ConstWithoutErrnoOrExceptions && ErrnoOverridenToFalseWithOpt; |
2667 | } |
2668 | if (GenerateIntrinsics) { |
2669 | switch (BuiltinIDIfNoAsmLabel) { |
2670 | case Builtin::BIacos: |
2671 | case Builtin::BIacosf: |
2672 | case Builtin::BIacosl: |
2673 | case Builtin::BI__builtin_acos: |
2674 | case Builtin::BI__builtin_acosf: |
2675 | case Builtin::BI__builtin_acosf16: |
2676 | case Builtin::BI__builtin_acosl: |
2677 | case Builtin::BI__builtin_acosf128: |
2678 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
2679 | CGF&: *this, E, IntrinsicID: Intrinsic::acos, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_acos)); |
2680 | |
2681 | case Builtin::BIasin: |
2682 | case Builtin::BIasinf: |
2683 | case Builtin::BIasinl: |
2684 | case Builtin::BI__builtin_asin: |
2685 | case Builtin::BI__builtin_asinf: |
2686 | case Builtin::BI__builtin_asinf16: |
2687 | case Builtin::BI__builtin_asinl: |
2688 | case Builtin::BI__builtin_asinf128: |
2689 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
2690 | CGF&: *this, E, IntrinsicID: Intrinsic::asin, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_asin)); |
2691 | |
2692 | case Builtin::BIatan: |
2693 | case Builtin::BIatanf: |
2694 | case Builtin::BIatanl: |
2695 | case Builtin::BI__builtin_atan: |
2696 | case Builtin::BI__builtin_atanf: |
2697 | case Builtin::BI__builtin_atanf16: |
2698 | case Builtin::BI__builtin_atanl: |
2699 | case Builtin::BI__builtin_atanf128: |
2700 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
2701 | CGF&: *this, E, IntrinsicID: Intrinsic::atan, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_atan)); |
2702 | |
2703 | case Builtin::BIceil: |
2704 | case Builtin::BIceilf: |
2705 | case Builtin::BIceill: |
2706 | case Builtin::BI__builtin_ceil: |
2707 | case Builtin::BI__builtin_ceilf: |
2708 | case Builtin::BI__builtin_ceilf16: |
2709 | case Builtin::BI__builtin_ceill: |
2710 | case Builtin::BI__builtin_ceilf128: |
2711 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2712 | IntrinsicID: Intrinsic::ceil, |
2713 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_ceil)); |
2714 | |
2715 | case Builtin::BIcopysign: |
2716 | case Builtin::BIcopysignf: |
2717 | case Builtin::BIcopysignl: |
2718 | case Builtin::BI__builtin_copysign: |
2719 | case Builtin::BI__builtin_copysignf: |
2720 | case Builtin::BI__builtin_copysignf16: |
2721 | case Builtin::BI__builtin_copysignl: |
2722 | case Builtin::BI__builtin_copysignf128: |
2723 | return RValue::get( |
2724 | V: emitBuiltinWithOneOverloadedType<2>(CGF&: *this, E, IntrinsicID: Intrinsic::copysign)); |
2725 | |
2726 | case Builtin::BIcos: |
2727 | case Builtin::BIcosf: |
2728 | case Builtin::BIcosl: |
2729 | case Builtin::BI__builtin_cos: |
2730 | case Builtin::BI__builtin_cosf: |
2731 | case Builtin::BI__builtin_cosf16: |
2732 | case Builtin::BI__builtin_cosl: |
2733 | case Builtin::BI__builtin_cosf128: |
2734 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2735 | IntrinsicID: Intrinsic::cos, |
2736 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_cos)); |
2737 | |
2738 | case Builtin::BIcosh: |
2739 | case Builtin::BIcoshf: |
2740 | case Builtin::BIcoshl: |
2741 | case Builtin::BI__builtin_cosh: |
2742 | case Builtin::BI__builtin_coshf: |
2743 | case Builtin::BI__builtin_coshf16: |
2744 | case Builtin::BI__builtin_coshl: |
2745 | case Builtin::BI__builtin_coshf128: |
2746 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
2747 | CGF&: *this, E, IntrinsicID: Intrinsic::cosh, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_cosh)); |
2748 | |
2749 | case Builtin::BIexp: |
2750 | case Builtin::BIexpf: |
2751 | case Builtin::BIexpl: |
2752 | case Builtin::BI__builtin_exp: |
2753 | case Builtin::BI__builtin_expf: |
2754 | case Builtin::BI__builtin_expf16: |
2755 | case Builtin::BI__builtin_expl: |
2756 | case Builtin::BI__builtin_expf128: |
2757 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2758 | IntrinsicID: Intrinsic::exp, |
2759 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_exp)); |
2760 | |
2761 | case Builtin::BIexp2: |
2762 | case Builtin::BIexp2f: |
2763 | case Builtin::BIexp2l: |
2764 | case Builtin::BI__builtin_exp2: |
2765 | case Builtin::BI__builtin_exp2f: |
2766 | case Builtin::BI__builtin_exp2f16: |
2767 | case Builtin::BI__builtin_exp2l: |
2768 | case Builtin::BI__builtin_exp2f128: |
2769 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2770 | IntrinsicID: Intrinsic::exp2, |
2771 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_exp2)); |
2772 | case Builtin::BI__builtin_exp10: |
2773 | case Builtin::BI__builtin_exp10f: |
2774 | case Builtin::BI__builtin_exp10f16: |
2775 | case Builtin::BI__builtin_exp10l: |
2776 | case Builtin::BI__builtin_exp10f128: { |
2777 | // TODO: strictfp support |
2778 | if (Builder.getIsFPConstrained()) |
2779 | break; |
2780 | return RValue::get( |
2781 | V: emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, IntrinsicID: Intrinsic::exp10)); |
2782 | } |
2783 | case Builtin::BIfabs: |
2784 | case Builtin::BIfabsf: |
2785 | case Builtin::BIfabsl: |
2786 | case Builtin::BI__builtin_fabs: |
2787 | case Builtin::BI__builtin_fabsf: |
2788 | case Builtin::BI__builtin_fabsf16: |
2789 | case Builtin::BI__builtin_fabsl: |
2790 | case Builtin::BI__builtin_fabsf128: |
2791 | return RValue::get( |
2792 | V: emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, IntrinsicID: Intrinsic::fabs)); |
2793 | |
2794 | case Builtin::BIfloor: |
2795 | case Builtin::BIfloorf: |
2796 | case Builtin::BIfloorl: |
2797 | case Builtin::BI__builtin_floor: |
2798 | case Builtin::BI__builtin_floorf: |
2799 | case Builtin::BI__builtin_floorf16: |
2800 | case Builtin::BI__builtin_floorl: |
2801 | case Builtin::BI__builtin_floorf128: |
2802 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2803 | IntrinsicID: Intrinsic::floor, |
2804 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_floor)); |
2805 | |
2806 | case Builtin::BIfma: |
2807 | case Builtin::BIfmaf: |
2808 | case Builtin::BIfmal: |
2809 | case Builtin::BI__builtin_fma: |
2810 | case Builtin::BI__builtin_fmaf: |
2811 | case Builtin::BI__builtin_fmaf16: |
2812 | case Builtin::BI__builtin_fmal: |
2813 | case Builtin::BI__builtin_fmaf128: |
2814 | return RValue::get(V: emitTernaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2815 | IntrinsicID: Intrinsic::fma, |
2816 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_fma)); |
2817 | |
2818 | case Builtin::BIfmax: |
2819 | case Builtin::BIfmaxf: |
2820 | case Builtin::BIfmaxl: |
2821 | case Builtin::BI__builtin_fmax: |
2822 | case Builtin::BI__builtin_fmaxf: |
2823 | case Builtin::BI__builtin_fmaxf16: |
2824 | case Builtin::BI__builtin_fmaxl: |
2825 | case Builtin::BI__builtin_fmaxf128: |
2826 | return RValue::get(V: emitBinaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2827 | IntrinsicID: Intrinsic::maxnum, |
2828 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_maxnum)); |
2829 | |
2830 | case Builtin::BIfmin: |
2831 | case Builtin::BIfminf: |
2832 | case Builtin::BIfminl: |
2833 | case Builtin::BI__builtin_fmin: |
2834 | case Builtin::BI__builtin_fminf: |
2835 | case Builtin::BI__builtin_fminf16: |
2836 | case Builtin::BI__builtin_fminl: |
2837 | case Builtin::BI__builtin_fminf128: |
2838 | return RValue::get(V: emitBinaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2839 | IntrinsicID: Intrinsic::minnum, |
2840 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_minnum)); |
2841 | |
2842 | // fmod() is a special case. It maps to the frem instruction rather than an |
2843 | // LLVM intrinsic. |
2844 | case Builtin::BIfmod: |
2845 | case Builtin::BIfmodf: |
2846 | case Builtin::BIfmodl: |
2847 | case Builtin::BI__builtin_fmod: |
2848 | case Builtin::BI__builtin_fmodf: |
2849 | case Builtin::BI__builtin_fmodf16: |
2850 | case Builtin::BI__builtin_fmodl: |
2851 | case Builtin::BI__builtin_fmodf128: { |
2852 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
2853 | Value *Arg1 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
2854 | Value *Arg2 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
2855 | return RValue::get(V: Builder.CreateFRem(L: Arg1, R: Arg2, Name: "fmod" )); |
2856 | } |
2857 | |
2858 | case Builtin::BIlog: |
2859 | case Builtin::BIlogf: |
2860 | case Builtin::BIlogl: |
2861 | case Builtin::BI__builtin_log: |
2862 | case Builtin::BI__builtin_logf: |
2863 | case Builtin::BI__builtin_logf16: |
2864 | case Builtin::BI__builtin_logl: |
2865 | case Builtin::BI__builtin_logf128: |
2866 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2867 | IntrinsicID: Intrinsic::log, |
2868 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_log)); |
2869 | |
2870 | case Builtin::BIlog10: |
2871 | case Builtin::BIlog10f: |
2872 | case Builtin::BIlog10l: |
2873 | case Builtin::BI__builtin_log10: |
2874 | case Builtin::BI__builtin_log10f: |
2875 | case Builtin::BI__builtin_log10f16: |
2876 | case Builtin::BI__builtin_log10l: |
2877 | case Builtin::BI__builtin_log10f128: |
2878 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2879 | IntrinsicID: Intrinsic::log10, |
2880 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_log10)); |
2881 | |
2882 | case Builtin::BIlog2: |
2883 | case Builtin::BIlog2f: |
2884 | case Builtin::BIlog2l: |
2885 | case Builtin::BI__builtin_log2: |
2886 | case Builtin::BI__builtin_log2f: |
2887 | case Builtin::BI__builtin_log2f16: |
2888 | case Builtin::BI__builtin_log2l: |
2889 | case Builtin::BI__builtin_log2f128: |
2890 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2891 | IntrinsicID: Intrinsic::log2, |
2892 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_log2)); |
2893 | |
2894 | case Builtin::BInearbyint: |
2895 | case Builtin::BInearbyintf: |
2896 | case Builtin::BInearbyintl: |
2897 | case Builtin::BI__builtin_nearbyint: |
2898 | case Builtin::BI__builtin_nearbyintf: |
2899 | case Builtin::BI__builtin_nearbyintl: |
2900 | case Builtin::BI__builtin_nearbyintf128: |
2901 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2902 | IntrinsicID: Intrinsic::nearbyint, |
2903 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_nearbyint)); |
2904 | |
2905 | case Builtin::BIpow: |
2906 | case Builtin::BIpowf: |
2907 | case Builtin::BIpowl: |
2908 | case Builtin::BI__builtin_pow: |
2909 | case Builtin::BI__builtin_powf: |
2910 | case Builtin::BI__builtin_powf16: |
2911 | case Builtin::BI__builtin_powl: |
2912 | case Builtin::BI__builtin_powf128: |
2913 | return RValue::get(V: emitBinaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2914 | IntrinsicID: Intrinsic::pow, |
2915 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_pow)); |
2916 | |
2917 | case Builtin::BIrint: |
2918 | case Builtin::BIrintf: |
2919 | case Builtin::BIrintl: |
2920 | case Builtin::BI__builtin_rint: |
2921 | case Builtin::BI__builtin_rintf: |
2922 | case Builtin::BI__builtin_rintf16: |
2923 | case Builtin::BI__builtin_rintl: |
2924 | case Builtin::BI__builtin_rintf128: |
2925 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2926 | IntrinsicID: Intrinsic::rint, |
2927 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_rint)); |
2928 | |
2929 | case Builtin::BIround: |
2930 | case Builtin::BIroundf: |
2931 | case Builtin::BIroundl: |
2932 | case Builtin::BI__builtin_round: |
2933 | case Builtin::BI__builtin_roundf: |
2934 | case Builtin::BI__builtin_roundf16: |
2935 | case Builtin::BI__builtin_roundl: |
2936 | case Builtin::BI__builtin_roundf128: |
2937 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2938 | IntrinsicID: Intrinsic::round, |
2939 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_round)); |
2940 | |
2941 | case Builtin::BIroundeven: |
2942 | case Builtin::BIroundevenf: |
2943 | case Builtin::BIroundevenl: |
2944 | case Builtin::BI__builtin_roundeven: |
2945 | case Builtin::BI__builtin_roundevenf: |
2946 | case Builtin::BI__builtin_roundevenf16: |
2947 | case Builtin::BI__builtin_roundevenl: |
2948 | case Builtin::BI__builtin_roundevenf128: |
2949 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2950 | IntrinsicID: Intrinsic::roundeven, |
2951 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_roundeven)); |
2952 | |
2953 | case Builtin::BIsin: |
2954 | case Builtin::BIsinf: |
2955 | case Builtin::BIsinl: |
2956 | case Builtin::BI__builtin_sin: |
2957 | case Builtin::BI__builtin_sinf: |
2958 | case Builtin::BI__builtin_sinf16: |
2959 | case Builtin::BI__builtin_sinl: |
2960 | case Builtin::BI__builtin_sinf128: |
2961 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
2962 | IntrinsicID: Intrinsic::sin, |
2963 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_sin)); |
2964 | |
2965 | case Builtin::BIsinh: |
2966 | case Builtin::BIsinhf: |
2967 | case Builtin::BIsinhl: |
2968 | case Builtin::BI__builtin_sinh: |
2969 | case Builtin::BI__builtin_sinhf: |
2970 | case Builtin::BI__builtin_sinhf16: |
2971 | case Builtin::BI__builtin_sinhl: |
2972 | case Builtin::BI__builtin_sinhf128: |
2973 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
2974 | CGF&: *this, E, IntrinsicID: Intrinsic::sinh, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_sinh)); |
2975 | |
2976 | case Builtin::BIsqrt: |
2977 | case Builtin::BIsqrtf: |
2978 | case Builtin::BIsqrtl: |
2979 | case Builtin::BI__builtin_sqrt: |
2980 | case Builtin::BI__builtin_sqrtf: |
2981 | case Builtin::BI__builtin_sqrtf16: |
2982 | case Builtin::BI__builtin_sqrtl: |
2983 | case Builtin::BI__builtin_sqrtf128: |
2984 | case Builtin::BI__builtin_elementwise_sqrt: { |
2985 | llvm::Value *Call = emitUnaryMaybeConstrainedFPBuiltin( |
2986 | CGF&: *this, E, IntrinsicID: Intrinsic::sqrt, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_sqrt); |
2987 | SetSqrtFPAccuracy(Call); |
2988 | return RValue::get(V: Call); |
2989 | } |
2990 | |
2991 | case Builtin::BItan: |
2992 | case Builtin::BItanf: |
2993 | case Builtin::BItanl: |
2994 | case Builtin::BI__builtin_tan: |
2995 | case Builtin::BI__builtin_tanf: |
2996 | case Builtin::BI__builtin_tanf16: |
2997 | case Builtin::BI__builtin_tanl: |
2998 | case Builtin::BI__builtin_tanf128: |
2999 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
3000 | CGF&: *this, E, IntrinsicID: Intrinsic::tan, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_tan)); |
3001 | |
3002 | case Builtin::BItanh: |
3003 | case Builtin::BItanhf: |
3004 | case Builtin::BItanhl: |
3005 | case Builtin::BI__builtin_tanh: |
3006 | case Builtin::BI__builtin_tanhf: |
3007 | case Builtin::BI__builtin_tanhf16: |
3008 | case Builtin::BI__builtin_tanhl: |
3009 | case Builtin::BI__builtin_tanhf128: |
3010 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
3011 | CGF&: *this, E, IntrinsicID: Intrinsic::tanh, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_tanh)); |
3012 | |
3013 | case Builtin::BItrunc: |
3014 | case Builtin::BItruncf: |
3015 | case Builtin::BItruncl: |
3016 | case Builtin::BI__builtin_trunc: |
3017 | case Builtin::BI__builtin_truncf: |
3018 | case Builtin::BI__builtin_truncf16: |
3019 | case Builtin::BI__builtin_truncl: |
3020 | case Builtin::BI__builtin_truncf128: |
3021 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin(CGF&: *this, E, |
3022 | IntrinsicID: Intrinsic::trunc, |
3023 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_trunc)); |
3024 | |
3025 | case Builtin::BIlround: |
3026 | case Builtin::BIlroundf: |
3027 | case Builtin::BIlroundl: |
3028 | case Builtin::BI__builtin_lround: |
3029 | case Builtin::BI__builtin_lroundf: |
3030 | case Builtin::BI__builtin_lroundl: |
3031 | case Builtin::BI__builtin_lroundf128: |
3032 | return RValue::get(V: emitMaybeConstrainedFPToIntRoundBuiltin( |
3033 | CGF&: *this, E, IntrinsicID: Intrinsic::lround, |
3034 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_lround)); |
3035 | |
3036 | case Builtin::BIllround: |
3037 | case Builtin::BIllroundf: |
3038 | case Builtin::BIllroundl: |
3039 | case Builtin::BI__builtin_llround: |
3040 | case Builtin::BI__builtin_llroundf: |
3041 | case Builtin::BI__builtin_llroundl: |
3042 | case Builtin::BI__builtin_llroundf128: |
3043 | return RValue::get(V: emitMaybeConstrainedFPToIntRoundBuiltin( |
3044 | CGF&: *this, E, IntrinsicID: Intrinsic::llround, |
3045 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_llround)); |
3046 | |
3047 | case Builtin::BIlrint: |
3048 | case Builtin::BIlrintf: |
3049 | case Builtin::BIlrintl: |
3050 | case Builtin::BI__builtin_lrint: |
3051 | case Builtin::BI__builtin_lrintf: |
3052 | case Builtin::BI__builtin_lrintl: |
3053 | case Builtin::BI__builtin_lrintf128: |
3054 | return RValue::get(V: emitMaybeConstrainedFPToIntRoundBuiltin( |
3055 | CGF&: *this, E, IntrinsicID: Intrinsic::lrint, |
3056 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_lrint)); |
3057 | |
3058 | case Builtin::BIllrint: |
3059 | case Builtin::BIllrintf: |
3060 | case Builtin::BIllrintl: |
3061 | case Builtin::BI__builtin_llrint: |
3062 | case Builtin::BI__builtin_llrintf: |
3063 | case Builtin::BI__builtin_llrintl: |
3064 | case Builtin::BI__builtin_llrintf128: |
3065 | return RValue::get(V: emitMaybeConstrainedFPToIntRoundBuiltin( |
3066 | CGF&: *this, E, IntrinsicID: Intrinsic::llrint, |
3067 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_llrint)); |
3068 | case Builtin::BI__builtin_ldexp: |
3069 | case Builtin::BI__builtin_ldexpf: |
3070 | case Builtin::BI__builtin_ldexpl: |
3071 | case Builtin::BI__builtin_ldexpf16: |
3072 | case Builtin::BI__builtin_ldexpf128: { |
3073 | return RValue::get(V: emitBinaryExpMaybeConstrainedFPBuiltin( |
3074 | CGF&: *this, E, IntrinsicID: Intrinsic::ldexp, |
3075 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_ldexp)); |
3076 | } |
3077 | default: |
3078 | break; |
3079 | } |
3080 | } |
3081 | |
3082 | // Check NonnullAttribute/NullabilityArg and Alignment. |
3083 | auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg, |
3084 | unsigned ParmNum) { |
3085 | Value *Val = A.emitRawPointer(CGF&: *this); |
3086 | EmitNonNullArgCheck(RV: RValue::get(V: Val), ArgType: Arg->getType(), ArgLoc: Arg->getExprLoc(), AC: FD, |
3087 | ParmNum); |
3088 | |
3089 | if (SanOpts.has(K: SanitizerKind::Alignment)) { |
3090 | SanitizerSet SkippedChecks; |
3091 | SkippedChecks.set(SanitizerKind::All); |
3092 | SkippedChecks.clear(K: SanitizerKind::Alignment); |
3093 | SourceLocation Loc = Arg->getExprLoc(); |
3094 | // Strip an implicit cast. |
3095 | if (auto *CE = dyn_cast<ImplicitCastExpr>(Val: Arg)) |
3096 | if (CE->getCastKind() == CK_BitCast) |
3097 | Arg = CE->getSubExpr(); |
3098 | EmitTypeCheck(TCK: Kind, Loc, V: Val, Type: Arg->getType(), Alignment: A.getAlignment(), |
3099 | SkippedChecks); |
3100 | } |
3101 | }; |
3102 | |
3103 | switch (BuiltinIDIfNoAsmLabel) { |
3104 | default: break; |
3105 | case Builtin::BI__builtin___CFStringMakeConstantString: |
3106 | case Builtin::BI__builtin___NSStringMakeConstantString: |
3107 | return RValue::get(V: ConstantEmitter(*this).emitAbstract(E, T: E->getType())); |
3108 | case Builtin::BI__builtin_stdarg_start: |
3109 | case Builtin::BI__builtin_va_start: |
3110 | case Builtin::BI__va_start: |
3111 | case Builtin::BI__builtin_va_end: |
3112 | EmitVAStartEnd(ArgValue: BuiltinID == Builtin::BI__va_start |
3113 | ? EmitScalarExpr(E: E->getArg(Arg: 0)) |
3114 | : EmitVAListRef(E: E->getArg(Arg: 0)).emitRawPointer(CGF&: *this), |
3115 | IsStart: BuiltinID != Builtin::BI__builtin_va_end); |
3116 | return RValue::get(V: nullptr); |
3117 | case Builtin::BI__builtin_va_copy: { |
3118 | Value *DstPtr = EmitVAListRef(E: E->getArg(Arg: 0)).emitRawPointer(CGF&: *this); |
3119 | Value *SrcPtr = EmitVAListRef(E: E->getArg(Arg: 1)).emitRawPointer(CGF&: *this); |
3120 | Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::vacopy, Tys: {DstPtr->getType()}), |
3121 | Args: {DstPtr, SrcPtr}); |
3122 | return RValue::get(V: nullptr); |
3123 | } |
3124 | case Builtin::BIabs: |
3125 | case Builtin::BIlabs: |
3126 | case Builtin::BIllabs: |
3127 | case Builtin::BI__builtin_abs: |
3128 | case Builtin::BI__builtin_labs: |
3129 | case Builtin::BI__builtin_llabs: { |
3130 | bool SanitizeOverflow = SanOpts.has(K: SanitizerKind::SignedIntegerOverflow); |
3131 | |
3132 | Value *Result; |
3133 | switch (getLangOpts().getSignedOverflowBehavior()) { |
3134 | case LangOptions::SOB_Defined: |
3135 | Result = EmitAbs(CGF&: *this, ArgValue: EmitScalarExpr(E: E->getArg(Arg: 0)), HasNSW: false); |
3136 | break; |
3137 | case LangOptions::SOB_Undefined: |
3138 | if (!SanitizeOverflow) { |
3139 | Result = EmitAbs(CGF&: *this, ArgValue: EmitScalarExpr(E: E->getArg(Arg: 0)), HasNSW: true); |
3140 | break; |
3141 | } |
3142 | [[fallthrough]]; |
3143 | case LangOptions::SOB_Trapping: |
3144 | // TODO: Somehow handle the corner case when the address of abs is taken. |
3145 | Result = EmitOverflowCheckedAbs(CGF&: *this, E, SanitizeOverflow); |
3146 | break; |
3147 | } |
3148 | return RValue::get(V: Result); |
3149 | } |
3150 | case Builtin::BI__builtin_complex: { |
3151 | Value *Real = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3152 | Value *Imag = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3153 | return RValue::getComplex(C: {Real, Imag}); |
3154 | } |
3155 | case Builtin::BI__builtin_conj: |
3156 | case Builtin::BI__builtin_conjf: |
3157 | case Builtin::BI__builtin_conjl: |
3158 | case Builtin::BIconj: |
3159 | case Builtin::BIconjf: |
3160 | case Builtin::BIconjl: { |
3161 | ComplexPairTy ComplexVal = EmitComplexExpr(E: E->getArg(Arg: 0)); |
3162 | Value *Real = ComplexVal.first; |
3163 | Value *Imag = ComplexVal.second; |
3164 | Imag = Builder.CreateFNeg(V: Imag, Name: "neg" ); |
3165 | return RValue::getComplex(C: std::make_pair(x&: Real, y&: Imag)); |
3166 | } |
3167 | case Builtin::BI__builtin_creal: |
3168 | case Builtin::BI__builtin_crealf: |
3169 | case Builtin::BI__builtin_creall: |
3170 | case Builtin::BIcreal: |
3171 | case Builtin::BIcrealf: |
3172 | case Builtin::BIcreall: { |
3173 | ComplexPairTy ComplexVal = EmitComplexExpr(E: E->getArg(Arg: 0)); |
3174 | return RValue::get(V: ComplexVal.first); |
3175 | } |
3176 | |
3177 | case Builtin::BI__builtin_preserve_access_index: { |
3178 | // Only enable the preserved access index region when debug info |
3179 | // is available, as debug info is needed to preserve the user-level |
3180 | // access pattern. |
3181 | if (!getDebugInfo()) { |
3182 | CGM.Error(loc: E->getExprLoc(), error: "using builtin_preserve_access_index() without -g" ); |
3183 | return RValue::get(V: EmitScalarExpr(E: E->getArg(Arg: 0))); |
3184 | } |
3185 | |
3186 | // Nested builtin_preserve_access_index() not supported |
3187 | if (IsInPreservedAIRegion) { |
3188 | CGM.Error(loc: E->getExprLoc(), error: "nested builtin_preserve_access_index() not supported" ); |
3189 | return RValue::get(V: EmitScalarExpr(E: E->getArg(Arg: 0))); |
3190 | } |
3191 | |
3192 | IsInPreservedAIRegion = true; |
3193 | Value *Res = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3194 | IsInPreservedAIRegion = false; |
3195 | return RValue::get(V: Res); |
3196 | } |
3197 | |
3198 | case Builtin::BI__builtin_cimag: |
3199 | case Builtin::BI__builtin_cimagf: |
3200 | case Builtin::BI__builtin_cimagl: |
3201 | case Builtin::BIcimag: |
3202 | case Builtin::BIcimagf: |
3203 | case Builtin::BIcimagl: { |
3204 | ComplexPairTy ComplexVal = EmitComplexExpr(E: E->getArg(Arg: 0)); |
3205 | return RValue::get(V: ComplexVal.second); |
3206 | } |
3207 | |
3208 | case Builtin::BI__builtin_clrsb: |
3209 | case Builtin::BI__builtin_clrsbl: |
3210 | case Builtin::BI__builtin_clrsbll: { |
3211 | // clrsb(x) -> clz(x < 0 ? ~x : x) - 1 |
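// E.g. for a 32-bit int, clrsb(0) == 31 and clrsb(-1) == 31 (all bits after
// the sign bit match it).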
3212 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3213 | |
3214 | llvm::Type *ArgType = ArgValue->getType(); |
3215 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: ArgType); |
3216 | |
3217 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
3218 | Value *Zero = llvm::Constant::getNullValue(Ty: ArgType); |
3219 | Value *IsNeg = Builder.CreateICmpSLT(LHS: ArgValue, RHS: Zero, Name: "isneg" ); |
3220 | Value *Inverse = Builder.CreateNot(V: ArgValue, Name: "not" ); |
3221 | Value *Tmp = Builder.CreateSelect(C: IsNeg, True: Inverse, False: ArgValue); |
3222 | Value *Ctlz = Builder.CreateCall(Callee: F, Args: {Tmp, Builder.getFalse()}); |
3223 | Value *Result = Builder.CreateSub(LHS: Ctlz, RHS: llvm::ConstantInt::get(Ty: ArgType, V: 1)); |
3224 | Result = Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/true, |
3225 | Name: "cast" ); |
3226 | return RValue::get(V: Result); |
3227 | } |
3228 | case Builtin::BI__builtin_ctzs: |
3229 | case Builtin::BI__builtin_ctz: |
3230 | case Builtin::BI__builtin_ctzl: |
3231 | case Builtin::BI__builtin_ctzll: |
3232 | case Builtin::BI__builtin_ctzg: { |
3233 | bool HasFallback = BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg && |
3234 | E->getNumArgs() > 1; |
3235 | |
3236 | Value *ArgValue = |
3237 | HasFallback ? EmitScalarExpr(E: E->getArg(Arg: 0)) |
3238 | : EmitCheckedArgForBuiltin(E: E->getArg(Arg: 0), Kind: BCK_CTZPassedZero); |
3239 | |
3240 | llvm::Type *ArgType = ArgValue->getType(); |
3241 | Function *F = CGM.getIntrinsic(IID: Intrinsic::cttz, Tys: ArgType); |
3242 | |
3243 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
3244 | Value *ZeroUndef = |
3245 | Builder.getInt1(V: HasFallback || getTarget().isCLZForZeroUndef()); |
3246 | Value *Result = Builder.CreateCall(Callee: F, Args: {ArgValue, ZeroUndef}); |
3247 | if (Result->getType() != ResultType) |
3248 | Result = |
3249 | Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/ false, Name: "cast" ); |
3250 | if (!HasFallback) |
3251 | return RValue::get(V: Result); |
3252 | |
3253 | Value *Zero = Constant::getNullValue(Ty: ArgType); |
3254 | Value *IsZero = Builder.CreateICmpEQ(LHS: ArgValue, RHS: Zero, Name: "iszero" ); |
3255 | Value *FallbackValue = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3256 | Value *ResultOrFallback = |
3257 | Builder.CreateSelect(C: IsZero, True: FallbackValue, False: Result, Name: "ctzg" ); |
3258 | return RValue::get(V: ResultOrFallback); |
3259 | } |
3260 | case Builtin::BI__builtin_clzs: |
3261 | case Builtin::BI__builtin_clz: |
3262 | case Builtin::BI__builtin_clzl: |
3263 | case Builtin::BI__builtin_clzll: |
3264 | case Builtin::BI__builtin_clzg: { |
3265 | bool HasFallback = BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg && |
3266 | E->getNumArgs() > 1; |
3267 | |
3268 | Value *ArgValue = |
3269 | HasFallback ? EmitScalarExpr(E: E->getArg(Arg: 0)) |
3270 | : EmitCheckedArgForBuiltin(E: E->getArg(Arg: 0), Kind: BCK_CLZPassedZero); |
3271 | |
3272 | llvm::Type *ArgType = ArgValue->getType(); |
3273 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: ArgType); |
3274 | |
3275 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
3276 | Value *ZeroUndef = |
3277 | Builder.getInt1(V: HasFallback || getTarget().isCLZForZeroUndef()); |
3278 | Value *Result = Builder.CreateCall(Callee: F, Args: {ArgValue, ZeroUndef}); |
3279 | if (Result->getType() != ResultType) |
3280 | Result = |
3281 | Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/ false, Name: "cast" ); |
3282 | if (!HasFallback) |
3283 | return RValue::get(V: Result); |
3284 | |
3285 | Value *Zero = Constant::getNullValue(Ty: ArgType); |
3286 | Value *IsZero = Builder.CreateICmpEQ(LHS: ArgValue, RHS: Zero, Name: "iszero" ); |
3287 | Value *FallbackValue = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3288 | Value *ResultOrFallback = |
3289 | Builder.CreateSelect(C: IsZero, True: FallbackValue, False: Result, Name: "clzg" ); |
3290 | return RValue::get(V: ResultOrFallback); |
3291 | } |
3292 | case Builtin::BI__builtin_ffs: |
3293 | case Builtin::BI__builtin_ffsl: |
3294 | case Builtin::BI__builtin_ffsll: { |
3295 | // ffs(x) -> x ? cttz(x) + 1 : 0 |
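// E.g. ffs(0x8) == 4 (one plus the index of the least significant set bit),
// and ffs(0) == 0.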
3296 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3297 | |
3298 | llvm::Type *ArgType = ArgValue->getType(); |
3299 | Function *F = CGM.getIntrinsic(IID: Intrinsic::cttz, Tys: ArgType); |
3300 | |
3301 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
3302 | Value *Tmp = |
3303 | Builder.CreateAdd(LHS: Builder.CreateCall(Callee: F, Args: {ArgValue, Builder.getTrue()}), |
3304 | RHS: llvm::ConstantInt::get(Ty: ArgType, V: 1)); |
3305 | Value *Zero = llvm::Constant::getNullValue(Ty: ArgType); |
3306 | Value *IsZero = Builder.CreateICmpEQ(LHS: ArgValue, RHS: Zero, Name: "iszero" ); |
3307 | Value *Result = Builder.CreateSelect(C: IsZero, True: Zero, False: Tmp, Name: "ffs" ); |
3308 | if (Result->getType() != ResultType) |
3309 | Result = Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/true, |
3310 | Name: "cast" ); |
3311 | return RValue::get(V: Result); |
3312 | } |
3313 | case Builtin::BI__builtin_parity: |
3314 | case Builtin::BI__builtin_parityl: |
3315 | case Builtin::BI__builtin_parityll: { |
3316 | // parity(x) -> ctpop(x) & 1 |
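// E.g. parity(0b1011) == 1 (three set bits) and parity(0b1001) == 0.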
3317 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3318 | |
3319 | llvm::Type *ArgType = ArgValue->getType(); |
3320 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctpop, Tys: ArgType); |
3321 | |
3322 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
3323 | Value *Tmp = Builder.CreateCall(Callee: F, Args: ArgValue); |
3324 | Value *Result = Builder.CreateAnd(LHS: Tmp, RHS: llvm::ConstantInt::get(Ty: ArgType, V: 1)); |
3325 | if (Result->getType() != ResultType) |
3326 | Result = Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/true, |
3327 | Name: "cast" ); |
3328 | return RValue::get(V: Result); |
3329 | } |
3330 | case Builtin::BI__lzcnt16: |
3331 | case Builtin::BI__lzcnt: |
3332 | case Builtin::BI__lzcnt64: { |
3333 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3334 | |
3335 | llvm::Type *ArgType = ArgValue->getType(); |
3336 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: ArgType); |
3337 | |
3338 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
3339 | Value *Result = Builder.CreateCall(Callee: F, Args: {ArgValue, Builder.getFalse()}); |
3340 | if (Result->getType() != ResultType) |
3341 | Result = Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/true, |
3342 | Name: "cast" ); |
3343 | return RValue::get(V: Result); |
3344 | } |
3345 | case Builtin::BI__popcnt16: |
3346 | case Builtin::BI__popcnt: |
3347 | case Builtin::BI__popcnt64: |
3348 | case Builtin::BI__builtin_popcount: |
3349 | case Builtin::BI__builtin_popcountl: |
3350 | case Builtin::BI__builtin_popcountll: |
3351 | case Builtin::BI__builtin_popcountg: { |
3352 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3353 | |
3354 | llvm::Type *ArgType = ArgValue->getType(); |
3355 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctpop, Tys: ArgType); |
3356 | |
3357 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
3358 | Value *Result = Builder.CreateCall(Callee: F, Args: ArgValue); |
3359 | if (Result->getType() != ResultType) |
3360 | Result = |
3361 | Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/ false, Name: "cast" ); |
3362 | return RValue::get(V: Result); |
3363 | } |
3364 | case Builtin::BI__builtin_unpredictable: { |
3365 | // Always return the argument of __builtin_unpredictable. LLVM does not |
3366 | // handle this builtin. Metadata for this builtin should be added directly |
3367 | // to instructions such as branches or switches that use it. |
3368 | return RValue::get(V: EmitScalarExpr(E: E->getArg(Arg: 0))); |
3369 | } |
3370 | case Builtin::BI__builtin_expect: { |
3371 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3372 | llvm::Type *ArgType = ArgValue->getType(); |
3373 | |
3374 | Value *ExpectedValue = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3375 | // Don't generate llvm.expect on -O0 as the backend won't use it for |
3376 | // anything. |
3377 | // Note, we still IRGen ExpectedValue because it could have side-effects. |
3378 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
3379 | return RValue::get(V: ArgValue); |
3380 | |
3381 | Function *FnExpect = CGM.getIntrinsic(IID: Intrinsic::expect, Tys: ArgType); |
3382 | Value *Result = |
3383 | Builder.CreateCall(Callee: FnExpect, Args: {ArgValue, ExpectedValue}, Name: "expval" ); |
3384 | return RValue::get(V: Result); |
3385 | } |
3386 | case Builtin::BI__builtin_expect_with_probability: { |
3387 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3388 | llvm::Type *ArgType = ArgValue->getType(); |
3389 | |
3390 | Value *ExpectedValue = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3391 | llvm::APFloat Probability(0.0); |
3392 | const Expr *ProbArg = E->getArg(Arg: 2); |
3393 | bool EvalSucceed = ProbArg->EvaluateAsFloat(Result&: Probability, Ctx: CGM.getContext()); |
3394 | assert(EvalSucceed && "probability should be able to evaluate as float" ); |
3395 | (void)EvalSucceed; |
3396 | bool LoseInfo = false; |
3397 | Probability.convert(ToSemantics: llvm::APFloat::IEEEdouble(), |
3398 | RM: llvm::RoundingMode::Dynamic, losesInfo: &LoseInfo); |
3399 | llvm::Type *Ty = ConvertType(T: ProbArg->getType()); |
3400 | Constant *Confidence = ConstantFP::get(Ty, V: Probability); |
3401 | // Don't generate llvm.expect.with.probability on -O0 as the backend |
3402 | // won't use it for anything. |
3403 | // Note, we still IRGen ExpectedValue because it could have side-effects. |
3404 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
3405 | return RValue::get(V: ArgValue); |
3406 | |
3407 | Function *FnExpect = |
3408 | CGM.getIntrinsic(IID: Intrinsic::expect_with_probability, Tys: ArgType); |
3409 | Value *Result = Builder.CreateCall( |
3410 | Callee: FnExpect, Args: {ArgValue, ExpectedValue, Confidence}, Name: "expval" ); |
3411 | return RValue::get(V: Result); |
3412 | } |
3413 | case Builtin::BI__builtin_assume_aligned: { |
3414 | const Expr *Ptr = E->getArg(Arg: 0); |
3415 | Value *PtrValue = EmitScalarExpr(E: Ptr); |
3416 | Value *OffsetValue = |
3417 | (E->getNumArgs() > 2) ? EmitScalarExpr(E: E->getArg(Arg: 2)) : nullptr; |
3418 | |
3419 | Value *AlignmentValue = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3420 | ConstantInt *AlignmentCI = cast<ConstantInt>(Val: AlignmentValue); |
3421 | if (AlignmentCI->getValue().ugt(RHS: llvm::Value::MaximumAlignment)) |
3422 | AlignmentCI = ConstantInt::get(Ty: AlignmentCI->getIntegerType(), |
3423 | V: llvm::Value::MaximumAlignment); |
3424 | |
3425 | emitAlignmentAssumption(PtrValue, E: Ptr, |
3426 | /*The expr loc is sufficient.*/ AssumptionLoc: SourceLocation(), |
3427 | Alignment: AlignmentCI, OffsetValue); |
3428 | return RValue::get(V: PtrValue); |
3429 | } |
3430 | case Builtin::BI__assume: |
3431 | case Builtin::BI__builtin_assume: { |
3432 | if (E->getArg(Arg: 0)->HasSideEffects(Ctx: getContext())) |
3433 | return RValue::get(V: nullptr); |
3434 | |
3435 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3436 | Function *FnAssume = CGM.getIntrinsic(IID: Intrinsic::assume); |
3437 | Builder.CreateCall(Callee: FnAssume, Args: ArgValue); |
3438 | return RValue::get(V: nullptr); |
3439 | } |
3440 | case Builtin::BI__builtin_assume_separate_storage: { |
3441 | const Expr *Arg0 = E->getArg(Arg: 0); |
3442 | const Expr *Arg1 = E->getArg(Arg: 1); |
3443 | |
3444 | Value *Value0 = EmitScalarExpr(E: Arg0); |
3445 | Value *Value1 = EmitScalarExpr(E: Arg1); |
3446 | |
3447 | Value *Values[] = {Value0, Value1}; |
3448 | OperandBundleDefT<Value *> OBD("separate_storage" , Values); |
3449 | Builder.CreateAssumption(Cond: ConstantInt::getTrue(Context&: getLLVMContext()), OpBundles: {OBD}); |
3450 | return RValue::get(V: nullptr); |
3451 | } |
3452 | case Builtin::BI__builtin_allow_runtime_check: { |
3453 | StringRef Kind = |
3454 | cast<StringLiteral>(Val: E->getArg(Arg: 0)->IgnoreParenCasts())->getString(); |
3455 | LLVMContext &Ctx = CGM.getLLVMContext(); |
3456 | llvm::Value *Allow = Builder.CreateCall( |
3457 | Callee: CGM.getIntrinsic(IID: llvm::Intrinsic::allow_runtime_check), |
3458 | Args: llvm::MetadataAsValue::get(Context&: Ctx, MD: llvm::MDString::get(Context&: Ctx, Str: Kind))); |
3459 | return RValue::get(V: Allow); |
3460 | } |
3461 | case Builtin::BI__arithmetic_fence: { |
3462 | // Create the builtin call if fast-math is enabled and the target |
3463 | // supports the builtin; otherwise just return the argument. |
3464 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3465 | llvm::FastMathFlags FMF = Builder.getFastMathFlags(); |
3466 | bool isArithmeticFenceEnabled = |
3467 | FMF.allowReassoc() && |
3468 | getContext().getTargetInfo().checkArithmeticFenceSupported(); |
3469 | QualType ArgType = E->getArg(Arg: 0)->getType(); |
3470 | if (ArgType->isComplexType()) { |
3471 | if (isArithmeticFenceEnabled) { |
3472 | QualType ElementType = ArgType->castAs<ComplexType>()->getElementType(); |
3473 | ComplexPairTy ComplexVal = EmitComplexExpr(E: E->getArg(Arg: 0)); |
3474 | Value *Real = Builder.CreateArithmeticFence(Val: ComplexVal.first, |
3475 | DstType: ConvertType(T: ElementType)); |
3476 | Value *Imag = Builder.CreateArithmeticFence(Val: ComplexVal.second, |
3477 | DstType: ConvertType(T: ElementType)); |
3478 | return RValue::getComplex(C: std::make_pair(x&: Real, y&: Imag)); |
3479 | } |
3480 | ComplexPairTy ComplexVal = EmitComplexExpr(E: E->getArg(Arg: 0)); |
3481 | Value *Real = ComplexVal.first; |
3482 | Value *Imag = ComplexVal.second; |
3483 | return RValue::getComplex(C: std::make_pair(x&: Real, y&: Imag)); |
3484 | } |
3485 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3486 | if (isArithmeticFenceEnabled) |
3487 | return RValue::get( |
3488 | V: Builder.CreateArithmeticFence(Val: ArgValue, DstType: ConvertType(T: ArgType))); |
3489 | return RValue::get(V: ArgValue); |
3490 | } |
3491 | case Builtin::BI__builtin_bswap16: |
3492 | case Builtin::BI__builtin_bswap32: |
3493 | case Builtin::BI__builtin_bswap64: |
3494 | case Builtin::BI_byteswap_ushort: |
3495 | case Builtin::BI_byteswap_ulong: |
3496 | case Builtin::BI_byteswap_uint64: { |
3497 | return RValue::get( |
3498 | V: emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, IntrinsicID: Intrinsic::bswap)); |
3499 | } |
3500 | case Builtin::BI__builtin_bitreverse8: |
3501 | case Builtin::BI__builtin_bitreverse16: |
3502 | case Builtin::BI__builtin_bitreverse32: |
3503 | case Builtin::BI__builtin_bitreverse64: { |
3504 | return RValue::get( |
3505 | V: emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, IntrinsicID: Intrinsic::bitreverse)); |
3506 | } |
3507 | case Builtin::BI__builtin_rotateleft8: |
3508 | case Builtin::BI__builtin_rotateleft16: |
3509 | case Builtin::BI__builtin_rotateleft32: |
3510 | case Builtin::BI__builtin_rotateleft64: |
3511 | case Builtin::BI_rotl8: // Microsoft variants of rotate left |
3512 | case Builtin::BI_rotl16: |
3513 | case Builtin::BI_rotl: |
3514 | case Builtin::BI_lrotl: |
3515 | case Builtin::BI_rotl64: |
3516 | return emitRotate(E, IsRotateRight: false); |
3517 | |
3518 | case Builtin::BI__builtin_rotateright8: |
3519 | case Builtin::BI__builtin_rotateright16: |
3520 | case Builtin::BI__builtin_rotateright32: |
3521 | case Builtin::BI__builtin_rotateright64: |
3522 | case Builtin::BI_rotr8: // Microsoft variants of rotate right |
3523 | case Builtin::BI_rotr16: |
3524 | case Builtin::BI_rotr: |
3525 | case Builtin::BI_lrotr: |
3526 | case Builtin::BI_rotr64: |
3527 | return emitRotate(E, IsRotateRight: true); |
3528 | |
3529 | case Builtin::BI__builtin_constant_p: { |
3530 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
3531 | |
3532 | const Expr *Arg = E->getArg(Arg: 0); |
3533 | QualType ArgType = Arg->getType(); |
3534 | // FIXME: The allowance for Obj-C pointers and block pointers is historical |
3535 | // and likely a mistake. |
3536 | if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() && |
3537 | !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType()) |
3538 | // Per the GCC documentation, only numeric constants are recognized after |
3539 | // inlining. |
3540 | return RValue::get(V: ConstantInt::get(Ty: ResultType, V: 0)); |
3541 | |
3542 | if (Arg->HasSideEffects(Ctx: getContext())) |
3543 | // The argument is unevaluated, so be conservative if it might have |
3544 | // side-effects. |
3545 | return RValue::get(V: ConstantInt::get(Ty: ResultType, V: 0)); |
3546 | |
3547 | Value *ArgValue = EmitScalarExpr(E: Arg); |
3548 | if (ArgType->isObjCObjectPointerType()) { |
3549 | // Convert Objective-C objects to id because we cannot distinguish between |
3550 | // LLVM types for Obj-C classes as they are opaque. |
3551 | ArgType = CGM.getContext().getObjCIdType(); |
3552 | ArgValue = Builder.CreateBitCast(V: ArgValue, DestTy: ConvertType(T: ArgType)); |
3553 | } |
3554 | Function *F = |
3555 | CGM.getIntrinsic(IID: Intrinsic::is_constant, Tys: ConvertType(T: ArgType)); |
3556 | Value *Result = Builder.CreateCall(Callee: F, Args: ArgValue); |
3557 | if (Result->getType() != ResultType) |
3558 | Result = Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/false); |
3559 | return RValue::get(V: Result); |
3560 | } |
3561 | case Builtin::BI__builtin_dynamic_object_size: |
3562 | case Builtin::BI__builtin_object_size: { |
3563 | unsigned Type = |
3564 | E->getArg(Arg: 1)->EvaluateKnownConstInt(Ctx: getContext()).getZExtValue(); |
3565 | auto *ResType = cast<llvm::IntegerType>(Val: ConvertType(T: E->getType())); |
3566 | |
3567 | // We pass this builtin onto the optimizer so that it can figure out the |
3568 | // object size in more complex cases. |
3569 | bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size; |
3570 | return RValue::get(V: emitBuiltinObjectSize(E: E->getArg(Arg: 0), Type, ResType, |
3571 | /*EmittedE=*/nullptr, IsDynamic)); |
3572 | } |
3573 | case Builtin::BI__builtin_prefetch: { |
3574 | Value *Locality, *RW, *Address = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3575 | // FIXME: Technically these constants should be of type 'int', yes? |
3576 | RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E: E->getArg(Arg: 1)) : |
3577 | llvm::ConstantInt::get(Ty: Int32Ty, V: 0); |
3578 | Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E: E->getArg(Arg: 2)) : |
3579 | llvm::ConstantInt::get(Ty: Int32Ty, V: 3); |
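// The final operand selects the data cache (1) rather than the instruction
// cache (0); the source-level builtin does not expose this operand.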
3580 | Value *Data = llvm::ConstantInt::get(Ty: Int32Ty, V: 1); |
3581 | Function *F = CGM.getIntrinsic(IID: Intrinsic::prefetch, Tys: Address->getType()); |
3582 | Builder.CreateCall(Callee: F, Args: {Address, RW, Locality, Data}); |
3583 | return RValue::get(V: nullptr); |
3584 | } |
3585 | case Builtin::BI__builtin_readcyclecounter: { |
3586 | Function *F = CGM.getIntrinsic(IID: Intrinsic::readcyclecounter); |
3587 | return RValue::get(V: Builder.CreateCall(Callee: F)); |
3588 | } |
3589 | case Builtin::BI__builtin_readsteadycounter: { |
3590 | Function *F = CGM.getIntrinsic(IID: Intrinsic::readsteadycounter); |
3591 | return RValue::get(V: Builder.CreateCall(Callee: F)); |
3592 | } |
3593 | case Builtin::BI__builtin___clear_cache: { |
3594 | Value *Begin = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3595 | Value *End = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3596 | Function *F = CGM.getIntrinsic(IID: Intrinsic::clear_cache); |
3597 | return RValue::get(V: Builder.CreateCall(Callee: F, Args: {Begin, End})); |
3598 | } |
3599 | case Builtin::BI__builtin_trap: |
3600 | EmitTrapCall(IntrID: Intrinsic::trap); |
3601 | return RValue::get(V: nullptr); |
3602 | case Builtin::BI__builtin_verbose_trap: { |
3603 | llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation(); |
3604 | if (getDebugInfo()) { |
3605 | TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor( |
3606 | TrapLocation, Category: *E->getArg(Arg: 0)->tryEvaluateString(Ctx&: getContext()), |
3607 | FailureMsg: *E->getArg(Arg: 1)->tryEvaluateString(Ctx&: getContext())); |
3608 | } |
3609 | ApplyDebugLocation ApplyTrapDI(*this, TrapLocation); |
3610 | // Currently no attempt is made to prevent traps from being merged. |
3611 | EmitTrapCall(IntrID: Intrinsic::trap); |
3612 | return RValue::get(V: nullptr); |
3613 | } |
3614 | case Builtin::BI__debugbreak: |
3615 | EmitTrapCall(IntrID: Intrinsic::debugtrap); |
3616 | return RValue::get(V: nullptr); |
3617 | case Builtin::BI__builtin_unreachable: { |
3618 | EmitUnreachable(Loc: E->getExprLoc()); |
3619 | |
3620 | // We do need to preserve an insertion point. |
3621 | EmitBlock(BB: createBasicBlock(name: "unreachable.cont" )); |
3622 | |
3623 | return RValue::get(V: nullptr); |
3624 | } |
3625 | |
3626 | case Builtin::BI__builtin_powi: |
3627 | case Builtin::BI__builtin_powif: |
3628 | case Builtin::BI__builtin_powil: { |
3629 | llvm::Value *Src0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3630 | llvm::Value *Src1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3631 | |
3632 | if (Builder.getIsFPConstrained()) { |
3633 | // FIXME: llvm.powi has 2 mangling types, |
3634 | // llvm.experimental.constrained.powi has one. |
3635 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3636 | Function *F = CGM.getIntrinsic(IID: Intrinsic::experimental_constrained_powi, |
3637 | Tys: Src0->getType()); |
3638 | return RValue::get(V: Builder.CreateConstrainedFPCall(Callee: F, Args: { Src0, Src1 })); |
3639 | } |
3640 | |
3641 | Function *F = CGM.getIntrinsic(IID: Intrinsic::powi, |
3642 | Tys: { Src0->getType(), Src1->getType() }); |
3643 | return RValue::get(V: Builder.CreateCall(Callee: F, Args: { Src0, Src1 })); |
3644 | } |
3645 | case Builtin::BI__builtin_frexpl: { |
    // Linux PPC will not be adding further PPCDoubleDouble support; work is
    // in progress to switch the default long double to IEEE format. Emit a
    // libcall for frexpl rather than legalizing PPCDoubleDouble in the
    // backend.
3649 | if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble()) |
3650 | break; |
3651 | [[fallthrough]]; |
3652 | } |
3653 | case Builtin::BI__builtin_frexp: |
3654 | case Builtin::BI__builtin_frexpf: |
3655 | case Builtin::BI__builtin_frexpf128: |
3656 | case Builtin::BI__builtin_frexpf16: |
3657 | return RValue::get(V: emitFrexpBuiltin(CGF&: *this, E, IntrinsicID: Intrinsic::frexp)); |
3658 | case Builtin::BI__builtin_isgreater: |
3659 | case Builtin::BI__builtin_isgreaterequal: |
3660 | case Builtin::BI__builtin_isless: |
3661 | case Builtin::BI__builtin_islessequal: |
3662 | case Builtin::BI__builtin_islessgreater: |
3663 | case Builtin::BI__builtin_isunordered: { |
    // Ordered comparisons (plus isunordered): the arguments are known to be
    // matching scalar floating-point values.
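    // For example, __builtin_isgreater(x, y) becomes an 'ogt' fcmp that is
    // zero-extended to the result type, while __builtin_isunordered uses the
    // 'uno' predicate, which is true when either operand is a NaN.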
3666 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3667 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3668 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3669 | |
3670 | switch (BuiltinID) { |
3671 | default: llvm_unreachable("Unknown ordered comparison" ); |
3672 | case Builtin::BI__builtin_isgreater: |
3673 | LHS = Builder.CreateFCmpOGT(LHS, RHS, Name: "cmp" ); |
3674 | break; |
3675 | case Builtin::BI__builtin_isgreaterequal: |
3676 | LHS = Builder.CreateFCmpOGE(LHS, RHS, Name: "cmp" ); |
3677 | break; |
3678 | case Builtin::BI__builtin_isless: |
3679 | LHS = Builder.CreateFCmpOLT(LHS, RHS, Name: "cmp" ); |
3680 | break; |
3681 | case Builtin::BI__builtin_islessequal: |
3682 | LHS = Builder.CreateFCmpOLE(LHS, RHS, Name: "cmp" ); |
3683 | break; |
3684 | case Builtin::BI__builtin_islessgreater: |
3685 | LHS = Builder.CreateFCmpONE(LHS, RHS, Name: "cmp" ); |
3686 | break; |
3687 | case Builtin::BI__builtin_isunordered: |
3688 | LHS = Builder.CreateFCmpUNO(LHS, RHS, Name: "cmp" ); |
3689 | break; |
3690 | } |
3691 | // ZExt bool to int type. |
3692 | return RValue::get(V: Builder.CreateZExt(V: LHS, DestTy: ConvertType(T: E->getType()))); |
3693 | } |
3694 | |
3695 | case Builtin::BI__builtin_isnan: { |
3696 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3697 | Value *V = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3698 | if (Value *Result = tryUseTestFPKind(CGF&: *this, BuiltinID, V)) |
3699 | return RValue::get(V: Result); |
3700 | return RValue::get( |
3701 | V: Builder.CreateZExt(V: Builder.createIsFPClass(FPNum: V, Test: FPClassTest::fcNan), |
3702 | DestTy: ConvertType(T: E->getType()))); |
3703 | } |
3704 | |
3705 | case Builtin::BI__builtin_issignaling: { |
3706 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3707 | Value *V = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3708 | return RValue::get( |
3709 | V: Builder.CreateZExt(V: Builder.createIsFPClass(FPNum: V, Test: FPClassTest::fcSNan), |
3710 | DestTy: ConvertType(T: E->getType()))); |
3711 | } |
3712 | |
3713 | case Builtin::BI__builtin_isinf: { |
3714 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3715 | Value *V = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3716 | if (Value *Result = tryUseTestFPKind(CGF&: *this, BuiltinID, V)) |
3717 | return RValue::get(V: Result); |
3718 | return RValue::get( |
3719 | V: Builder.CreateZExt(V: Builder.createIsFPClass(FPNum: V, Test: FPClassTest::fcInf), |
3720 | DestTy: ConvertType(T: E->getType()))); |
3721 | } |
3722 | |
3723 | case Builtin::BIfinite: |
3724 | case Builtin::BI__finite: |
3725 | case Builtin::BIfinitef: |
3726 | case Builtin::BI__finitef: |
3727 | case Builtin::BIfinitel: |
3728 | case Builtin::BI__finitel: |
3729 | case Builtin::BI__builtin_isfinite: { |
3730 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3731 | Value *V = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3732 | if (Value *Result = tryUseTestFPKind(CGF&: *this, BuiltinID, V)) |
3733 | return RValue::get(V: Result); |
3734 | return RValue::get( |
3735 | V: Builder.CreateZExt(V: Builder.createIsFPClass(FPNum: V, Test: FPClassTest::fcFinite), |
3736 | DestTy: ConvertType(T: E->getType()))); |
3737 | } |
3738 | |
3739 | case Builtin::BI__builtin_isnormal: { |
3740 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3741 | Value *V = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3742 | return RValue::get( |
3743 | V: Builder.CreateZExt(V: Builder.createIsFPClass(FPNum: V, Test: FPClassTest::fcNormal), |
3744 | DestTy: ConvertType(T: E->getType()))); |
3745 | } |
3746 | |
3747 | case Builtin::BI__builtin_issubnormal: { |
3748 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3749 | Value *V = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3750 | return RValue::get( |
3751 | V: Builder.CreateZExt(V: Builder.createIsFPClass(FPNum: V, Test: FPClassTest::fcSubnormal), |
3752 | DestTy: ConvertType(T: E->getType()))); |
3753 | } |
3754 | |
3755 | case Builtin::BI__builtin_iszero: { |
3756 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3757 | Value *V = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3758 | return RValue::get( |
3759 | V: Builder.CreateZExt(V: Builder.createIsFPClass(FPNum: V, Test: FPClassTest::fcZero), |
3760 | DestTy: ConvertType(T: E->getType()))); |
3761 | } |
3762 | |
3763 | case Builtin::BI__builtin_isfpclass: { |
3764 | Expr::EvalResult Result; |
3765 | if (!E->getArg(Arg: 1)->EvaluateAsInt(Result, Ctx: CGM.getContext())) |
3766 | break; |
3767 | uint64_t Test = Result.Val.getInt().getLimitedValue(); |
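    // 'Test' is a bitmask of FPClassTest flags (fcNan, fcInf, fcNormal, ...);
    // the whole builtin lowers to a single llvm.is.fpclass call with that
    // mask.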
3768 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3769 | Value *V = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3770 | return RValue::get(V: Builder.CreateZExt(V: Builder.createIsFPClass(FPNum: V, Test), |
3771 | DestTy: ConvertType(T: E->getType()))); |
3772 | } |
3773 | |
3774 | case Builtin::BI__builtin_nondeterministic_value: { |
3775 | llvm::Type *Ty = ConvertType(T: E->getArg(Arg: 0)->getType()); |
3776 | |
3777 | Value *Result = PoisonValue::get(T: Ty); |
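    // Freezing the poison value yields an arbitrary but fixed value of the
    // type, which is exactly the semantics this builtin requires.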
3778 | Result = Builder.CreateFreeze(V: Result); |
3779 | |
3780 | return RValue::get(V: Result); |
3781 | } |
3782 | |
3783 | case Builtin::BI__builtin_elementwise_abs: { |
3784 | Value *Result; |
3785 | QualType QT = E->getArg(Arg: 0)->getType(); |
3786 | |
3787 | if (auto *VecTy = QT->getAs<VectorType>()) |
3788 | QT = VecTy->getElementType(); |
3789 | if (QT->isIntegerType()) |
3790 | Result = Builder.CreateBinaryIntrinsic( |
3791 | ID: llvm::Intrinsic::abs, LHS: EmitScalarExpr(E: E->getArg(Arg: 0)), |
3792 | RHS: Builder.getFalse(), FMFSource: nullptr, Name: "elt.abs" ); |
3793 | else |
3794 | Result = emitBuiltinWithOneOverloadedType<1>( |
3795 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::fabs, Name: "elt.abs" ); |
3796 | |
3797 | return RValue::get(V: Result); |
3798 | } |
3799 | case Builtin::BI__builtin_elementwise_acos: |
3800 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3801 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::acos, Name: "elt.acos" )); |
3802 | case Builtin::BI__builtin_elementwise_asin: |
3803 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3804 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::asin, Name: "elt.asin" )); |
3805 | case Builtin::BI__builtin_elementwise_atan: |
3806 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3807 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::atan, Name: "elt.atan" )); |
3808 | case Builtin::BI__builtin_elementwise_ceil: |
3809 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3810 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::ceil, Name: "elt.ceil" )); |
3811 | case Builtin::BI__builtin_elementwise_exp: |
3812 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3813 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::exp, Name: "elt.exp" )); |
3814 | case Builtin::BI__builtin_elementwise_exp2: |
3815 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3816 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::exp2, Name: "elt.exp2" )); |
3817 | case Builtin::BI__builtin_elementwise_log: |
3818 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3819 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::log, Name: "elt.log" )); |
3820 | case Builtin::BI__builtin_elementwise_log2: |
3821 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3822 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::log2, Name: "elt.log2" )); |
3823 | case Builtin::BI__builtin_elementwise_log10: |
3824 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3825 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::log10, Name: "elt.log10" )); |
3826 | case Builtin::BI__builtin_elementwise_pow: { |
3827 | return RValue::get( |
3828 | V: emitBuiltinWithOneOverloadedType<2>(CGF&: *this, E, IntrinsicID: llvm::Intrinsic::pow)); |
3829 | } |
3830 | case Builtin::BI__builtin_elementwise_bitreverse: |
3831 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3832 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::bitreverse, Name: "elt.bitreverse" )); |
3833 | case Builtin::BI__builtin_elementwise_cos: |
3834 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3835 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::cos, Name: "elt.cos" )); |
3836 | case Builtin::BI__builtin_elementwise_cosh: |
3837 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3838 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::cosh, Name: "elt.cosh" )); |
3839 | case Builtin::BI__builtin_elementwise_floor: |
3840 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3841 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::floor, Name: "elt.floor" )); |
3842 | case Builtin::BI__builtin_elementwise_roundeven: |
3843 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3844 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::roundeven, Name: "elt.roundeven" )); |
3845 | case Builtin::BI__builtin_elementwise_round: |
3846 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3847 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::round, Name: "elt.round" )); |
3848 | case Builtin::BI__builtin_elementwise_rint: |
3849 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3850 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::rint, Name: "elt.rint" )); |
3851 | case Builtin::BI__builtin_elementwise_nearbyint: |
3852 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3853 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::nearbyint, Name: "elt.nearbyint" )); |
3854 | case Builtin::BI__builtin_elementwise_sin: |
3855 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3856 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::sin, Name: "elt.sin" )); |
3857 | case Builtin::BI__builtin_elementwise_sinh: |
3858 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3859 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::sinh, Name: "elt.sinh" )); |
3860 | case Builtin::BI__builtin_elementwise_tan: |
3861 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3862 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::tan, Name: "elt.tan" )); |
3863 | case Builtin::BI__builtin_elementwise_tanh: |
3864 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3865 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::tanh, Name: "elt.tanh" )); |
3866 | case Builtin::BI__builtin_elementwise_trunc: |
3867 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3868 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::trunc, Name: "elt.trunc" )); |
3869 | case Builtin::BI__builtin_elementwise_canonicalize: |
3870 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3871 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::canonicalize, Name: "elt.canonicalize" )); |
3872 | case Builtin::BI__builtin_elementwise_copysign: |
3873 | return RValue::get(V: emitBuiltinWithOneOverloadedType<2>( |
3874 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::copysign)); |
3875 | case Builtin::BI__builtin_elementwise_fma: |
3876 | return RValue::get( |
3877 | V: emitBuiltinWithOneOverloadedType<3>(CGF&: *this, E, IntrinsicID: llvm::Intrinsic::fma)); |
3878 | case Builtin::BI__builtin_elementwise_add_sat: |
3879 | case Builtin::BI__builtin_elementwise_sub_sat: { |
3880 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3881 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3882 | Value *Result; |
3883 | assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected" ); |
3884 | QualType Ty = E->getArg(Arg: 0)->getType(); |
3885 | if (auto *VecTy = Ty->getAs<VectorType>()) |
3886 | Ty = VecTy->getElementType(); |
3887 | bool IsSigned = Ty->isSignedIntegerType(); |
3888 | unsigned Opc; |
3889 | if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat) |
3890 | Opc = IsSigned ? llvm::Intrinsic::sadd_sat : llvm::Intrinsic::uadd_sat; |
3891 | else |
3892 | Opc = IsSigned ? llvm::Intrinsic::ssub_sat : llvm::Intrinsic::usub_sat; |
3893 | Result = Builder.CreateBinaryIntrinsic(ID: Opc, LHS: Op0, RHS: Op1, FMFSource: nullptr, Name: "elt.sat" ); |
3894 | return RValue::get(V: Result); |
3895 | } |
3896 | |
3897 | case Builtin::BI__builtin_elementwise_max: { |
3898 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3899 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3900 | Value *Result; |
3901 | if (Op0->getType()->isIntOrIntVectorTy()) { |
3902 | QualType Ty = E->getArg(Arg: 0)->getType(); |
3903 | if (auto *VecTy = Ty->getAs<VectorType>()) |
3904 | Ty = VecTy->getElementType(); |
3905 | Result = Builder.CreateBinaryIntrinsic(ID: Ty->isSignedIntegerType() |
3906 | ? llvm::Intrinsic::smax |
3907 | : llvm::Intrinsic::umax, |
3908 | LHS: Op0, RHS: Op1, FMFSource: nullptr, Name: "elt.max" ); |
3909 | } else |
3910 | Result = Builder.CreateMaxNum(LHS: Op0, RHS: Op1, Name: "elt.max" ); |
3911 | return RValue::get(V: Result); |
3912 | } |
3913 | case Builtin::BI__builtin_elementwise_min: { |
3914 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3915 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
3916 | Value *Result; |
3917 | if (Op0->getType()->isIntOrIntVectorTy()) { |
3918 | QualType Ty = E->getArg(Arg: 0)->getType(); |
3919 | if (auto *VecTy = Ty->getAs<VectorType>()) |
3920 | Ty = VecTy->getElementType(); |
3921 | Result = Builder.CreateBinaryIntrinsic(ID: Ty->isSignedIntegerType() |
3922 | ? llvm::Intrinsic::smin |
3923 | : llvm::Intrinsic::umin, |
3924 | LHS: Op0, RHS: Op1, FMFSource: nullptr, Name: "elt.min" ); |
3925 | } else |
3926 | Result = Builder.CreateMinNum(LHS: Op0, RHS: Op1, Name: "elt.min" ); |
3927 | return RValue::get(V: Result); |
3928 | } |
3929 | |
3930 | case Builtin::BI__builtin_reduce_max: { |
3931 | auto GetIntrinsicID = [this](QualType QT) { |
3932 | if (auto *VecTy = QT->getAs<VectorType>()) |
3933 | QT = VecTy->getElementType(); |
3934 | else if (QT->isSizelessVectorType()) |
3935 | QT = QT->getSizelessVectorEltType(Ctx: CGM.getContext()); |
3936 | |
3937 | if (QT->isSignedIntegerType()) |
3938 | return llvm::Intrinsic::vector_reduce_smax; |
3939 | if (QT->isUnsignedIntegerType()) |
3940 | return llvm::Intrinsic::vector_reduce_umax; |
3941 | assert(QT->isFloatingType() && "must have a float here" ); |
3942 | return llvm::Intrinsic::vector_reduce_fmax; |
3943 | }; |
3944 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
        CGF&: *this, E, IntrinsicID: GetIntrinsicID(E->getArg(Arg: 0)->getType()), Name: "rdx.max" ));
3946 | } |
3947 | |
3948 | case Builtin::BI__builtin_reduce_min: { |
3949 | auto GetIntrinsicID = [this](QualType QT) { |
3950 | if (auto *VecTy = QT->getAs<VectorType>()) |
3951 | QT = VecTy->getElementType(); |
3952 | else if (QT->isSizelessVectorType()) |
3953 | QT = QT->getSizelessVectorEltType(Ctx: CGM.getContext()); |
3954 | |
3955 | if (QT->isSignedIntegerType()) |
3956 | return llvm::Intrinsic::vector_reduce_smin; |
3957 | if (QT->isUnsignedIntegerType()) |
3958 | return llvm::Intrinsic::vector_reduce_umin; |
3959 | assert(QT->isFloatingType() && "must have a float here" ); |
3960 | return llvm::Intrinsic::vector_reduce_fmin; |
3961 | }; |
3962 | |
3963 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3964 | CGF&: *this, E, IntrinsicID: GetIntrinsicID(E->getArg(Arg: 0)->getType()), Name: "rdx.min" )); |
3965 | } |
3966 | |
3967 | case Builtin::BI__builtin_reduce_add: |
3968 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3969 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::vector_reduce_add, Name: "rdx.add" )); |
3970 | case Builtin::BI__builtin_reduce_mul: |
3971 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3972 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::vector_reduce_mul, Name: "rdx.mul" )); |
3973 | case Builtin::BI__builtin_reduce_xor: |
3974 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3975 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::vector_reduce_xor, Name: "rdx.xor" )); |
3976 | case Builtin::BI__builtin_reduce_or: |
3977 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3978 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::vector_reduce_or, Name: "rdx.or" )); |
3979 | case Builtin::BI__builtin_reduce_and: |
3980 | return RValue::get(V: emitBuiltinWithOneOverloadedType<1>( |
3981 | CGF&: *this, E, IntrinsicID: llvm::Intrinsic::vector_reduce_and, Name: "rdx.and" )); |
3982 | |
3983 | case Builtin::BI__builtin_matrix_transpose: { |
3984 | auto *MatrixTy = E->getArg(Arg: 0)->getType()->castAs<ConstantMatrixType>(); |
3985 | Value *MatValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
3986 | MatrixBuilder MB(Builder); |
3987 | Value *Result = MB.CreateMatrixTranspose(Matrix: MatValue, Rows: MatrixTy->getNumRows(), |
3988 | Columns: MatrixTy->getNumColumns()); |
3989 | return RValue::get(V: Result); |
3990 | } |
3991 | |
3992 | case Builtin::BI__builtin_matrix_column_major_load: { |
3993 | MatrixBuilder MB(Builder); |
    // Emit everything that isn't dependent on the type of the first argument.
3995 | Value *Stride = EmitScalarExpr(E: E->getArg(Arg: 3)); |
3996 | const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>(); |
3997 | auto *PtrTy = E->getArg(Arg: 0)->getType()->getAs<PointerType>(); |
3998 | assert(PtrTy && "arg0 must be of pointer type" ); |
3999 | bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); |
4000 | |
4001 | Address Src = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4002 | EmitNonNullArgCheck(RV: RValue::get(V: Src.emitRawPointer(CGF&: *this)), |
4003 | ArgType: E->getArg(Arg: 0)->getType(), ArgLoc: E->getArg(Arg: 0)->getExprLoc(), AC: FD, |
4004 | ParmNum: 0); |
4005 | Value *Result = MB.CreateColumnMajorLoad( |
4006 | EltTy: Src.getElementType(), DataPtr: Src.emitRawPointer(CGF&: *this), |
4007 | Alignment: Align(Src.getAlignment().getQuantity()), Stride, IsVolatile, |
4008 | Rows: ResultTy->getNumRows(), Columns: ResultTy->getNumColumns(), Name: "matrix" ); |
4009 | return RValue::get(V: Result); |
4010 | } |
4011 | |
4012 | case Builtin::BI__builtin_matrix_column_major_store: { |
4013 | MatrixBuilder MB(Builder); |
4014 | Value *Matrix = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4015 | Address Dst = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
4016 | Value *Stride = EmitScalarExpr(E: E->getArg(Arg: 2)); |
4017 | |
4018 | const auto *MatrixTy = E->getArg(Arg: 0)->getType()->getAs<ConstantMatrixType>(); |
4019 | auto *PtrTy = E->getArg(Arg: 1)->getType()->getAs<PointerType>(); |
4020 | assert(PtrTy && "arg1 must be of pointer type" ); |
4021 | bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); |
4022 | |
4023 | EmitNonNullArgCheck(RV: RValue::get(V: Dst.emitRawPointer(CGF&: *this)), |
4024 | ArgType: E->getArg(Arg: 1)->getType(), ArgLoc: E->getArg(Arg: 1)->getExprLoc(), AC: FD, |
4025 | ParmNum: 0); |
4026 | Value *Result = MB.CreateColumnMajorStore( |
4027 | Matrix, Ptr: Dst.emitRawPointer(CGF&: *this), |
4028 | Alignment: Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile, |
4029 | Rows: MatrixTy->getNumRows(), Columns: MatrixTy->getNumColumns()); |
4030 | return RValue::get(V: Result); |
4031 | } |
4032 | |
4033 | case Builtin::BI__builtin_isinf_sign: { |
4034 | // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0 |
4035 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
4036 | // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. |
4037 | Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4038 | Value *AbsArg = EmitFAbs(CGF&: *this, V: Arg); |
4039 | Value *IsInf = Builder.CreateFCmpOEQ( |
4040 | LHS: AbsArg, RHS: ConstantFP::getInfinity(Ty: Arg->getType()), Name: "isinf" ); |
4041 | Value *IsNeg = EmitSignBit(CGF&: *this, V: Arg); |
4042 | |
4043 | llvm::Type *IntTy = ConvertType(T: E->getType()); |
4044 | Value *Zero = Constant::getNullValue(Ty: IntTy); |
4045 | Value *One = ConstantInt::get(Ty: IntTy, V: 1); |
4046 | Value *NegativeOne = ConstantInt::get(Ty: IntTy, V: -1); |
4047 | Value *SignResult = Builder.CreateSelect(C: IsNeg, True: NegativeOne, False: One); |
4048 | Value *Result = Builder.CreateSelect(C: IsInf, True: SignResult, False: Zero); |
4049 | return RValue::get(V: Result); |
4050 | } |
4051 | |
4052 | case Builtin::BI__builtin_flt_rounds: { |
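    // llvm.get.rounding returns the current rounding mode using the same
    // encoding as C's FLT_ROUNDS: -1 indeterminable, 0 toward zero, 1 to
    // nearest, 2 toward +infinity, 3 toward -infinity.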
4053 | Function *F = CGM.getIntrinsic(IID: Intrinsic::get_rounding); |
4054 | |
4055 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
4056 | Value *Result = Builder.CreateCall(Callee: F); |
4057 | if (Result->getType() != ResultType) |
4058 | Result = Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/true, |
4059 | Name: "cast" ); |
4060 | return RValue::get(V: Result); |
4061 | } |
4062 | |
4063 | case Builtin::BI__builtin_set_flt_rounds: { |
4064 | Function *F = CGM.getIntrinsic(IID: Intrinsic::set_rounding); |
4065 | |
4066 | Value *V = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4067 | Builder.CreateCall(Callee: F, Args: V); |
4068 | return RValue::get(V: nullptr); |
4069 | } |
4070 | |
4071 | case Builtin::BI__builtin_fpclassify: { |
4072 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
4073 | // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. |
4074 | Value *V = EmitScalarExpr(E: E->getArg(Arg: 5)); |
4075 | llvm::Type *Ty = ConvertType(T: E->getArg(Arg: 5)->getType()); |
4076 | |
4077 | // Create Result |
4078 | BasicBlock *Begin = Builder.GetInsertBlock(); |
4079 | BasicBlock *End = createBasicBlock(name: "fpclassify_end" , parent: this->CurFn); |
4080 | Builder.SetInsertPoint(End); |
4081 | PHINode *Result = |
4082 | Builder.CreatePHI(Ty: ConvertType(T: E->getArg(Arg: 0)->getType()), NumReservedValues: 4, |
4083 | Name: "fpclassify_result" ); |
4084 | |
4085 | // if (V==0) return FP_ZERO |
4086 | Builder.SetInsertPoint(Begin); |
4087 | Value *IsZero = Builder.CreateFCmpOEQ(LHS: V, RHS: Constant::getNullValue(Ty), |
4088 | Name: "iszero" ); |
4089 | Value *ZeroLiteral = EmitScalarExpr(E: E->getArg(Arg: 4)); |
4090 | BasicBlock *NotZero = createBasicBlock(name: "fpclassify_not_zero" , parent: this->CurFn); |
4091 | Builder.CreateCondBr(Cond: IsZero, True: End, False: NotZero); |
4092 | Result->addIncoming(V: ZeroLiteral, BB: Begin); |
4093 | |
4094 | // if (V != V) return FP_NAN |
4095 | Builder.SetInsertPoint(NotZero); |
4096 | Value *IsNan = Builder.CreateFCmpUNO(LHS: V, RHS: V, Name: "cmp" ); |
4097 | Value *NanLiteral = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4098 | BasicBlock *NotNan = createBasicBlock(name: "fpclassify_not_nan" , parent: this->CurFn); |
4099 | Builder.CreateCondBr(Cond: IsNan, True: End, False: NotNan); |
4100 | Result->addIncoming(V: NanLiteral, BB: NotZero); |
4101 | |
4102 | // if (fabs(V) == infinity) return FP_INFINITY |
4103 | Builder.SetInsertPoint(NotNan); |
4104 | Value *VAbs = EmitFAbs(CGF&: *this, V); |
4105 | Value *IsInf = |
4106 | Builder.CreateFCmpOEQ(LHS: VAbs, RHS: ConstantFP::getInfinity(Ty: V->getType()), |
4107 | Name: "isinf" ); |
4108 | Value *InfLiteral = EmitScalarExpr(E: E->getArg(Arg: 1)); |
4109 | BasicBlock *NotInf = createBasicBlock(name: "fpclassify_not_inf" , parent: this->CurFn); |
4110 | Builder.CreateCondBr(Cond: IsInf, True: End, False: NotInf); |
4111 | Result->addIncoming(V: InfLiteral, BB: NotNan); |
4112 | |
4113 | // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL |
4114 | Builder.SetInsertPoint(NotInf); |
4115 | APFloat Smallest = APFloat::getSmallestNormalized( |
4116 | Sem: getContext().getFloatTypeSemantics(T: E->getArg(Arg: 5)->getType())); |
4117 | Value *IsNormal = |
4118 | Builder.CreateFCmpUGE(LHS: VAbs, RHS: ConstantFP::get(Context&: V->getContext(), V: Smallest), |
4119 | Name: "isnormal" ); |
4120 | Value *NormalResult = |
4121 | Builder.CreateSelect(C: IsNormal, True: EmitScalarExpr(E: E->getArg(Arg: 2)), |
4122 | False: EmitScalarExpr(E: E->getArg(Arg: 3))); |
4123 | Builder.CreateBr(Dest: End); |
4124 | Result->addIncoming(V: NormalResult, BB: NotInf); |
4125 | |
4126 | // return Result |
4127 | Builder.SetInsertPoint(End); |
4128 | return RValue::get(V: Result); |
4129 | } |
4130 | |
4131 | // An alloca will always return a pointer to the alloca (stack) address |
4132 | // space. This address space need not be the same as the AST / Language |
4133 | // default (e.g. in C / C++ auto vars are in the generic address space). At |
4134 | // the AST level this is handled within CreateTempAlloca et al., but for the |
4135 | // builtin / dynamic alloca we have to handle it here. We use an explicit cast |
4136 | // instead of passing an AS to CreateAlloca so as to not inhibit optimisation. |
4137 | case Builtin::BIalloca: |
4138 | case Builtin::BI_alloca: |
4139 | case Builtin::BI__builtin_alloca_uninitialized: |
4140 | case Builtin::BI__builtin_alloca: { |
4141 | Value *Size = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4142 | const TargetInfo &TI = getContext().getTargetInfo(); |
4143 | // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__. |
4144 | const Align SuitableAlignmentInBytes = |
4145 | CGM.getContext() |
4146 | .toCharUnitsFromBits(BitSize: TI.getSuitableAlign()) |
4147 | .getAsAlign(); |
4148 | AllocaInst *AI = Builder.CreateAlloca(Ty: Builder.getInt8Ty(), ArraySize: Size); |
4149 | AI->setAlignment(SuitableAlignmentInBytes); |
4150 | if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized) |
4151 | initializeAlloca(CGF&: *this, AI, Size, AlignmentInBytes: SuitableAlignmentInBytes); |
4152 | LangAS AAS = getASTAllocaAddressSpace(); |
4153 | LangAS EAS = E->getType()->getPointeeType().getAddressSpace(); |
4154 | if (AAS != EAS) { |
4155 | llvm::Type *Ty = CGM.getTypes().ConvertType(T: E->getType()); |
4156 | return RValue::get(V: getTargetHooks().performAddrSpaceCast(CGF&: *this, V: AI, SrcAddr: AAS, |
4157 | DestAddr: EAS, DestTy: Ty)); |
4158 | } |
4159 | return RValue::get(V: AI); |
4160 | } |
4161 | |
4162 | case Builtin::BI__builtin_alloca_with_align_uninitialized: |
4163 | case Builtin::BI__builtin_alloca_with_align: { |
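    // Unlike plain __builtin_alloca, the second argument is an alignment
    // expressed in bits (matching GCC), so it is converted to bytes below.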
4164 | Value *Size = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4165 | Value *AlignmentInBitsValue = EmitScalarExpr(E: E->getArg(Arg: 1)); |
4166 | auto *AlignmentInBitsCI = cast<ConstantInt>(Val: AlignmentInBitsValue); |
4167 | unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue(); |
4168 | const Align AlignmentInBytes = |
4169 | CGM.getContext().toCharUnitsFromBits(BitSize: AlignmentInBits).getAsAlign(); |
4170 | AllocaInst *AI = Builder.CreateAlloca(Ty: Builder.getInt8Ty(), ArraySize: Size); |
4171 | AI->setAlignment(AlignmentInBytes); |
4172 | if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized) |
4173 | initializeAlloca(CGF&: *this, AI, Size, AlignmentInBytes); |
4174 | LangAS AAS = getASTAllocaAddressSpace(); |
4175 | LangAS EAS = E->getType()->getPointeeType().getAddressSpace(); |
4176 | if (AAS != EAS) { |
4177 | llvm::Type *Ty = CGM.getTypes().ConvertType(T: E->getType()); |
4178 | return RValue::get(V: getTargetHooks().performAddrSpaceCast(CGF&: *this, V: AI, SrcAddr: AAS, |
4179 | DestAddr: EAS, DestTy: Ty)); |
4180 | } |
4181 | return RValue::get(V: AI); |
4182 | } |
4183 | |
4184 | case Builtin::BIbzero: |
4185 | case Builtin::BI__builtin_bzero: { |
4186 | Address Dest = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4187 | Value *SizeVal = EmitScalarExpr(E: E->getArg(Arg: 1)); |
4188 | EmitNonNullArgCheck(Addr: Dest, ArgType: E->getArg(Arg: 0)->getType(), |
4189 | ArgLoc: E->getArg(Arg: 0)->getExprLoc(), AC: FD, ParmNum: 0); |
4190 | Builder.CreateMemSet(Dest, Value: Builder.getInt8(C: 0), Size: SizeVal, IsVolatile: false); |
4191 | return RValue::get(V: nullptr); |
4192 | } |
4193 | |
4194 | case Builtin::BIbcopy: |
4195 | case Builtin::BI__builtin_bcopy: { |
4196 | Address Src = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4197 | Address Dest = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
4198 | Value *SizeVal = EmitScalarExpr(E: E->getArg(Arg: 2)); |
4199 | EmitNonNullArgCheck(RV: RValue::get(V: Src.emitRawPointer(CGF&: *this)), |
4200 | ArgType: E->getArg(Arg: 0)->getType(), ArgLoc: E->getArg(Arg: 0)->getExprLoc(), AC: FD, |
4201 | ParmNum: 0); |
4202 | EmitNonNullArgCheck(RV: RValue::get(V: Dest.emitRawPointer(CGF&: *this)), |
4203 | ArgType: E->getArg(Arg: 1)->getType(), ArgLoc: E->getArg(Arg: 1)->getExprLoc(), AC: FD, |
4204 | ParmNum: 0); |
4205 | Builder.CreateMemMove(Dest, Src, Size: SizeVal, IsVolatile: false); |
4206 | return RValue::get(V: nullptr); |
4207 | } |
4208 | |
4209 | case Builtin::BImemcpy: |
4210 | case Builtin::BI__builtin_memcpy: |
4211 | case Builtin::BImempcpy: |
4212 | case Builtin::BI__builtin_mempcpy: { |
4213 | Address Dest = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4214 | Address Src = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
4215 | Value *SizeVal = EmitScalarExpr(E: E->getArg(Arg: 2)); |
4216 | EmitArgCheck(TCK_Store, Dest, E->getArg(Arg: 0), 0); |
4217 | EmitArgCheck(TCK_Load, Src, E->getArg(Arg: 1), 1); |
4218 | Builder.CreateMemCpy(Dest, Src, Size: SizeVal, IsVolatile: false); |
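    // mempcpy returns a pointer one past the last byte written (dest + size),
    // whereas memcpy returns the destination pointer itself.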
4219 | if (BuiltinID == Builtin::BImempcpy || |
4220 | BuiltinID == Builtin::BI__builtin_mempcpy) |
4221 | return RValue::get(V: Builder.CreateInBoundsGEP( |
4222 | Ty: Dest.getElementType(), Ptr: Dest.emitRawPointer(CGF&: *this), IdxList: SizeVal)); |
4223 | else |
4224 | return RValue::get(Addr: Dest, CGF&: *this); |
4225 | } |
4226 | |
4227 | case Builtin::BI__builtin_memcpy_inline: { |
4228 | Address Dest = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4229 | Address Src = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
4230 | uint64_t Size = |
4231 | E->getArg(Arg: 2)->EvaluateKnownConstInt(Ctx: getContext()).getZExtValue(); |
4232 | EmitArgCheck(TCK_Store, Dest, E->getArg(Arg: 0), 0); |
4233 | EmitArgCheck(TCK_Load, Src, E->getArg(Arg: 1), 1); |
4234 | Builder.CreateMemCpyInline(Dest, Src, Size); |
4235 | return RValue::get(V: nullptr); |
4236 | } |
4237 | |
4238 | case Builtin::BI__builtin_char_memchr: |
4239 | BuiltinID = Builtin::BI__builtin_memchr; |
4240 | break; |
4241 | |
4242 | case Builtin::BI__builtin___memcpy_chk: { |
4243 | // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2. |
4244 | Expr::EvalResult SizeResult, DstSizeResult; |
4245 | if (!E->getArg(Arg: 2)->EvaluateAsInt(Result&: SizeResult, Ctx: CGM.getContext()) || |
4246 | !E->getArg(Arg: 3)->EvaluateAsInt(Result&: DstSizeResult, Ctx: CGM.getContext())) |
4247 | break; |
4248 | llvm::APSInt Size = SizeResult.Val.getInt(); |
4249 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
4250 | if (Size.ugt(RHS: DstSize)) |
4251 | break; |
4252 | Address Dest = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4253 | Address Src = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
4254 | Value *SizeVal = llvm::ConstantInt::get(Context&: Builder.getContext(), V: Size); |
4255 | Builder.CreateMemCpy(Dest, Src, Size: SizeVal, IsVolatile: false); |
4256 | return RValue::get(Addr: Dest, CGF&: *this); |
4257 | } |
4258 | |
4259 | case Builtin::BI__builtin_objc_memmove_collectable: { |
4260 | Address DestAddr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4261 | Address SrcAddr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
4262 | Value *SizeVal = EmitScalarExpr(E: E->getArg(Arg: 2)); |
4263 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF&: *this, |
4264 | DestPtr: DestAddr, SrcPtr: SrcAddr, Size: SizeVal); |
4265 | return RValue::get(Addr: DestAddr, CGF&: *this); |
4266 | } |
4267 | |
4268 | case Builtin::BI__builtin___memmove_chk: { |
4269 | // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2. |
4270 | Expr::EvalResult SizeResult, DstSizeResult; |
4271 | if (!E->getArg(Arg: 2)->EvaluateAsInt(Result&: SizeResult, Ctx: CGM.getContext()) || |
4272 | !E->getArg(Arg: 3)->EvaluateAsInt(Result&: DstSizeResult, Ctx: CGM.getContext())) |
4273 | break; |
4274 | llvm::APSInt Size = SizeResult.Val.getInt(); |
4275 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
4276 | if (Size.ugt(RHS: DstSize)) |
4277 | break; |
4278 | Address Dest = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4279 | Address Src = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
4280 | Value *SizeVal = llvm::ConstantInt::get(Context&: Builder.getContext(), V: Size); |
4281 | Builder.CreateMemMove(Dest, Src, Size: SizeVal, IsVolatile: false); |
4282 | return RValue::get(Addr: Dest, CGF&: *this); |
4283 | } |
4284 | |
4285 | case Builtin::BImemmove: |
4286 | case Builtin::BI__builtin_memmove: { |
4287 | Address Dest = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4288 | Address Src = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
4289 | Value *SizeVal = EmitScalarExpr(E: E->getArg(Arg: 2)); |
4290 | EmitArgCheck(TCK_Store, Dest, E->getArg(Arg: 0), 0); |
4291 | EmitArgCheck(TCK_Load, Src, E->getArg(Arg: 1), 1); |
4292 | Builder.CreateMemMove(Dest, Src, Size: SizeVal, IsVolatile: false); |
4293 | return RValue::get(Addr: Dest, CGF&: *this); |
4294 | } |
4295 | case Builtin::BImemset: |
4296 | case Builtin::BI__builtin_memset: { |
4297 | Address Dest = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4298 | Value *ByteVal = Builder.CreateTrunc(V: EmitScalarExpr(E: E->getArg(Arg: 1)), |
4299 | DestTy: Builder.getInt8Ty()); |
4300 | Value *SizeVal = EmitScalarExpr(E: E->getArg(Arg: 2)); |
4301 | EmitNonNullArgCheck(Addr: Dest, ArgType: E->getArg(Arg: 0)->getType(), |
4302 | ArgLoc: E->getArg(Arg: 0)->getExprLoc(), AC: FD, ParmNum: 0); |
4303 | Builder.CreateMemSet(Dest, Value: ByteVal, Size: SizeVal, IsVolatile: false); |
4304 | return RValue::get(Addr: Dest, CGF&: *this); |
4305 | } |
4306 | case Builtin::BI__builtin_memset_inline: { |
4307 | Address Dest = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4308 | Value *ByteVal = |
4309 | Builder.CreateTrunc(V: EmitScalarExpr(E: E->getArg(Arg: 1)), DestTy: Builder.getInt8Ty()); |
4310 | uint64_t Size = |
4311 | E->getArg(Arg: 2)->EvaluateKnownConstInt(Ctx: getContext()).getZExtValue(); |
4312 | EmitNonNullArgCheck(RV: RValue::get(V: Dest.emitRawPointer(CGF&: *this)), |
4313 | ArgType: E->getArg(Arg: 0)->getType(), ArgLoc: E->getArg(Arg: 0)->getExprLoc(), AC: FD, |
4314 | ParmNum: 0); |
4315 | Builder.CreateMemSetInline(Dest, Value: ByteVal, Size); |
4316 | return RValue::get(V: nullptr); |
4317 | } |
4318 | case Builtin::BI__builtin___memset_chk: { |
4319 | // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2. |
4320 | Expr::EvalResult SizeResult, DstSizeResult; |
4321 | if (!E->getArg(Arg: 2)->EvaluateAsInt(Result&: SizeResult, Ctx: CGM.getContext()) || |
4322 | !E->getArg(Arg: 3)->EvaluateAsInt(Result&: DstSizeResult, Ctx: CGM.getContext())) |
4323 | break; |
4324 | llvm::APSInt Size = SizeResult.Val.getInt(); |
4325 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
4326 | if (Size.ugt(RHS: DstSize)) |
4327 | break; |
4328 | Address Dest = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4329 | Value *ByteVal = Builder.CreateTrunc(V: EmitScalarExpr(E: E->getArg(Arg: 1)), |
4330 | DestTy: Builder.getInt8Ty()); |
4331 | Value *SizeVal = llvm::ConstantInt::get(Context&: Builder.getContext(), V: Size); |
4332 | Builder.CreateMemSet(Dest, Value: ByteVal, Size: SizeVal, IsVolatile: false); |
4333 | return RValue::get(Addr: Dest, CGF&: *this); |
4334 | } |
4335 | case Builtin::BI__builtin_wmemchr: { |
4336 | // The MSVC runtime library does not provide a definition of wmemchr, so we |
4337 | // need an inline implementation. |
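    // The expansion below is roughly:
    //   for (; n; ++s, --n)
    //     if (*s == c)
    //       return (wchar_t *)s;
    //   return nullptr;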
4338 | if (!getTarget().getTriple().isOSMSVCRT()) |
4339 | break; |
4340 | |
4341 | llvm::Type *WCharTy = ConvertType(T: getContext().WCharTy); |
4342 | Value *Str = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4343 | Value *Chr = EmitScalarExpr(E: E->getArg(Arg: 1)); |
4344 | Value *Size = EmitScalarExpr(E: E->getArg(Arg: 2)); |
4345 | |
4346 | BasicBlock *Entry = Builder.GetInsertBlock(); |
4347 | BasicBlock *CmpEq = createBasicBlock(name: "wmemchr.eq" ); |
4348 | BasicBlock *Next = createBasicBlock(name: "wmemchr.next" ); |
4349 | BasicBlock *Exit = createBasicBlock(name: "wmemchr.exit" ); |
4350 | Value *SizeEq0 = Builder.CreateICmpEQ(LHS: Size, RHS: ConstantInt::get(Ty: SizeTy, V: 0)); |
4351 | Builder.CreateCondBr(Cond: SizeEq0, True: Exit, False: CmpEq); |
4352 | |
4353 | EmitBlock(BB: CmpEq); |
4354 | PHINode *StrPhi = Builder.CreatePHI(Ty: Str->getType(), NumReservedValues: 2); |
4355 | StrPhi->addIncoming(V: Str, BB: Entry); |
4356 | PHINode *SizePhi = Builder.CreatePHI(Ty: SizeTy, NumReservedValues: 2); |
4357 | SizePhi->addIncoming(V: Size, BB: Entry); |
4358 | CharUnits WCharAlign = |
4359 | getContext().getTypeAlignInChars(T: getContext().WCharTy); |
4360 | Value *StrCh = Builder.CreateAlignedLoad(Ty: WCharTy, Addr: StrPhi, Align: WCharAlign); |
4361 | Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(Ty: WCharTy, Ptr: StrPhi, Idx0: 0); |
4362 | Value *StrEqChr = Builder.CreateICmpEQ(LHS: StrCh, RHS: Chr); |
4363 | Builder.CreateCondBr(Cond: StrEqChr, True: Exit, False: Next); |
4364 | |
4365 | EmitBlock(BB: Next); |
4366 | Value *NextStr = Builder.CreateConstInBoundsGEP1_32(Ty: WCharTy, Ptr: StrPhi, Idx0: 1); |
4367 | Value *NextSize = Builder.CreateSub(LHS: SizePhi, RHS: ConstantInt::get(Ty: SizeTy, V: 1)); |
4368 | Value *NextSizeEq0 = |
4369 | Builder.CreateICmpEQ(LHS: NextSize, RHS: ConstantInt::get(Ty: SizeTy, V: 0)); |
4370 | Builder.CreateCondBr(Cond: NextSizeEq0, True: Exit, False: CmpEq); |
4371 | StrPhi->addIncoming(V: NextStr, BB: Next); |
4372 | SizePhi->addIncoming(V: NextSize, BB: Next); |
4373 | |
4374 | EmitBlock(BB: Exit); |
4375 | PHINode *Ret = Builder.CreatePHI(Ty: Str->getType(), NumReservedValues: 3); |
4376 | Ret->addIncoming(V: llvm::Constant::getNullValue(Ty: Str->getType()), BB: Entry); |
4377 | Ret->addIncoming(V: llvm::Constant::getNullValue(Ty: Str->getType()), BB: Next); |
4378 | Ret->addIncoming(V: FoundChr, BB: CmpEq); |
4379 | return RValue::get(V: Ret); |
4380 | } |
4381 | case Builtin::BI__builtin_wmemcmp: { |
4382 | // The MSVC runtime library does not provide a definition of wmemcmp, so we |
4383 | // need an inline implementation. |
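    // The expansion below is roughly (note the element compare is unsigned,
    // since wchar_t is unsigned on Windows):
    //   for (; n; ++s1, ++s2, --n) {
    //     if (*s1 > *s2) return 1;
    //     if (*s1 < *s2) return -1;
    //   }
    //   return 0;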
4384 | if (!getTarget().getTriple().isOSMSVCRT()) |
4385 | break; |
4386 | |
4387 | llvm::Type *WCharTy = ConvertType(T: getContext().WCharTy); |
4388 | |
4389 | Value *Dst = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4390 | Value *Src = EmitScalarExpr(E: E->getArg(Arg: 1)); |
4391 | Value *Size = EmitScalarExpr(E: E->getArg(Arg: 2)); |
4392 | |
4393 | BasicBlock *Entry = Builder.GetInsertBlock(); |
4394 | BasicBlock *CmpGT = createBasicBlock(name: "wmemcmp.gt" ); |
4395 | BasicBlock *CmpLT = createBasicBlock(name: "wmemcmp.lt" ); |
4396 | BasicBlock *Next = createBasicBlock(name: "wmemcmp.next" ); |
4397 | BasicBlock *Exit = createBasicBlock(name: "wmemcmp.exit" ); |
4398 | Value *SizeEq0 = Builder.CreateICmpEQ(LHS: Size, RHS: ConstantInt::get(Ty: SizeTy, V: 0)); |
4399 | Builder.CreateCondBr(Cond: SizeEq0, True: Exit, False: CmpGT); |
4400 | |
4401 | EmitBlock(BB: CmpGT); |
4402 | PHINode *DstPhi = Builder.CreatePHI(Ty: Dst->getType(), NumReservedValues: 2); |
4403 | DstPhi->addIncoming(V: Dst, BB: Entry); |
4404 | PHINode *SrcPhi = Builder.CreatePHI(Ty: Src->getType(), NumReservedValues: 2); |
4405 | SrcPhi->addIncoming(V: Src, BB: Entry); |
4406 | PHINode *SizePhi = Builder.CreatePHI(Ty: SizeTy, NumReservedValues: 2); |
4407 | SizePhi->addIncoming(V: Size, BB: Entry); |
4408 | CharUnits WCharAlign = |
4409 | getContext().getTypeAlignInChars(T: getContext().WCharTy); |
4410 | Value *DstCh = Builder.CreateAlignedLoad(Ty: WCharTy, Addr: DstPhi, Align: WCharAlign); |
4411 | Value *SrcCh = Builder.CreateAlignedLoad(Ty: WCharTy, Addr: SrcPhi, Align: WCharAlign); |
4412 | Value *DstGtSrc = Builder.CreateICmpUGT(LHS: DstCh, RHS: SrcCh); |
4413 | Builder.CreateCondBr(Cond: DstGtSrc, True: Exit, False: CmpLT); |
4414 | |
4415 | EmitBlock(BB: CmpLT); |
4416 | Value *DstLtSrc = Builder.CreateICmpULT(LHS: DstCh, RHS: SrcCh); |
4417 | Builder.CreateCondBr(Cond: DstLtSrc, True: Exit, False: Next); |
4418 | |
4419 | EmitBlock(BB: Next); |
4420 | Value *NextDst = Builder.CreateConstInBoundsGEP1_32(Ty: WCharTy, Ptr: DstPhi, Idx0: 1); |
4421 | Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(Ty: WCharTy, Ptr: SrcPhi, Idx0: 1); |
4422 | Value *NextSize = Builder.CreateSub(LHS: SizePhi, RHS: ConstantInt::get(Ty: SizeTy, V: 1)); |
4423 | Value *NextSizeEq0 = |
4424 | Builder.CreateICmpEQ(LHS: NextSize, RHS: ConstantInt::get(Ty: SizeTy, V: 0)); |
4425 | Builder.CreateCondBr(Cond: NextSizeEq0, True: Exit, False: CmpGT); |
4426 | DstPhi->addIncoming(V: NextDst, BB: Next); |
4427 | SrcPhi->addIncoming(V: NextSrc, BB: Next); |
4428 | SizePhi->addIncoming(V: NextSize, BB: Next); |
4429 | |
4430 | EmitBlock(BB: Exit); |
4431 | PHINode *Ret = Builder.CreatePHI(Ty: IntTy, NumReservedValues: 4); |
4432 | Ret->addIncoming(V: ConstantInt::get(Ty: IntTy, V: 0), BB: Entry); |
4433 | Ret->addIncoming(V: ConstantInt::get(Ty: IntTy, V: 1), BB: CmpGT); |
4434 | Ret->addIncoming(V: ConstantInt::get(Ty: IntTy, V: -1), BB: CmpLT); |
4435 | Ret->addIncoming(V: ConstantInt::get(Ty: IntTy, V: 0), BB: Next); |
4436 | return RValue::get(V: Ret); |
4437 | } |
4438 | case Builtin::BI__builtin_dwarf_cfa: { |
4439 | // The offset in bytes from the first argument to the CFA. |
4440 | // |
4441 | // Why on earth is this in the frontend? Is there any reason at |
4442 | // all that the backend can't reasonably determine this while |
4443 | // lowering llvm.eh.dwarf.cfa()? |
4444 | // |
4445 | // TODO: If there's a satisfactory reason, add a target hook for |
4446 | // this instead of hard-coding 0, which is correct for most targets. |
4447 | int32_t Offset = 0; |
4448 | |
4449 | Function *F = CGM.getIntrinsic(IID: Intrinsic::eh_dwarf_cfa); |
4450 | return RValue::get(V: Builder.CreateCall(Callee: F, |
4451 | Args: llvm::ConstantInt::get(Ty: Int32Ty, V: Offset))); |
4452 | } |
4453 | case Builtin::BI__builtin_return_address: { |
4454 | Value *Depth = ConstantEmitter(*this).emitAbstract(E: E->getArg(Arg: 0), |
4455 | T: getContext().UnsignedIntTy); |
4456 | Function *F = CGM.getIntrinsic(IID: Intrinsic::returnaddress); |
4457 | return RValue::get(V: Builder.CreateCall(Callee: F, Args: Depth)); |
4458 | } |
4459 | case Builtin::BI_ReturnAddress: { |
4460 | Function *F = CGM.getIntrinsic(IID: Intrinsic::returnaddress); |
4461 | return RValue::get(V: Builder.CreateCall(Callee: F, Args: Builder.getInt32(C: 0))); |
4462 | } |
4463 | case Builtin::BI__builtin_frame_address: { |
4464 | Value *Depth = ConstantEmitter(*this).emitAbstract(E: E->getArg(Arg: 0), |
4465 | T: getContext().UnsignedIntTy); |
4466 | Function *F = CGM.getIntrinsic(IID: Intrinsic::frameaddress, Tys: AllocaInt8PtrTy); |
4467 | return RValue::get(V: Builder.CreateCall(Callee: F, Args: Depth)); |
4468 | } |
4469 | case Builtin::BI__builtin_extract_return_addr: { |
4470 | Value *Address = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4471 | Value *Result = getTargetHooks().decodeReturnAddress(CGF&: *this, Address); |
4472 | return RValue::get(V: Result); |
4473 | } |
4474 | case Builtin::BI__builtin_frob_return_addr: { |
4475 | Value *Address = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4476 | Value *Result = getTargetHooks().encodeReturnAddress(CGF&: *this, Address); |
4477 | return RValue::get(V: Result); |
4478 | } |
4479 | case Builtin::BI__builtin_dwarf_sp_column: { |
4480 | llvm::IntegerType *Ty |
4481 | = cast<llvm::IntegerType>(Val: ConvertType(T: E->getType())); |
4482 | int Column = getTargetHooks().getDwarfEHStackPointer(M&: CGM); |
4483 | if (Column == -1) { |
4484 | CGM.ErrorUnsupported(S: E, Type: "__builtin_dwarf_sp_column" ); |
4485 | return RValue::get(V: llvm::UndefValue::get(T: Ty)); |
4486 | } |
4487 | return RValue::get(V: llvm::ConstantInt::get(Ty, V: Column, IsSigned: true)); |
4488 | } |
4489 | case Builtin::BI__builtin_init_dwarf_reg_size_table: { |
4490 | Value *Address = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4491 | if (getTargetHooks().initDwarfEHRegSizeTable(CGF&: *this, Address)) |
4492 | CGM.ErrorUnsupported(S: E, Type: "__builtin_init_dwarf_reg_size_table" ); |
4493 | return RValue::get(V: llvm::UndefValue::get(T: ConvertType(T: E->getType()))); |
4494 | } |
4495 | case Builtin::BI__builtin_eh_return: { |
4496 | Value *Int = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4497 | Value *Ptr = EmitScalarExpr(E: E->getArg(Arg: 1)); |
4498 | |
4499 | llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Val: Int->getType()); |
4500 | assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) && |
4501 | "LLVM's __builtin_eh_return only supports 32- and 64-bit variants" ); |
4502 | Function *F = |
4503 | CGM.getIntrinsic(IID: IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32 |
4504 | : Intrinsic::eh_return_i64); |
4505 | Builder.CreateCall(Callee: F, Args: {Int, Ptr}); |
4506 | Builder.CreateUnreachable(); |
4507 | |
4508 | // We do need to preserve an insertion point. |
4509 | EmitBlock(BB: createBasicBlock(name: "builtin_eh_return.cont" )); |
4510 | |
4511 | return RValue::get(V: nullptr); |
4512 | } |
4513 | case Builtin::BI__builtin_unwind_init: { |
4514 | Function *F = CGM.getIntrinsic(IID: Intrinsic::eh_unwind_init); |
4515 | Builder.CreateCall(Callee: F); |
4516 | return RValue::get(V: nullptr); |
4517 | } |
4518 | case Builtin::BI__builtin_extend_pointer: { |
4519 | // Extends a pointer to the size of an _Unwind_Word, which is |
4520 | // uint64_t on all platforms. Generally this gets poked into a |
4521 | // register and eventually used as an address, so if the |
4522 | // addressing registers are wider than pointers and the platform |
4523 | // doesn't implicitly ignore high-order bits when doing |
4524 | // addressing, we need to make sure we zext / sext based on |
4525 | // the platform's expectations. |
4526 | // |
4527 | // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html |
4528 | |
4529 | // Cast the pointer to intptr_t. |
4530 | Value *Ptr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4531 | Value *Result = Builder.CreatePtrToInt(V: Ptr, DestTy: IntPtrTy, Name: "extend.cast" ); |
4532 | |
4533 | // If that's 64 bits, we're done. |
4534 | if (IntPtrTy->getBitWidth() == 64) |
4535 | return RValue::get(V: Result); |
4536 | |
4537 | // Otherwise, ask the codegen data what to do. |
4538 | if (getTargetHooks().extendPointerWithSExt()) |
4539 | return RValue::get(V: Builder.CreateSExt(V: Result, DestTy: Int64Ty, Name: "extend.sext" )); |
4540 | else |
4541 | return RValue::get(V: Builder.CreateZExt(V: Result, DestTy: Int64Ty, Name: "extend.zext" )); |
4542 | } |
4543 | case Builtin::BI__builtin_setjmp: { |
4544 | // Buffer is a void**. |
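    // Slot 0 receives the frame pointer and slot 2 the stack pointer; the
    // remaining words are reserved for the target's llvm.eh.sjlj.setjmp
    // lowering (e.g. the resume address typically lands in slot 1).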
4545 | Address Buf = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4546 | |
4547 | // Store the frame pointer to the setjmp buffer. |
4548 | Value *FrameAddr = Builder.CreateCall( |
4549 | Callee: CGM.getIntrinsic(IID: Intrinsic::frameaddress, Tys: AllocaInt8PtrTy), |
4550 | Args: ConstantInt::get(Ty: Int32Ty, V: 0)); |
4551 | Builder.CreateStore(Val: FrameAddr, Addr: Buf); |
4552 | |
4553 | // Store the stack pointer to the setjmp buffer. |
4554 | Value *StackAddr = Builder.CreateStackSave(); |
4555 | assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType()); |
4556 | |
4557 | Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Addr: Buf, Index: 2); |
4558 | Builder.CreateStore(Val: StackAddr, Addr: StackSaveSlot); |
4559 | |
4560 | // Call LLVM's EH setjmp, which is lightweight. |
4561 | Function *F = CGM.getIntrinsic(IID: Intrinsic::eh_sjlj_setjmp); |
4562 | return RValue::get(V: Builder.CreateCall(Callee: F, Args: Buf.emitRawPointer(CGF&: *this))); |
4563 | } |
4564 | case Builtin::BI__builtin_longjmp: { |
4565 | Value *Buf = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4566 | |
4567 | // Call LLVM's EH longjmp, which is lightweight. |
4568 | Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::eh_sjlj_longjmp), Args: Buf); |
4569 | |
4570 | // longjmp doesn't return; mark this as unreachable. |
4571 | Builder.CreateUnreachable(); |
4572 | |
4573 | // We do need to preserve an insertion point. |
4574 | EmitBlock(BB: createBasicBlock(name: "longjmp.cont" )); |
4575 | |
4576 | return RValue::get(V: nullptr); |
4577 | } |
4578 | case Builtin::BI__builtin_launder: { |
4579 | const Expr *Arg = E->getArg(Arg: 0); |
4580 | QualType ArgTy = Arg->getType()->getPointeeType(); |
4581 | Value *Ptr = EmitScalarExpr(E: Arg); |
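    // Only launder the invariant group when the pointee type actually needs
    // it (e.g. it may carry vptrs under -fstrict-vtable-pointers); otherwise
    // the pointer is returned unchanged.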
4582 | if (TypeRequiresBuiltinLaunder(CGM, Ty: ArgTy)) |
4583 | Ptr = Builder.CreateLaunderInvariantGroup(Ptr); |
4584 | |
4585 | return RValue::get(V: Ptr); |
4586 | } |
4587 | case Builtin::BI__sync_fetch_and_add: |
4588 | case Builtin::BI__sync_fetch_and_sub: |
4589 | case Builtin::BI__sync_fetch_and_or: |
4590 | case Builtin::BI__sync_fetch_and_and: |
4591 | case Builtin::BI__sync_fetch_and_xor: |
4592 | case Builtin::BI__sync_fetch_and_nand: |
4593 | case Builtin::BI__sync_add_and_fetch: |
4594 | case Builtin::BI__sync_sub_and_fetch: |
4595 | case Builtin::BI__sync_and_and_fetch: |
4596 | case Builtin::BI__sync_or_and_fetch: |
4597 | case Builtin::BI__sync_xor_and_fetch: |
4598 | case Builtin::BI__sync_nand_and_fetch: |
4599 | case Builtin::BI__sync_val_compare_and_swap: |
4600 | case Builtin::BI__sync_bool_compare_and_swap: |
4601 | case Builtin::BI__sync_lock_test_and_set: |
4602 | case Builtin::BI__sync_lock_release: |
4603 | case Builtin::BI__sync_swap: |
4604 | llvm_unreachable("Shouldn't make it through sema" ); |
4605 | case Builtin::BI__sync_fetch_and_add_1: |
4606 | case Builtin::BI__sync_fetch_and_add_2: |
4607 | case Builtin::BI__sync_fetch_and_add_4: |
4608 | case Builtin::BI__sync_fetch_and_add_8: |
4609 | case Builtin::BI__sync_fetch_and_add_16: |
4610 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::Add, E); |
4611 | case Builtin::BI__sync_fetch_and_sub_1: |
4612 | case Builtin::BI__sync_fetch_and_sub_2: |
4613 | case Builtin::BI__sync_fetch_and_sub_4: |
4614 | case Builtin::BI__sync_fetch_and_sub_8: |
4615 | case Builtin::BI__sync_fetch_and_sub_16: |
4616 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::Sub, E); |
4617 | case Builtin::BI__sync_fetch_and_or_1: |
4618 | case Builtin::BI__sync_fetch_and_or_2: |
4619 | case Builtin::BI__sync_fetch_and_or_4: |
4620 | case Builtin::BI__sync_fetch_and_or_8: |
4621 | case Builtin::BI__sync_fetch_and_or_16: |
4622 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::Or, E); |
4623 | case Builtin::BI__sync_fetch_and_and_1: |
4624 | case Builtin::BI__sync_fetch_and_and_2: |
4625 | case Builtin::BI__sync_fetch_and_and_4: |
4626 | case Builtin::BI__sync_fetch_and_and_8: |
4627 | case Builtin::BI__sync_fetch_and_and_16: |
4628 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::And, E); |
4629 | case Builtin::BI__sync_fetch_and_xor_1: |
4630 | case Builtin::BI__sync_fetch_and_xor_2: |
4631 | case Builtin::BI__sync_fetch_and_xor_4: |
4632 | case Builtin::BI__sync_fetch_and_xor_8: |
4633 | case Builtin::BI__sync_fetch_and_xor_16: |
4634 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::Xor, E); |
4635 | case Builtin::BI__sync_fetch_and_nand_1: |
4636 | case Builtin::BI__sync_fetch_and_nand_2: |
4637 | case Builtin::BI__sync_fetch_and_nand_4: |
4638 | case Builtin::BI__sync_fetch_and_nand_8: |
4639 | case Builtin::BI__sync_fetch_and_nand_16: |
4640 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::Nand, E); |
4641 | |
4642 | // Clang extensions: not overloaded yet. |
4643 | case Builtin::BI__sync_fetch_and_min: |
4644 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::Min, E); |
4645 | case Builtin::BI__sync_fetch_and_max: |
4646 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::Max, E); |
4647 | case Builtin::BI__sync_fetch_and_umin: |
4648 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::UMin, E); |
4649 | case Builtin::BI__sync_fetch_and_umax: |
4650 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::UMax, E); |
4651 | |
4652 | case Builtin::BI__sync_add_and_fetch_1: |
4653 | case Builtin::BI__sync_add_and_fetch_2: |
4654 | case Builtin::BI__sync_add_and_fetch_4: |
4655 | case Builtin::BI__sync_add_and_fetch_8: |
4656 | case Builtin::BI__sync_add_and_fetch_16: |
4657 | return EmitBinaryAtomicPost(CGF&: *this, Kind: llvm::AtomicRMWInst::Add, E, |
4658 | Op: llvm::Instruction::Add); |
4659 | case Builtin::BI__sync_sub_and_fetch_1: |
4660 | case Builtin::BI__sync_sub_and_fetch_2: |
4661 | case Builtin::BI__sync_sub_and_fetch_4: |
4662 | case Builtin::BI__sync_sub_and_fetch_8: |
4663 | case Builtin::BI__sync_sub_and_fetch_16: |
4664 | return EmitBinaryAtomicPost(CGF&: *this, Kind: llvm::AtomicRMWInst::Sub, E, |
4665 | Op: llvm::Instruction::Sub); |
4666 | case Builtin::BI__sync_and_and_fetch_1: |
4667 | case Builtin::BI__sync_and_and_fetch_2: |
4668 | case Builtin::BI__sync_and_and_fetch_4: |
4669 | case Builtin::BI__sync_and_and_fetch_8: |
4670 | case Builtin::BI__sync_and_and_fetch_16: |
4671 | return EmitBinaryAtomicPost(CGF&: *this, Kind: llvm::AtomicRMWInst::And, E, |
4672 | Op: llvm::Instruction::And); |
4673 | case Builtin::BI__sync_or_and_fetch_1: |
4674 | case Builtin::BI__sync_or_and_fetch_2: |
4675 | case Builtin::BI__sync_or_and_fetch_4: |
4676 | case Builtin::BI__sync_or_and_fetch_8: |
4677 | case Builtin::BI__sync_or_and_fetch_16: |
4678 | return EmitBinaryAtomicPost(CGF&: *this, Kind: llvm::AtomicRMWInst::Or, E, |
4679 | Op: llvm::Instruction::Or); |
4680 | case Builtin::BI__sync_xor_and_fetch_1: |
4681 | case Builtin::BI__sync_xor_and_fetch_2: |
4682 | case Builtin::BI__sync_xor_and_fetch_4: |
4683 | case Builtin::BI__sync_xor_and_fetch_8: |
4684 | case Builtin::BI__sync_xor_and_fetch_16: |
4685 | return EmitBinaryAtomicPost(CGF&: *this, Kind: llvm::AtomicRMWInst::Xor, E, |
4686 | Op: llvm::Instruction::Xor); |
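  // __sync_nand_and_fetch computes ~(*ptr & value). The atomicrmw returns the
  // old value, so the "and fetch" result is recomputed with an And and then
  // inverted (the 'Invert' flag below).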
4687 | case Builtin::BI__sync_nand_and_fetch_1: |
4688 | case Builtin::BI__sync_nand_and_fetch_2: |
4689 | case Builtin::BI__sync_nand_and_fetch_4: |
4690 | case Builtin::BI__sync_nand_and_fetch_8: |
4691 | case Builtin::BI__sync_nand_and_fetch_16: |
4692 | return EmitBinaryAtomicPost(CGF&: *this, Kind: llvm::AtomicRMWInst::Nand, E, |
4693 | Op: llvm::Instruction::And, Invert: true); |
4694 | |
4695 | case Builtin::BI__sync_val_compare_and_swap_1: |
4696 | case Builtin::BI__sync_val_compare_and_swap_2: |
4697 | case Builtin::BI__sync_val_compare_and_swap_4: |
4698 | case Builtin::BI__sync_val_compare_and_swap_8: |
4699 | case Builtin::BI__sync_val_compare_and_swap_16: |
4700 | return RValue::get(V: MakeAtomicCmpXchgValue(CGF&: *this, E, ReturnBool: false)); |
4701 | |
4702 | case Builtin::BI__sync_bool_compare_and_swap_1: |
4703 | case Builtin::BI__sync_bool_compare_and_swap_2: |
4704 | case Builtin::BI__sync_bool_compare_and_swap_4: |
4705 | case Builtin::BI__sync_bool_compare_and_swap_8: |
4706 | case Builtin::BI__sync_bool_compare_and_swap_16: |
4707 | return RValue::get(V: MakeAtomicCmpXchgValue(CGF&: *this, E, ReturnBool: true)); |
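  // Illustrative sketch (assumed source): `__sync_bool_compare_and_swap(&x, o, n)`
  // lowers to roughly
  //   %pair = cmpxchg ptr %x, i32 %o, i32 %n seq_cst seq_cst
  //   %ok   = extractvalue { i32, i1 } %pair, 1
  // while the _val_ form above extracts element 0 (the old value) instead.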
4708 | |
4709 | case Builtin::BI__sync_swap_1: |
4710 | case Builtin::BI__sync_swap_2: |
4711 | case Builtin::BI__sync_swap_4: |
4712 | case Builtin::BI__sync_swap_8: |
4713 | case Builtin::BI__sync_swap_16: |
4714 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::Xchg, E); |
4715 | |
4716 | case Builtin::BI__sync_lock_test_and_set_1: |
4717 | case Builtin::BI__sync_lock_test_and_set_2: |
4718 | case Builtin::BI__sync_lock_test_and_set_4: |
4719 | case Builtin::BI__sync_lock_test_and_set_8: |
4720 | case Builtin::BI__sync_lock_test_and_set_16: |
4721 | return EmitBinaryAtomic(CGF&: *this, Kind: llvm::AtomicRMWInst::Xchg, E); |
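  // Editorial note: __sync_swap and __sync_lock_test_and_set are both emitted as
  // an atomicrmw exchange here, e.g. `__sync_lock_test_and_set(&x, v)` becomes
  // roughly `atomicrmw xchg ptr %x, i32 %v seq_cst`.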
4722 | |
4723 | case Builtin::BI__sync_lock_release_1: |
4724 | case Builtin::BI__sync_lock_release_2: |
4725 | case Builtin::BI__sync_lock_release_4: |
4726 | case Builtin::BI__sync_lock_release_8: |
4727 | case Builtin::BI__sync_lock_release_16: { |
4728 | Address Ptr = CheckAtomicAlignment(CGF&: *this, E); |
4729 | QualType ElTy = E->getArg(Arg: 0)->getType()->getPointeeType(); |
4730 | |
4731 | llvm::Type *ITy = llvm::IntegerType::get(C&: getLLVMContext(), |
4732 | NumBits: getContext().getTypeSize(T: ElTy)); |
4733 | llvm::StoreInst *Store = |
4734 | Builder.CreateStore(Val: llvm::Constant::getNullValue(Ty: ITy), Addr: Ptr); |
4735 | Store->setAtomic(Ordering: llvm::AtomicOrdering::Release); |
4736 | return RValue::get(V: nullptr); |
4737 | } |
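  // Illustrative example (assumed source): `__sync_lock_release(&x)` for a
  // 32-bit x is emitted as roughly `store atomic i32 0, ptr %x release`.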
4738 | |
4739 | case Builtin::BI__sync_synchronize: { |
4740 | // We assume this is supposed to correspond to a C++0x-style |
4741 | // sequentially-consistent fence (i.e. this is only usable for |
4742 | // synchronization, not device I/O or anything like that). This intrinsic |
4743 | // is really badly designed in the sense that in theory, there isn't |
4744 | // any way to safely use it... but in practice, it mostly works |
4745 | // to use it with non-atomic loads and stores to get acquire/release |
4746 | // semantics. |
4747 | Builder.CreateFence(Ordering: llvm::AtomicOrdering::SequentiallyConsistent); |
4748 | return RValue::get(V: nullptr); |
4749 | } |
4750 | |
4751 | case Builtin::BI__builtin_nontemporal_load: |
4752 | return RValue::get(V: EmitNontemporalLoad(CGF&: *this, E)); |
4753 | case Builtin::BI__builtin_nontemporal_store: |
4754 | return RValue::get(V: EmitNontemporalStore(CGF&: *this, E)); |
4755 | case Builtin::BI__c11_atomic_is_lock_free: |
4756 | case Builtin::BI__atomic_is_lock_free: { |
4757 | // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the |
4758 | // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since |
4759 | // _Atomic(T) is always properly-aligned. |
4760 | const char *LibCallName = "__atomic_is_lock_free" ; |
4761 | CallArgList Args; |
4762 | Args.add(rvalue: RValue::get(V: EmitScalarExpr(E: E->getArg(Arg: 0))), |
4763 | type: getContext().getSizeType()); |
4764 | if (BuiltinID == Builtin::BI__atomic_is_lock_free) |
4765 | Args.add(rvalue: RValue::get(V: EmitScalarExpr(E: E->getArg(Arg: 1))), |
4766 | type: getContext().VoidPtrTy); |
4767 | else |
4768 | Args.add(rvalue: RValue::get(V: llvm::Constant::getNullValue(Ty: VoidPtrTy)), |
4769 | type: getContext().VoidPtrTy); |
4770 | const CGFunctionInfo &FuncInfo = |
4771 | CGM.getTypes().arrangeBuiltinFunctionCall(resultType: E->getType(), args: Args); |
4772 | llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(Info: FuncInfo); |
4773 | llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(Ty: FTy, Name: LibCallName); |
4774 | return EmitCall(CallInfo: FuncInfo, Callee: CGCallee::forDirect(functionPtr: Func), |
4775 | ReturnValue: ReturnValueSlot(), Args); |
4776 | } |
4777 | |
4778 | case Builtin::BI__atomic_test_and_set: { |
4779 | // Look at the argument type to determine whether this is a volatile |
4780 | // operation; the parameter type is always volatile, so it cannot tell us. |
4781 | QualType PtrTy = E->getArg(Arg: 0)->IgnoreImpCasts()->getType(); |
4782 | bool Volatile = |
4783 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
4784 | |
4785 | Address Ptr = |
4786 | EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)).withElementType(ElemTy: Int8Ty); |
4787 | |
4788 | Value *NewVal = Builder.getInt8(C: 1); |
4789 | Value *Order = EmitScalarExpr(E: E->getArg(Arg: 1)); |
4790 | if (isa<llvm::ConstantInt>(Val: Order)) { |
4791 | int ord = cast<llvm::ConstantInt>(Val: Order)->getZExtValue(); |
4792 | AtomicRMWInst *Result = nullptr; |
4793 | switch (ord) { |
4794 | case 0: // memory_order_relaxed |
4795 | default: // invalid order |
4796 | Result = Builder.CreateAtomicRMW(Op: llvm::AtomicRMWInst::Xchg, Addr: Ptr, Val: NewVal, |
4797 | Ordering: llvm::AtomicOrdering::Monotonic); |
4798 | break; |
4799 | case 1: // memory_order_consume |
4800 | case 2: // memory_order_acquire |
4801 | Result = Builder.CreateAtomicRMW(Op: llvm::AtomicRMWInst::Xchg, Addr: Ptr, Val: NewVal, |
4802 | Ordering: llvm::AtomicOrdering::Acquire); |
4803 | break; |
4804 | case 3: // memory_order_release |
4805 | Result = Builder.CreateAtomicRMW(Op: llvm::AtomicRMWInst::Xchg, Addr: Ptr, Val: NewVal, |
4806 | Ordering: llvm::AtomicOrdering::Release); |
4807 | break; |
4808 | case 4: // memory_order_acq_rel |
4810 | Result = Builder.CreateAtomicRMW(Op: llvm::AtomicRMWInst::Xchg, Addr: Ptr, Val: NewVal, |
4811 | Ordering: llvm::AtomicOrdering::AcquireRelease); |
4812 | break; |
4813 | case 5: // memory_order_seq_cst |
4814 | Result = Builder.CreateAtomicRMW( |
4815 | Op: llvm::AtomicRMWInst::Xchg, Addr: Ptr, Val: NewVal, |
4816 | Ordering: llvm::AtomicOrdering::SequentiallyConsistent); |
4817 | break; |
4818 | } |
4819 | Result->setVolatile(Volatile); |
4820 | return RValue::get(V: Builder.CreateIsNotNull(Arg: Result, Name: "tobool" )); |
4821 | } |
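    // When the ordering is not a compile-time constant, emit a switch over the
    // runtime order value (below): one block per supported ordering, each doing
    // the exchange with that ordering, with the results merged through a PHI node.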
4822 | |
4823 | llvm::BasicBlock *ContBB = createBasicBlock(name: "atomic.continue" , parent: CurFn); |
4824 | |
4825 | llvm::BasicBlock *BBs[5] = { |
4826 | createBasicBlock(name: "monotonic" , parent: CurFn), |
4827 | createBasicBlock(name: "acquire" , parent: CurFn), |
4828 | createBasicBlock(name: "release" , parent: CurFn), |
4829 | createBasicBlock(name: "acqrel" , parent: CurFn), |
4830 | createBasicBlock(name: "seqcst" , parent: CurFn) |
4831 | }; |
4832 | llvm::AtomicOrdering Orders[5] = { |
4833 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire, |
4834 | llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease, |
4835 | llvm::AtomicOrdering::SequentiallyConsistent}; |
4836 | |
4837 | Order = Builder.CreateIntCast(V: Order, DestTy: Builder.getInt32Ty(), isSigned: false); |
4838 | llvm::SwitchInst *SI = Builder.CreateSwitch(V: Order, Dest: BBs[0]); |
4839 | |
4840 | Builder.SetInsertPoint(ContBB); |
4841 | PHINode *Result = Builder.CreatePHI(Ty: Int8Ty, NumReservedValues: 5, Name: "was_set" ); |
4842 | |
4843 | for (unsigned i = 0; i < 5; ++i) { |
4844 | Builder.SetInsertPoint(BBs[i]); |
4845 | AtomicRMWInst *RMW = Builder.CreateAtomicRMW(Op: llvm::AtomicRMWInst::Xchg, |
4846 | Addr: Ptr, Val: NewVal, Ordering: Orders[i]); |
4847 | RMW->setVolatile(Volatile); |
4848 | Result->addIncoming(V: RMW, BB: BBs[i]); |
4849 | Builder.CreateBr(Dest: ContBB); |
4850 | } |
4851 | |
4852 | SI->addCase(OnVal: Builder.getInt32(C: 0), Dest: BBs[0]); |
4853 | SI->addCase(OnVal: Builder.getInt32(C: 1), Dest: BBs[1]); |
4854 | SI->addCase(OnVal: Builder.getInt32(C: 2), Dest: BBs[1]); |
4855 | SI->addCase(OnVal: Builder.getInt32(C: 3), Dest: BBs[2]); |
4856 | SI->addCase(OnVal: Builder.getInt32(C: 4), Dest: BBs[3]); |
4857 | SI->addCase(OnVal: Builder.getInt32(C: 5), Dest: BBs[4]); |
4858 | |
4859 | Builder.SetInsertPoint(ContBB); |
4860 | return RValue::get(V: Builder.CreateIsNotNull(Arg: Result, Name: "tobool" )); |
4861 | } |
4862 | |
4863 | case Builtin::BI__atomic_clear: { |
4864 | QualType PtrTy = E->getArg(Arg: 0)->IgnoreImpCasts()->getType(); |
4865 | bool Volatile = |
4866 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
4867 | |
4868 | Address Ptr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
4869 | Ptr = Ptr.withElementType(ElemTy: Int8Ty); |
4870 | Value *NewVal = Builder.getInt8(C: 0); |
4871 | Value *Order = EmitScalarExpr(E: E->getArg(Arg: 1)); |
4872 | if (isa<llvm::ConstantInt>(Val: Order)) { |
4873 | int ord = cast<llvm::ConstantInt>(Val: Order)->getZExtValue(); |
4874 | StoreInst *Store = Builder.CreateStore(Val: NewVal, Addr: Ptr, IsVolatile: Volatile); |
4875 | switch (ord) { |
4876 | case 0: // memory_order_relaxed |
4877 | default: // invalid order |
4878 | Store->setOrdering(llvm::AtomicOrdering::Monotonic); |
4879 | break; |
4880 | case 3: // memory_order_release |
4881 | Store->setOrdering(llvm::AtomicOrdering::Release); |
4882 | break; |
4883 | case 5: // memory_order_seq_cst |
4884 | Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent); |
4885 | break; |
4886 | } |
4887 | return RValue::get(V: nullptr); |
4888 | } |
4889 | |
4890 | llvm::BasicBlock *ContBB = createBasicBlock(name: "atomic.continue" , parent: CurFn); |
4891 | |
4892 | llvm::BasicBlock *BBs[3] = { |
4893 | createBasicBlock(name: "monotonic" , parent: CurFn), |
4894 | createBasicBlock(name: "release" , parent: CurFn), |
4895 | createBasicBlock(name: "seqcst" , parent: CurFn) |
4896 | }; |
4897 | llvm::AtomicOrdering Orders[3] = { |
4898 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release, |
4899 | llvm::AtomicOrdering::SequentiallyConsistent}; |
4900 | |
4901 | Order = Builder.CreateIntCast(V: Order, DestTy: Builder.getInt32Ty(), isSigned: false); |
4902 | llvm::SwitchInst *SI = Builder.CreateSwitch(V: Order, Dest: BBs[0]); |
4903 | |
4904 | for (unsigned i = 0; i < 3; ++i) { |
4905 | Builder.SetInsertPoint(BBs[i]); |
4906 | StoreInst *Store = Builder.CreateStore(Val: NewVal, Addr: Ptr, IsVolatile: Volatile); |
4907 | Store->setOrdering(Orders[i]); |
4908 | Builder.CreateBr(Dest: ContBB); |
4909 | } |
4910 | |
4911 | SI->addCase(OnVal: Builder.getInt32(C: 0), Dest: BBs[0]); |
4912 | SI->addCase(OnVal: Builder.getInt32(C: 3), Dest: BBs[1]); |
4913 | SI->addCase(OnVal: Builder.getInt32(C: 5), Dest: BBs[2]); |
4914 | |
4915 | Builder.SetInsertPoint(ContBB); |
4916 | return RValue::get(V: nullptr); |
4917 | } |
4918 | |
4919 | case Builtin::BI__atomic_thread_fence: |
4920 | case Builtin::BI__atomic_signal_fence: |
4921 | case Builtin::BI__c11_atomic_thread_fence: |
4922 | case Builtin::BI__c11_atomic_signal_fence: { |
4923 | llvm::SyncScope::ID SSID; |
4924 | if (BuiltinID == Builtin::BI__atomic_signal_fence || |
4925 | BuiltinID == Builtin::BI__c11_atomic_signal_fence) |
4926 | SSID = llvm::SyncScope::SingleThread; |
4927 | else |
4928 | SSID = llvm::SyncScope::System; |
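    // Illustrative mapping (assumed source): __atomic_thread_fence(__ATOMIC_SEQ_CST)
    // becomes `fence seq_cst`, while __atomic_signal_fence(__ATOMIC_ACQUIRE)
    // becomes `fence syncscope("singlethread") acquire`.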
4929 | Value *Order = EmitScalarExpr(E: E->getArg(Arg: 0)); |
4930 | if (isa<llvm::ConstantInt>(Val: Order)) { |
4931 | int ord = cast<llvm::ConstantInt>(Val: Order)->getZExtValue(); |
4932 | switch (ord) { |
4933 | case 0: // memory_order_relaxed |
4934 | default: // invalid order |
4935 | break; |
4936 | case 1: // memory_order_consume |
4937 | case 2: // memory_order_acquire |
4938 | Builder.CreateFence(Ordering: llvm::AtomicOrdering::Acquire, SSID); |
4939 | break; |
4940 | case 3: // memory_order_release |
4941 | Builder.CreateFence(Ordering: llvm::AtomicOrdering::Release, SSID); |
4942 | break; |
4943 | case 4: // memory_order_acq_rel |
4944 | Builder.CreateFence(Ordering: llvm::AtomicOrdering::AcquireRelease, SSID); |
4945 | break; |
4946 | case 5: // memory_order_seq_cst |
4947 | Builder.CreateFence(Ordering: llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
4948 | break; |
4949 | } |
4950 | return RValue::get(V: nullptr); |
4951 | } |
4952 | |
4953 | llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB; |
4954 | AcquireBB = createBasicBlock(name: "acquire" , parent: CurFn); |
4955 | ReleaseBB = createBasicBlock(name: "release" , parent: CurFn); |
4956 | AcqRelBB = createBasicBlock(name: "acqrel" , parent: CurFn); |
4957 | SeqCstBB = createBasicBlock(name: "seqcst" , parent: CurFn); |
4958 | llvm::BasicBlock *ContBB = createBasicBlock(name: "atomic.continue" , parent: CurFn); |
4959 | |
4960 | Order = Builder.CreateIntCast(V: Order, DestTy: Builder.getInt32Ty(), isSigned: false); |
4961 | llvm::SwitchInst *SI = Builder.CreateSwitch(V: Order, Dest: ContBB); |
4962 | |
4963 | Builder.SetInsertPoint(AcquireBB); |
4964 | Builder.CreateFence(Ordering: llvm::AtomicOrdering::Acquire, SSID); |
4965 | Builder.CreateBr(Dest: ContBB); |
4966 | SI->addCase(OnVal: Builder.getInt32(C: 1), Dest: AcquireBB); |
4967 | SI->addCase(OnVal: Builder.getInt32(C: 2), Dest: AcquireBB); |
4968 | |
4969 | Builder.SetInsertPoint(ReleaseBB); |
4970 | Builder.CreateFence(Ordering: llvm::AtomicOrdering::Release, SSID); |
4971 | Builder.CreateBr(Dest: ContBB); |
4972 | SI->addCase(OnVal: Builder.getInt32(C: 3), Dest: ReleaseBB); |
4973 | |
4974 | Builder.SetInsertPoint(AcqRelBB); |
4975 | Builder.CreateFence(Ordering: llvm::AtomicOrdering::AcquireRelease, SSID); |
4976 | Builder.CreateBr(Dest: ContBB); |
4977 | SI->addCase(OnVal: Builder.getInt32(C: 4), Dest: AcqRelBB); |
4978 | |
4979 | Builder.SetInsertPoint(SeqCstBB); |
4980 | Builder.CreateFence(Ordering: llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
4981 | Builder.CreateBr(Dest: ContBB); |
4982 | SI->addCase(OnVal: Builder.getInt32(C: 5), Dest: SeqCstBB); |
4983 | |
4984 | Builder.SetInsertPoint(ContBB); |
4985 | return RValue::get(V: nullptr); |
4986 | } |
4987 | |
4988 | case Builtin::BI__builtin_signbit: |
4989 | case Builtin::BI__builtin_signbitf: |
4990 | case Builtin::BI__builtin_signbitl: { |
4991 | return RValue::get( |
4992 | V: Builder.CreateZExt(V: EmitSignBit(CGF&: *this, V: EmitScalarExpr(E: E->getArg(Arg: 0))), |
4993 | DestTy: ConvertType(T: E->getType()))); |
4994 | } |
4995 | case Builtin::BI__warn_memset_zero_len: |
4996 | return RValue::getIgnored(); |
4997 | case Builtin::BI__annotation: { |
4998 | // Re-encode each wide string to UTF8 and make an MDString. |
4999 | SmallVector<Metadata *, 1> Strings; |
5000 | for (const Expr *Arg : E->arguments()) { |
5001 | const auto *Str = cast<StringLiteral>(Val: Arg->IgnoreParenCasts()); |
5002 | assert(Str->getCharByteWidth() == 2); |
5003 | StringRef WideBytes = Str->getBytes(); |
5004 | std::string StrUtf8; |
5005 | if (!convertUTF16ToUTF8String( |
5006 | SrcBytes: ArrayRef(WideBytes.data(), WideBytes.size()), Out&: StrUtf8)) { |
5007 | CGM.ErrorUnsupported(S: E, Type: "non-UTF16 __annotation argument" ); |
5008 | continue; |
5009 | } |
5010 | Strings.push_back(Elt: llvm::MDString::get(Context&: getLLVMContext(), Str: StrUtf8)); |
5011 | } |
5012 | |
5013 | // Build an MDTuple of MDStrings and emit the intrinsic call. |
5014 | llvm::Function *F = |
5015 | CGM.getIntrinsic(IID: llvm::Intrinsic::codeview_annotation, Tys: {}); |
5016 | MDTuple *StrTuple = MDTuple::get(Context&: getLLVMContext(), MDs: Strings); |
5017 | Builder.CreateCall(Callee: F, Args: MetadataAsValue::get(Context&: getLLVMContext(), MD: StrTuple)); |
5018 | return RValue::getIgnored(); |
5019 | } |
5020 | case Builtin::BI__builtin_annotation: { |
5021 | llvm::Value *AnnVal = EmitScalarExpr(E: E->getArg(Arg: 0)); |
5022 | llvm::Function *F = |
5023 | CGM.getIntrinsic(IID: llvm::Intrinsic::annotation, |
5024 | Tys: {AnnVal->getType(), CGM.ConstGlobalsPtrTy}); |
5025 | |
5026 | // Get the annotation string, looking through casts. Sema requires this to be a |
5027 | // non-wide string literal, potentially cast, so the cast<> is safe. |
5028 | const Expr *AnnotationStrExpr = E->getArg(Arg: 1)->IgnoreParenCasts(); |
5029 | StringRef Str = cast<StringLiteral>(Val: AnnotationStrExpr)->getString(); |
5030 | return RValue::get( |
5031 | V: EmitAnnotationCall(AnnotationFn: F, AnnotatedVal: AnnVal, AnnotationStr: Str, Location: E->getExprLoc(), Attr: nullptr)); |
5032 | } |
5033 | case Builtin::BI__builtin_addcb: |
5034 | case Builtin::BI__builtin_addcs: |
5035 | case Builtin::BI__builtin_addc: |
5036 | case Builtin::BI__builtin_addcl: |
5037 | case Builtin::BI__builtin_addcll: |
5038 | case Builtin::BI__builtin_subcb: |
5039 | case Builtin::BI__builtin_subcs: |
5040 | case Builtin::BI__builtin_subc: |
5041 | case Builtin::BI__builtin_subcl: |
5042 | case Builtin::BI__builtin_subcll: { |
5043 | |
5044 | // We translate all of these builtins from expressions of the form: |
5045 | // int x = ..., y = ..., carryin = ..., carryout, result; |
5046 | // result = __builtin_addc(x, y, carryin, &carryout); |
5047 | // |
5048 | // to LLVM IR of the form: |
5049 | // |
5050 | // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y) |
5051 | // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0 |
5052 | // %carry1 = extractvalue {i32, i1} %tmp1, 1 |
5053 | // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1, |
5054 | // i32 %carryin) |
5055 | // %result = extractvalue {i32, i1} %tmp2, 0 |
5056 | // %carry2 = extractvalue {i32, i1} %tmp2, 1 |
5057 | // %tmp3 = or i1 %carry1, %carry2 |
5058 | // %tmp4 = zext i1 %tmp3 to i32 |
5059 | // store i32 %tmp4, i32* %carryout |
5060 | |
5061 | // Scalarize our inputs. |
5062 | llvm::Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
5063 | llvm::Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
5064 | llvm::Value *Carryin = EmitScalarExpr(E: E->getArg(Arg: 2)); |
5065 | Address CarryOutPtr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 3)); |
5066 | |
5067 | // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow. |
5068 | llvm::Intrinsic::ID IntrinsicId; |
5069 | switch (BuiltinID) { |
5070 | default: llvm_unreachable("Unknown multiprecision builtin id." ); |
5071 | case Builtin::BI__builtin_addcb: |
5072 | case Builtin::BI__builtin_addcs: |
5073 | case Builtin::BI__builtin_addc: |
5074 | case Builtin::BI__builtin_addcl: |
5075 | case Builtin::BI__builtin_addcll: |
5076 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
5077 | break; |
5078 | case Builtin::BI__builtin_subcb: |
5079 | case Builtin::BI__builtin_subcs: |
5080 | case Builtin::BI__builtin_subc: |
5081 | case Builtin::BI__builtin_subcl: |
5082 | case Builtin::BI__builtin_subcll: |
5083 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
5084 | break; |
5085 | } |
5086 | |
5087 | // Construct our resulting LLVM IR expression. |
5088 | llvm::Value *Carry1; |
5089 | llvm::Value *Sum1 = EmitOverflowIntrinsic(CGF&: *this, IntrinsicID: IntrinsicId, |
5090 | X, Y, Carry&: Carry1); |
5091 | llvm::Value *Carry2; |
5092 | llvm::Value *Sum2 = EmitOverflowIntrinsic(CGF&: *this, IntrinsicID: IntrinsicId, |
5093 | X: Sum1, Y: Carryin, Carry&: Carry2); |
5094 | llvm::Value *CarryOut = Builder.CreateZExt(V: Builder.CreateOr(LHS: Carry1, RHS: Carry2), |
5095 | DestTy: X->getType()); |
5096 | Builder.CreateStore(Val: CarryOut, Addr: CarryOutPtr); |
5097 | return RValue::get(V: Sum2); |
5098 | } |
5099 | |
5100 | case Builtin::BI__builtin_add_overflow: |
5101 | case Builtin::BI__builtin_sub_overflow: |
5102 | case Builtin::BI__builtin_mul_overflow: { |
5103 | const clang::Expr *LeftArg = E->getArg(Arg: 0); |
5104 | const clang::Expr *RightArg = E->getArg(Arg: 1); |
5105 | const clang::Expr *ResultArg = E->getArg(Arg: 2); |
5106 | |
5107 | clang::QualType ResultQTy = |
5108 | ResultArg->getType()->castAs<PointerType>()->getPointeeType(); |
5109 | |
5110 | WidthAndSignedness LeftInfo = |
5111 | getIntegerWidthAndSignedness(context: CGM.getContext(), Type: LeftArg->getType()); |
5112 | WidthAndSignedness RightInfo = |
5113 | getIntegerWidthAndSignedness(context: CGM.getContext(), Type: RightArg->getType()); |
5114 | WidthAndSignedness ResultInfo = |
5115 | getIntegerWidthAndSignedness(context: CGM.getContext(), Type: ResultQTy); |
5116 | |
5117 | // Handle mixed-sign multiplication as a special case, because adding |
5118 | // runtime or backend support for our generic irgen would be too expensive. |
5119 | if (isSpecialMixedSignMultiply(BuiltinID, Op1Info: LeftInfo, Op2Info: RightInfo, ResultInfo)) |
5120 | return EmitCheckedMixedSignMultiply(CGF&: *this, Op1: LeftArg, Op1Info: LeftInfo, Op2: RightArg, |
5121 | Op2Info: RightInfo, ResultArg, ResultQTy, |
5122 | ResultInfo); |
5123 | |
5124 | if (isSpecialUnsignedMultiplySignedResult(BuiltinID, Op1Info: LeftInfo, Op2Info: RightInfo, |
5125 | ResultInfo)) |
5126 | return EmitCheckedUnsignedMultiplySignedResult( |
5127 | CGF&: *this, Op1: LeftArg, Op1Info: LeftInfo, Op2: RightArg, Op2Info: RightInfo, ResultArg, ResultQTy, |
5128 | ResultInfo); |
5129 | |
5130 | WidthAndSignedness EncompassingInfo = |
5131 | EncompassingIntegerType(Types: {LeftInfo, RightInfo, ResultInfo}); |
5132 | |
5133 | llvm::Type *EncompassingLLVMTy = |
5134 | llvm::IntegerType::get(C&: CGM.getLLVMContext(), NumBits: EncompassingInfo.Width); |
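    // Illustrative example (not from the original source): for `unsigned` and
    // `int` operands with an `int` result, the encompassing type must hold both
    // signed and unsigned 32-bit values, so it is a 33-bit signed integer; the
    // arithmetic below is then performed on i33 before being checked and
    // truncated back to the result width.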
5135 | |
5136 | llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(T: ResultQTy); |
5137 | |
5138 | llvm::Intrinsic::ID IntrinsicId; |
5139 | switch (BuiltinID) { |
5140 | default: |
5141 | llvm_unreachable("Unknown overflow builtin id." ); |
5142 | case Builtin::BI__builtin_add_overflow: |
5143 | IntrinsicId = EncompassingInfo.Signed |
5144 | ? llvm::Intrinsic::sadd_with_overflow |
5145 | : llvm::Intrinsic::uadd_with_overflow; |
5146 | break; |
5147 | case Builtin::BI__builtin_sub_overflow: |
5148 | IntrinsicId = EncompassingInfo.Signed |
5149 | ? llvm::Intrinsic::ssub_with_overflow |
5150 | : llvm::Intrinsic::usub_with_overflow; |
5151 | break; |
5152 | case Builtin::BI__builtin_mul_overflow: |
5153 | IntrinsicId = EncompassingInfo.Signed |
5154 | ? llvm::Intrinsic::smul_with_overflow |
5155 | : llvm::Intrinsic::umul_with_overflow; |
5156 | break; |
5157 | } |
5158 | |
5159 | llvm::Value *Left = EmitScalarExpr(E: LeftArg); |
5160 | llvm::Value *Right = EmitScalarExpr(E: RightArg); |
5161 | Address ResultPtr = EmitPointerWithAlignment(Addr: ResultArg); |
5162 | |
5163 | // Extend each operand to the encompassing type. |
5164 | Left = Builder.CreateIntCast(V: Left, DestTy: EncompassingLLVMTy, isSigned: LeftInfo.Signed); |
5165 | Right = Builder.CreateIntCast(V: Right, DestTy: EncompassingLLVMTy, isSigned: RightInfo.Signed); |
5166 | |
5167 | // Perform the operation on the extended values. |
5168 | llvm::Value *Overflow, *Result; |
5169 | Result = EmitOverflowIntrinsic(CGF&: *this, IntrinsicID: IntrinsicId, X: Left, Y: Right, Carry&: Overflow); |
5170 | |
5171 | if (EncompassingInfo.Width > ResultInfo.Width) { |
5172 | // The encompassing type is wider than the result type, so we need to |
5173 | // truncate it. |
5174 | llvm::Value *ResultTrunc = Builder.CreateTrunc(V: Result, DestTy: ResultLLVMTy); |
5175 | |
5176 | // To see if the truncation caused an overflow, we will extend |
5177 | // the result and then compare it to the original result. |
5178 | llvm::Value *ResultTruncExt = Builder.CreateIntCast( |
5179 | V: ResultTrunc, DestTy: EncompassingLLVMTy, isSigned: ResultInfo.Signed); |
5180 | llvm::Value *TruncationOverflow = |
5181 | Builder.CreateICmpNE(LHS: Result, RHS: ResultTruncExt); |
5182 | |
5183 | Overflow = Builder.CreateOr(LHS: Overflow, RHS: TruncationOverflow); |
5184 | Result = ResultTrunc; |
5185 | } |
5186 | |
5187 | // Finally, store the result using the pointer. |
5188 | bool isVolatile = |
5189 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
5190 | Builder.CreateStore(Val: EmitToMemory(Value: Result, Ty: ResultQTy), Addr: ResultPtr, IsVolatile: isVolatile); |
5191 | |
5192 | return RValue::get(V: Overflow); |
5193 | } |
5194 | |
5195 | case Builtin::BI__builtin_uadd_overflow: |
5196 | case Builtin::BI__builtin_uaddl_overflow: |
5197 | case Builtin::BI__builtin_uaddll_overflow: |
5198 | case Builtin::BI__builtin_usub_overflow: |
5199 | case Builtin::BI__builtin_usubl_overflow: |
5200 | case Builtin::BI__builtin_usubll_overflow: |
5201 | case Builtin::BI__builtin_umul_overflow: |
5202 | case Builtin::BI__builtin_umull_overflow: |
5203 | case Builtin::BI__builtin_umulll_overflow: |
5204 | case Builtin::BI__builtin_sadd_overflow: |
5205 | case Builtin::BI__builtin_saddl_overflow: |
5206 | case Builtin::BI__builtin_saddll_overflow: |
5207 | case Builtin::BI__builtin_ssub_overflow: |
5208 | case Builtin::BI__builtin_ssubl_overflow: |
5209 | case Builtin::BI__builtin_ssubll_overflow: |
5210 | case Builtin::BI__builtin_smul_overflow: |
5211 | case Builtin::BI__builtin_smull_overflow: |
5212 | case Builtin::BI__builtin_smulll_overflow: { |
5213 | |
5214 | // We translate all of these builtins directly to the relevant LLVM IR node. |
5215 | |
5216 | // Scalarize our inputs. |
5217 | llvm::Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
5218 | llvm::Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
5219 | Address SumOutPtr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 2)); |
5220 | |
5221 | // Decide which of the overflow intrinsics we are lowering to: |
5222 | llvm::Intrinsic::ID IntrinsicId; |
5223 | switch (BuiltinID) { |
5224 | default: llvm_unreachable("Unknown overflow builtin id." ); |
5225 | case Builtin::BI__builtin_uadd_overflow: |
5226 | case Builtin::BI__builtin_uaddl_overflow: |
5227 | case Builtin::BI__builtin_uaddll_overflow: |
5228 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
5229 | break; |
5230 | case Builtin::BI__builtin_usub_overflow: |
5231 | case Builtin::BI__builtin_usubl_overflow: |
5232 | case Builtin::BI__builtin_usubll_overflow: |
5233 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
5234 | break; |
5235 | case Builtin::BI__builtin_umul_overflow: |
5236 | case Builtin::BI__builtin_umull_overflow: |
5237 | case Builtin::BI__builtin_umulll_overflow: |
5238 | IntrinsicId = llvm::Intrinsic::umul_with_overflow; |
5239 | break; |
5240 | case Builtin::BI__builtin_sadd_overflow: |
5241 | case Builtin::BI__builtin_saddl_overflow: |
5242 | case Builtin::BI__builtin_saddll_overflow: |
5243 | IntrinsicId = llvm::Intrinsic::sadd_with_overflow; |
5244 | break; |
5245 | case Builtin::BI__builtin_ssub_overflow: |
5246 | case Builtin::BI__builtin_ssubl_overflow: |
5247 | case Builtin::BI__builtin_ssubll_overflow: |
5248 | IntrinsicId = llvm::Intrinsic::ssub_with_overflow; |
5249 | break; |
5250 | case Builtin::BI__builtin_smul_overflow: |
5251 | case Builtin::BI__builtin_smull_overflow: |
5252 | case Builtin::BI__builtin_smulll_overflow: |
5253 | IntrinsicId = llvm::Intrinsic::smul_with_overflow; |
5254 | break; |
5255 | } |
5256 | |
5258 | llvm::Value *Carry; |
5259 | llvm::Value *Sum = EmitOverflowIntrinsic(CGF&: *this, IntrinsicID: IntrinsicId, X, Y, Carry); |
5260 | Builder.CreateStore(Val: Sum, Addr: SumOutPtr); |
5261 | |
5262 | return RValue::get(V: Carry); |
5263 | } |
5264 | case Builtin::BIaddressof: |
5265 | case Builtin::BI__addressof: |
5266 | case Builtin::BI__builtin_addressof: |
5267 | return RValue::get(V: EmitLValue(E: E->getArg(Arg: 0)).getPointer(CGF&: *this)); |
5268 | case Builtin::BI__builtin_function_start: |
5269 | return RValue::get(V: CGM.GetFunctionStart( |
5270 | Decl: E->getArg(Arg: 0)->getAsBuiltinConstantDeclRef(Context: CGM.getContext()))); |
5271 | case Builtin::BI__builtin_operator_new: |
5272 | return EmitBuiltinNewDeleteCall( |
5273 | Type: E->getCallee()->getType()->castAs<FunctionProtoType>(), TheCallExpr: E, IsDelete: false); |
5274 | case Builtin::BI__builtin_operator_delete: |
5275 | EmitBuiltinNewDeleteCall( |
5276 | Type: E->getCallee()->getType()->castAs<FunctionProtoType>(), TheCallExpr: E, IsDelete: true); |
5277 | return RValue::get(V: nullptr); |
5278 | |
5279 | case Builtin::BI__builtin_is_aligned: |
5280 | return EmitBuiltinIsAligned(E); |
5281 | case Builtin::BI__builtin_align_up: |
5282 | return EmitBuiltinAlignTo(E, AlignUp: true); |
5283 | case Builtin::BI__builtin_align_down: |
5284 | return EmitBuiltinAlignTo(E, AlignUp: false); |
5285 | |
5286 | case Builtin::BI__noop: |
5287 | // __noop always evaluates to an integer literal zero. |
5288 | return RValue::get(V: ConstantInt::get(Ty: IntTy, V: 0)); |
5289 | case Builtin::BI__builtin_call_with_static_chain: { |
5290 | const CallExpr *Call = cast<CallExpr>(Val: E->getArg(Arg: 0)); |
5291 | const Expr *Chain = E->getArg(Arg: 1); |
5292 | return EmitCall(FnType: Call->getCallee()->getType(), |
5293 | Callee: EmitCallee(E: Call->getCallee()), E: Call, ReturnValue, |
5294 | Chain: EmitScalarExpr(E: Chain)); |
5295 | } |
5296 | case Builtin::BI_InterlockedExchange8: |
5297 | case Builtin::BI_InterlockedExchange16: |
5298 | case Builtin::BI_InterlockedExchange: |
5299 | case Builtin::BI_InterlockedExchangePointer: |
5300 | return RValue::get( |
5301 | V: EmitMSVCBuiltinExpr(BuiltinID: MSVCIntrin::_InterlockedExchange, E)); |
5302 | case Builtin::BI_InterlockedCompareExchangePointer: |
5303 | case Builtin::BI_InterlockedCompareExchangePointer_nf: { |
5304 | llvm::Type *RTy; |
5305 | llvm::IntegerType *IntType = IntegerType::get( |
5306 | C&: getLLVMContext(), NumBits: getContext().getTypeSize(T: E->getType())); |
5307 | |
5308 | Address DestAddr = CheckAtomicAlignment(CGF&: *this, E); |
5309 | |
5310 | llvm::Value *Exchange = EmitScalarExpr(E: E->getArg(Arg: 1)); |
5311 | RTy = Exchange->getType(); |
5312 | Exchange = Builder.CreatePtrToInt(V: Exchange, DestTy: IntType); |
5313 | |
5314 | llvm::Value *Comparand = |
5315 | Builder.CreatePtrToInt(V: EmitScalarExpr(E: E->getArg(Arg: 2)), DestTy: IntType); |
5316 | |
5317 | auto Ordering = |
5318 | BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ? |
5319 | AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent; |
5320 | |
5321 | auto Result = Builder.CreateAtomicCmpXchg(Addr: DestAddr, Cmp: Comparand, New: Exchange, |
5322 | SuccessOrdering: Ordering, FailureOrdering: Ordering); |
5323 | Result->setVolatile(true); |
5324 | |
5325 | return RValue::get(V: Builder.CreateIntToPtr(V: Builder.CreateExtractValue(Agg: Result, |
5326 | Idxs: 0), |
5327 | DestTy: RTy)); |
5328 | } |
5329 | case Builtin::BI_InterlockedCompareExchange8: |
5330 | case Builtin::BI_InterlockedCompareExchange16: |
5331 | case Builtin::BI_InterlockedCompareExchange: |
5332 | case Builtin::BI_InterlockedCompareExchange64: |
5333 | return RValue::get(V: EmitAtomicCmpXchgForMSIntrin(CGF&: *this, E)); |
5334 | case Builtin::BI_InterlockedIncrement16: |
5335 | case Builtin::BI_InterlockedIncrement: |
5336 | return RValue::get( |
5337 | V: EmitMSVCBuiltinExpr(BuiltinID: MSVCIntrin::_InterlockedIncrement, E)); |
5338 | case Builtin::BI_InterlockedDecrement16: |
5339 | case Builtin::BI_InterlockedDecrement: |
5340 | return RValue::get( |
5341 | V: EmitMSVCBuiltinExpr(BuiltinID: MSVCIntrin::_InterlockedDecrement, E)); |
5342 | case Builtin::BI_InterlockedAnd8: |
5343 | case Builtin::BI_InterlockedAnd16: |
5344 | case Builtin::BI_InterlockedAnd: |
5345 | return RValue::get(V: EmitMSVCBuiltinExpr(BuiltinID: MSVCIntrin::_InterlockedAnd, E)); |
5346 | case Builtin::BI_InterlockedExchangeAdd8: |
5347 | case Builtin::BI_InterlockedExchangeAdd16: |
5348 | case Builtin::BI_InterlockedExchangeAdd: |
5349 | return RValue::get( |
5350 | V: EmitMSVCBuiltinExpr(BuiltinID: MSVCIntrin::_InterlockedExchangeAdd, E)); |
5351 | case Builtin::BI_InterlockedExchangeSub8: |
5352 | case Builtin::BI_InterlockedExchangeSub16: |
5353 | case Builtin::BI_InterlockedExchangeSub: |
5354 | return RValue::get( |
5355 | V: EmitMSVCBuiltinExpr(BuiltinID: MSVCIntrin::_InterlockedExchangeSub, E)); |
5356 | case Builtin::BI_InterlockedOr8: |
5357 | case Builtin::BI_InterlockedOr16: |
5358 | case Builtin::BI_InterlockedOr: |
5359 | return RValue::get(V: EmitMSVCBuiltinExpr(BuiltinID: MSVCIntrin::_InterlockedOr, E)); |
5360 | case Builtin::BI_InterlockedXor8: |
5361 | case Builtin::BI_InterlockedXor16: |
5362 | case Builtin::BI_InterlockedXor: |
5363 | return RValue::get(V: EmitMSVCBuiltinExpr(BuiltinID: MSVCIntrin::_InterlockedXor, E)); |
5364 | |
5365 | case Builtin::BI_bittest64: |
5366 | case Builtin::BI_bittest: |
5367 | case Builtin::BI_bittestandcomplement64: |
5368 | case Builtin::BI_bittestandcomplement: |
5369 | case Builtin::BI_bittestandreset64: |
5370 | case Builtin::BI_bittestandreset: |
5371 | case Builtin::BI_bittestandset64: |
5372 | case Builtin::BI_bittestandset: |
5373 | case Builtin::BI_interlockedbittestandreset: |
5374 | case Builtin::BI_interlockedbittestandreset64: |
5375 | case Builtin::BI_interlockedbittestandset64: |
5376 | case Builtin::BI_interlockedbittestandset: |
5377 | case Builtin::BI_interlockedbittestandset_acq: |
5378 | case Builtin::BI_interlockedbittestandset_rel: |
5379 | case Builtin::BI_interlockedbittestandset_nf: |
5380 | case Builtin::BI_interlockedbittestandreset_acq: |
5381 | case Builtin::BI_interlockedbittestandreset_rel: |
5382 | case Builtin::BI_interlockedbittestandreset_nf: |
5383 | return RValue::get(V: EmitBitTestIntrinsic(CGF&: *this, BuiltinID, E)); |
5384 | |
5385 | // These builtins exist to emit regular volatile loads and stores not |
5386 | // affected by the -fms-volatile setting. |
5387 | case Builtin::BI__iso_volatile_load8: |
5388 | case Builtin::BI__iso_volatile_load16: |
5389 | case Builtin::BI__iso_volatile_load32: |
5390 | case Builtin::BI__iso_volatile_load64: |
5391 | return RValue::get(V: EmitISOVolatileLoad(CGF&: *this, E)); |
5392 | case Builtin::BI__iso_volatile_store8: |
5393 | case Builtin::BI__iso_volatile_store16: |
5394 | case Builtin::BI__iso_volatile_store32: |
5395 | case Builtin::BI__iso_volatile_store64: |
5396 | return RValue::get(V: EmitISOVolatileStore(CGF&: *this, E)); |
5397 | |
5398 | case Builtin::BI__builtin_ptrauth_sign_constant: |
5399 | return RValue::get(V: ConstantEmitter(*this).emitAbstract(E, T: E->getType())); |
5400 | |
5401 | case Builtin::BI__builtin_ptrauth_auth: |
5402 | case Builtin::BI__builtin_ptrauth_auth_and_resign: |
5403 | case Builtin::BI__builtin_ptrauth_blend_discriminator: |
5404 | case Builtin::BI__builtin_ptrauth_sign_generic_data: |
5405 | case Builtin::BI__builtin_ptrauth_sign_unauthenticated: |
5406 | case Builtin::BI__builtin_ptrauth_strip: { |
5407 | // Emit the arguments. |
5408 | SmallVector<llvm::Value *, 5> Args; |
5409 | for (auto argExpr : E->arguments()) |
5410 | Args.push_back(Elt: EmitScalarExpr(E: argExpr)); |
5411 | |
5412 | // Cast the value to intptr_t, saving its original type. |
5413 | llvm::Type *OrigValueType = Args[0]->getType(); |
5414 | if (OrigValueType->isPointerTy()) |
5415 | Args[0] = Builder.CreatePtrToInt(V: Args[0], DestTy: IntPtrTy); |
5416 | |
5417 | switch (BuiltinID) { |
5418 | case Builtin::BI__builtin_ptrauth_auth_and_resign: |
5419 | if (Args[4]->getType()->isPointerTy()) |
5420 | Args[4] = Builder.CreatePtrToInt(V: Args[4], DestTy: IntPtrTy); |
5421 | [[fallthrough]]; |
5422 | |
5423 | case Builtin::BI__builtin_ptrauth_auth: |
5424 | case Builtin::BI__builtin_ptrauth_sign_unauthenticated: |
5425 | if (Args[2]->getType()->isPointerTy()) |
5426 | Args[2] = Builder.CreatePtrToInt(V: Args[2], DestTy: IntPtrTy); |
5427 | break; |
5428 | |
5429 | case Builtin::BI__builtin_ptrauth_sign_generic_data: |
5430 | if (Args[1]->getType()->isPointerTy()) |
5431 | Args[1] = Builder.CreatePtrToInt(V: Args[1], DestTy: IntPtrTy); |
5432 | break; |
5433 | |
5434 | case Builtin::BI__builtin_ptrauth_blend_discriminator: |
5435 | case Builtin::BI__builtin_ptrauth_strip: |
5436 | break; |
5437 | } |
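    // Illustrative sketch (assumed source): `__builtin_ptrauth_auth(p, key, disc)`
    // becomes roughly
    //   %v = ptrtoint ptr %p to i64
    //   %r = call i64 @llvm.ptrauth.auth(i64 %v, i32 key, i64 %disc)
    //   inttoptr i64 %r to ptr
    // with the integer/pointer conversions handled by the casts above and below.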
5438 | |
5439 | // Call the intrinsic. |
5440 | auto IntrinsicID = [&]() -> unsigned { |
5441 | switch (BuiltinID) { |
5442 | case Builtin::BI__builtin_ptrauth_auth: |
5443 | return llvm::Intrinsic::ptrauth_auth; |
5444 | case Builtin::BI__builtin_ptrauth_auth_and_resign: |
5445 | return llvm::Intrinsic::ptrauth_resign; |
5446 | case Builtin::BI__builtin_ptrauth_blend_discriminator: |
5447 | return llvm::Intrinsic::ptrauth_blend; |
5448 | case Builtin::BI__builtin_ptrauth_sign_generic_data: |
5449 | return llvm::Intrinsic::ptrauth_sign_generic; |
5450 | case Builtin::BI__builtin_ptrauth_sign_unauthenticated: |
5451 | return llvm::Intrinsic::ptrauth_sign; |
5452 | case Builtin::BI__builtin_ptrauth_strip: |
5453 | return llvm::Intrinsic::ptrauth_strip; |
5454 | } |
5455 | llvm_unreachable("bad ptrauth intrinsic" ); |
5456 | }(); |
5457 | auto Intrinsic = CGM.getIntrinsic(IID: IntrinsicID); |
5458 | llvm::Value *Result = EmitRuntimeCall(callee: Intrinsic, args: Args); |
5459 | |
5460 | if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data && |
5461 | BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator && |
5462 | OrigValueType->isPointerTy()) { |
5463 | Result = Builder.CreateIntToPtr(V: Result, DestTy: OrigValueType); |
5464 | } |
5465 | return RValue::get(V: Result); |
5466 | } |
5467 | |
5468 | case Builtin::BI__exception_code: |
5469 | case Builtin::BI_exception_code: |
5470 | return RValue::get(V: EmitSEHExceptionCode()); |
5471 | case Builtin::BI__exception_info: |
5472 | case Builtin::BI_exception_info: |
5473 | return RValue::get(V: EmitSEHExceptionInfo()); |
5474 | case Builtin::BI__abnormal_termination: |
5475 | case Builtin::BI_abnormal_termination: |
5476 | return RValue::get(V: EmitSEHAbnormalTermination()); |
5477 | case Builtin::BI_setjmpex: |
5478 | if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 && |
5479 | E->getArg(Arg: 0)->getType()->isPointerType()) |
5480 | return EmitMSVCRTSetJmp(CGF&: *this, SJKind: MSVCSetJmpKind::_setjmpex, E); |
5481 | break; |
5482 | case Builtin::BI_setjmp: |
5483 | if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 && |
5484 | E->getArg(Arg: 0)->getType()->isPointerType()) { |
5485 | if (getTarget().getTriple().getArch() == llvm::Triple::x86) |
5486 | return EmitMSVCRTSetJmp(CGF&: *this, SJKind: MSVCSetJmpKind::_setjmp3, E); |
5487 | else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64) |
5488 | return EmitMSVCRTSetJmp(CGF&: *this, SJKind: MSVCSetJmpKind::_setjmpex, E); |
5489 | return EmitMSVCRTSetJmp(CGF&: *this, SJKind: MSVCSetJmpKind::_setjmp, E); |
5490 | } |
5491 | break; |
5492 | |
5493 | // C++ std:: builtins. |
5494 | case Builtin::BImove: |
5495 | case Builtin::BImove_if_noexcept: |
5496 | case Builtin::BIforward: |
5497 | case Builtin::BIforward_like: |
5498 | case Builtin::BIas_const: |
5499 | return RValue::get(V: EmitLValue(E: E->getArg(Arg: 0)).getPointer(CGF&: *this)); |
5500 | case Builtin::BI__GetExceptionInfo: { |
5501 | if (llvm::GlobalVariable *GV = |
5502 | CGM.getCXXABI().getThrowInfo(T: FD->getParamDecl(i: 0)->getType())) |
5503 | return RValue::get(V: GV); |
5504 | break; |
5505 | } |
5506 | |
5507 | case Builtin::BI__fastfail: |
5508 | return RValue::get(V: EmitMSVCBuiltinExpr(BuiltinID: MSVCIntrin::__fastfail, E)); |
5509 | |
5510 | case Builtin::BI__builtin_coro_id: |
5511 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_id); |
5512 | case Builtin::BI__builtin_coro_promise: |
5513 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_promise); |
5514 | case Builtin::BI__builtin_coro_resume: |
5515 | EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_resume); |
5516 | return RValue::get(V: nullptr); |
5517 | case Builtin::BI__builtin_coro_frame: |
5518 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_frame); |
5519 | case Builtin::BI__builtin_coro_noop: |
5520 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_noop); |
5521 | case Builtin::BI__builtin_coro_free: |
5522 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_free); |
5523 | case Builtin::BI__builtin_coro_destroy: |
5524 | EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_destroy); |
5525 | return RValue::get(V: nullptr); |
5526 | case Builtin::BI__builtin_coro_done: |
5527 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_done); |
5528 | case Builtin::BI__builtin_coro_alloc: |
5529 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_alloc); |
5530 | case Builtin::BI__builtin_coro_begin: |
5531 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_begin); |
5532 | case Builtin::BI__builtin_coro_end: |
5533 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_end); |
5534 | case Builtin::BI__builtin_coro_suspend: |
5535 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_suspend); |
5536 | case Builtin::BI__builtin_coro_size: |
5537 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_size); |
5538 | case Builtin::BI__builtin_coro_align: |
5539 | return EmitCoroutineIntrinsic(E, IID: Intrinsic::coro_align); |
5540 | |
5541 | // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions |
5542 | case Builtin::BIread_pipe: |
5543 | case Builtin::BIwrite_pipe: { |
5544 | Value *Arg0 = EmitScalarExpr(E: E->getArg(Arg: 0)), |
5545 | *Arg1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
5546 | CGOpenCLRuntime OpenCLRT(CGM); |
5547 | Value *PacketSize = OpenCLRT.getPipeElemSize(PipeArg: E->getArg(Arg: 0)); |
5548 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(PipeArg: E->getArg(Arg: 0)); |
5549 | |
5550 | // Type of the generic packet parameter. |
5551 | unsigned GenericAS = |
5552 | getContext().getTargetAddressSpace(AS: LangAS::opencl_generic); |
5553 | llvm::Type *I8PTy = llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: GenericAS); |
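    // Illustrative example (assumed user code): for `int v; read_pipe(p, &v);` the
    // two-argument path below emits roughly
    //   call i32 @__read_pipe_2(%pipe, ptr addrspace(4) %vp, i32 4, i32 4)
    // where the exact pipe pointer type and generic address space are target-dependent.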
5554 | |
5555 | // Testing which overloaded version we should generate the call for. |
5556 | if (2U == E->getNumArgs()) { |
5557 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2" |
5558 | : "__write_pipe_2" ; |
5559 | // Create a generic function type so the call works with any builtin or |
5560 | // user-defined type. |
5561 | llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty}; |
5562 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
5563 | Result: Int32Ty, Params: llvm::ArrayRef<llvm::Type *>(ArgTys), isVarArg: false); |
5564 | Value *BCast = Builder.CreatePointerCast(V: Arg1, DestTy: I8PTy); |
5565 | return RValue::get( |
5566 | V: EmitRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name), |
5567 | args: {Arg0, BCast, PacketSize, PacketAlign})); |
5568 | } else { |
5569 | assert(4 == E->getNumArgs() && |
5570 | "Illegal number of parameters to pipe function" ); |
5571 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4" |
5572 | : "__write_pipe_4" ; |
5573 | |
5574 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy, |
5575 | Int32Ty, Int32Ty}; |
5576 | Value *Arg2 = EmitScalarExpr(E: E->getArg(Arg: 2)), |
5577 | *Arg3 = EmitScalarExpr(E: E->getArg(Arg: 3)); |
5578 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
5579 | Result: Int32Ty, Params: llvm::ArrayRef<llvm::Type *>(ArgTys), isVarArg: false); |
5580 | Value *BCast = Builder.CreatePointerCast(V: Arg3, DestTy: I8PTy); |
5581 | // We know the third argument is an integer type, but we may need to cast |
5582 | // it to i32. |
5583 | if (Arg2->getType() != Int32Ty) |
5584 | Arg2 = Builder.CreateZExtOrTrunc(V: Arg2, DestTy: Int32Ty); |
5585 | return RValue::get( |
5586 | V: EmitRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name), |
5587 | args: {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign})); |
5588 | } |
5589 | } |
5590 | // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write |
5591 | // functions |
5592 | case Builtin::BIreserve_read_pipe: |
5593 | case Builtin::BIreserve_write_pipe: |
5594 | case Builtin::BIwork_group_reserve_read_pipe: |
5595 | case Builtin::BIwork_group_reserve_write_pipe: |
5596 | case Builtin::BIsub_group_reserve_read_pipe: |
5597 | case Builtin::BIsub_group_reserve_write_pipe: { |
5598 | // Composing the mangled name for the function. |
5599 | const char *Name; |
5600 | if (BuiltinID == Builtin::BIreserve_read_pipe) |
5601 | Name = "__reserve_read_pipe" ; |
5602 | else if (BuiltinID == Builtin::BIreserve_write_pipe) |
5603 | Name = "__reserve_write_pipe" ; |
5604 | else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe) |
5605 | Name = "__work_group_reserve_read_pipe" ; |
5606 | else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe) |
5607 | Name = "__work_group_reserve_write_pipe" ; |
5608 | else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe) |
5609 | Name = "__sub_group_reserve_read_pipe" ; |
5610 | else |
5611 | Name = "__sub_group_reserve_write_pipe" ; |
5612 | |
5613 | Value *Arg0 = EmitScalarExpr(E: E->getArg(Arg: 0)), |
5614 | *Arg1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
5615 | llvm::Type *ReservedIDTy = ConvertType(T: getContext().OCLReserveIDTy); |
5616 | CGOpenCLRuntime OpenCLRT(CGM); |
5617 | Value *PacketSize = OpenCLRT.getPipeElemSize(PipeArg: E->getArg(Arg: 0)); |
5618 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(PipeArg: E->getArg(Arg: 0)); |
5619 | |
5620 | // Building the generic function prototype. |
5621 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty}; |
5622 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
5623 | Result: ReservedIDTy, Params: llvm::ArrayRef<llvm::Type *>(ArgTys), isVarArg: false); |
5624 | // We know the second argument is an integer type, but we may need to cast |
5625 | // it to i32. |
5626 | if (Arg1->getType() != Int32Ty) |
5627 | Arg1 = Builder.CreateZExtOrTrunc(V: Arg1, DestTy: Int32Ty); |
5628 | return RValue::get(V: EmitRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name), |
5629 | args: {Arg0, Arg1, PacketSize, PacketAlign})); |
5630 | } |
5631 | // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write |
5632 | // functions |
5633 | case Builtin::BIcommit_read_pipe: |
5634 | case Builtin::BIcommit_write_pipe: |
5635 | case Builtin::BIwork_group_commit_read_pipe: |
5636 | case Builtin::BIwork_group_commit_write_pipe: |
5637 | case Builtin::BIsub_group_commit_read_pipe: |
5638 | case Builtin::BIsub_group_commit_write_pipe: { |
5639 | const char *Name; |
5640 | if (BuiltinID == Builtin::BIcommit_read_pipe) |
5641 | Name = "__commit_read_pipe" ; |
5642 | else if (BuiltinID == Builtin::BIcommit_write_pipe) |
5643 | Name = "__commit_write_pipe" ; |
5644 | else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe) |
5645 | Name = "__work_group_commit_read_pipe" ; |
5646 | else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe) |
5647 | Name = "__work_group_commit_write_pipe" ; |
5648 | else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe) |
5649 | Name = "__sub_group_commit_read_pipe" ; |
5650 | else |
5651 | Name = "__sub_group_commit_write_pipe" ; |
5652 | |
5653 | Value *Arg0 = EmitScalarExpr(E: E->getArg(Arg: 0)), |
5654 | *Arg1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
5655 | CGOpenCLRuntime OpenCLRT(CGM); |
5656 | Value *PacketSize = OpenCLRT.getPipeElemSize(PipeArg: E->getArg(Arg: 0)); |
5657 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(PipeArg: E->getArg(Arg: 0)); |
5658 | |
5659 | // Building the generic function prototype. |
5660 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty}; |
5661 | llvm::FunctionType *FTy = |
5662 | llvm::FunctionType::get(Result: llvm::Type::getVoidTy(C&: getLLVMContext()), |
5663 | Params: llvm::ArrayRef<llvm::Type *>(ArgTys), isVarArg: false); |
5664 | |
5665 | return RValue::get(V: EmitRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name), |
5666 | args: {Arg0, Arg1, PacketSize, PacketAlign})); |
5667 | } |
5668 | // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions |
5669 | case Builtin::BIget_pipe_num_packets: |
5670 | case Builtin::BIget_pipe_max_packets: { |
5671 | const char *BaseName; |
5672 | const auto *PipeTy = E->getArg(Arg: 0)->getType()->castAs<PipeType>(); |
5673 | if (BuiltinID == Builtin::BIget_pipe_num_packets) |
5674 | BaseName = "__get_pipe_num_packets" ; |
5675 | else |
5676 | BaseName = "__get_pipe_max_packets" ; |
5677 | std::string Name = std::string(BaseName) + |
5678 | std::string(PipeTy->isReadOnly() ? "_ro" : "_wo" ); |
5679 | |
5680 | // Building the generic function prototype. |
5681 | Value *Arg0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
5682 | CGOpenCLRuntime OpenCLRT(CGM); |
5683 | Value *PacketSize = OpenCLRT.getPipeElemSize(PipeArg: E->getArg(Arg: 0)); |
5684 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(PipeArg: E->getArg(Arg: 0)); |
5685 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty}; |
5686 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
5687 | Result: Int32Ty, Params: llvm::ArrayRef<llvm::Type *>(ArgTys), isVarArg: false); |
5688 | |
5689 | return RValue::get(V: EmitRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name), |
5690 | args: {Arg0, PacketSize, PacketAlign})); |
5691 | } |
5692 | |
5693 | // OpenCL v2.0 s6.13.9 - Address space qualifier functions. |
5694 | case Builtin::BIto_global: |
5695 | case Builtin::BIto_local: |
5696 | case Builtin::BIto_private: { |
5697 | auto Arg0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
5698 | auto NewArgT = llvm::PointerType::get( |
5699 | C&: getLLVMContext(), |
5700 | AddressSpace: CGM.getContext().getTargetAddressSpace(AS: LangAS::opencl_generic)); |
5701 | auto NewRetT = llvm::PointerType::get( |
5702 | C&: getLLVMContext(), |
5703 | AddressSpace: CGM.getContext().getTargetAddressSpace( |
5704 | AS: E->getType()->getPointeeType().getAddressSpace())); |
5705 | auto FTy = llvm::FunctionType::get(Result: NewRetT, Params: {NewArgT}, isVarArg: false); |
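    // Illustrative note (assumed source): `to_global(p)` becomes a call to
    // `__to_global` taking the argument in the generic address space; the result
    // is cast back to the builtin's declared return pointer type below.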
5706 | llvm::Value *NewArg; |
5707 | if (Arg0->getType()->getPointerAddressSpace() != |
5708 | NewArgT->getPointerAddressSpace()) |
5709 | NewArg = Builder.CreateAddrSpaceCast(V: Arg0, DestTy: NewArgT); |
5710 | else |
5711 | NewArg = Builder.CreateBitOrPointerCast(V: Arg0, DestTy: NewArgT); |
5712 | auto NewName = std::string("__" ) + E->getDirectCallee()->getName().str(); |
5713 | auto NewCall = |
5714 | EmitRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name: NewName), args: {NewArg}); |
5715 | return RValue::get(V: Builder.CreateBitOrPointerCast(V: NewCall, |
5716 | DestTy: ConvertType(T: E->getType()))); |
5717 | } |
5718 | |
5719 | // OpenCL v2.0, s6.13.17 - Enqueue kernel function. |
5720 | // Table 6.13.17.1 specifies four overload forms of enqueue_kernel. |
5721 | // The code below expands the builtin call to a call to one of the following |
5722 | // functions that an OpenCL runtime library will have to provide: |
5723 | // __enqueue_kernel_basic |
5724 | // __enqueue_kernel_varargs |
5725 | // __enqueue_kernel_basic_events |
5726 | // __enqueue_kernel_events_varargs |
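  // Illustrative example (assumed user code): the simplest, four-argument form
  //   enqueue_kernel(q, flags, ndrange, ^{ ... });
  // is lowered below to roughly
  //   call i32 @__enqueue_kernel_basic(q, flags, ptr byval(%ndrange_t) %nd,
  //                                    ptr addrspace(4) %invoke, ptr addrspace(4) %block)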
5727 | case Builtin::BIenqueue_kernel: { |
5728 | StringRef Name; // Generated function call name |
5729 | unsigned NumArgs = E->getNumArgs(); |
5730 | |
5731 | llvm::Type *QueueTy = ConvertType(T: getContext().OCLQueueTy); |
5732 | llvm::Type *GenericVoidPtrTy = Builder.getPtrTy( |
5733 | AddrSpace: getContext().getTargetAddressSpace(AS: LangAS::opencl_generic)); |
5734 | |
5735 | llvm::Value *Queue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
5736 | llvm::Value *Flags = EmitScalarExpr(E: E->getArg(Arg: 1)); |
5737 | LValue NDRangeL = EmitAggExprToLValue(E: E->getArg(Arg: 2)); |
5738 | llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(CGF&: *this); |
5739 | llvm::Type *RangeTy = NDRangeL.getAddress().getType(); |
5740 | |
5741 | if (NumArgs == 4) { |
5742 | // The most basic form of the call with parameters: |
5743 | // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void) |
5744 | Name = "__enqueue_kernel_basic" ; |
5745 | llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy, |
5746 | GenericVoidPtrTy}; |
5747 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
5748 | Result: Int32Ty, Params: llvm::ArrayRef<llvm::Type *>(ArgTys), isVarArg: false); |
5749 | |
5750 | auto Info = |
5751 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(CGF&: *this, E: E->getArg(Arg: 3)); |
5752 | llvm::Value *Kernel = |
5753 | Builder.CreatePointerCast(V: Info.KernelHandle, DestTy: GenericVoidPtrTy); |
5754 | llvm::Value *Block = |
5755 | Builder.CreatePointerCast(V: Info.BlockArg, DestTy: GenericVoidPtrTy); |
5756 | |
5757 | AttrBuilder B(Builder.getContext()); |
5758 | B.addByValAttr(Ty: NDRangeL.getAddress().getElementType()); |
5759 | llvm::AttributeList ByValAttrSet = |
5760 | llvm::AttributeList::get(C&: CGM.getModule().getContext(), Index: 3U, B); |
5761 | |
5762 | auto RTCall = |
5763 | EmitRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name, ExtraAttrs: ByValAttrSet), |
5764 | args: {Queue, Flags, Range, Kernel, Block}); |
5765 | RTCall->setAttributes(ByValAttrSet); |
5766 | return RValue::get(V: RTCall); |
5767 | } |
5768 | assert(NumArgs >= 5 && "Invalid enqueue_kernel signature" ); |
5769 | |
5770 | // Create a temporary array to hold the sizes of local pointer arguments |
5771 | // for the block. \p First is the position of the first size argument. |
5772 | auto CreateArrayForSizeVar = [=](unsigned First) |
5773 | -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> { |
5774 | llvm::APInt ArraySize(32, NumArgs - First); |
5775 | QualType SizeArrayTy = getContext().getConstantArrayType( |
5776 | EltTy: getContext().getSizeType(), ArySize: ArraySize, SizeExpr: nullptr, |
5777 | ASM: ArraySizeModifier::Normal, |
5778 | /*IndexTypeQuals=*/0); |
5779 | auto Tmp = CreateMemTemp(T: SizeArrayTy, Name: "block_sizes" ); |
5780 | llvm::Value *TmpPtr = Tmp.getPointer(); |
5781 | llvm::Value *TmpSize = EmitLifetimeStart( |
5782 | Size: CGM.getDataLayout().getTypeAllocSize(Ty: Tmp.getElementType()), Addr: TmpPtr); |
5783 | llvm::Value *ElemPtr; |
5784 | // Each of the following arguments specifies the size of the corresponding |
5785 | // argument passed to the enqueued block. |
5786 | auto *Zero = llvm::ConstantInt::get(Ty: IntTy, V: 0); |
5787 | for (unsigned I = First; I < NumArgs; ++I) { |
5788 | auto *Index = llvm::ConstantInt::get(Ty: IntTy, V: I - First); |
5789 | auto *GEP = Builder.CreateGEP(Ty: Tmp.getElementType(), Ptr: TmpPtr, |
5790 | IdxList: {Zero, Index}); |
5791 | if (I == First) |
5792 | ElemPtr = GEP; |
5793 | auto *V = |
5794 | Builder.CreateZExtOrTrunc(V: EmitScalarExpr(E: E->getArg(Arg: I)), DestTy: SizeTy); |
5795 | Builder.CreateAlignedStore( |
5796 | Val: V, Ptr: GEP, Align: CGM.getDataLayout().getPrefTypeAlign(Ty: SizeTy)); |
5797 | } |
5798 | return std::tie(args&: ElemPtr, args&: TmpSize, args&: TmpPtr); |
5799 | }; |
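    // Illustrative (assumed user code): for
    //   enqueue_kernel(q, flags, nd, ^(local int *a, local float *b){ ... }, 16, 64);
    // the helper above fills a two-element size_t temporary with {16, 64}, and the
    // varargs form below passes a pointer to its first element plus the count 2.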
5800 | |
5801 | // Could have events and/or varargs. |
5802 | if (E->getArg(Arg: 3)->getType()->isBlockPointerType()) { |
5803 | // No events passed, but has variadic arguments. |
5804 | Name = "__enqueue_kernel_varargs" ; |
5805 | auto Info = |
5806 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(CGF&: *this, E: E->getArg(Arg: 3)); |
5807 | llvm::Value *Kernel = |
5808 | Builder.CreatePointerCast(V: Info.KernelHandle, DestTy: GenericVoidPtrTy); |
5809 | auto *Block = Builder.CreatePointerCast(V: Info.BlockArg, DestTy: GenericVoidPtrTy); |
5810 | llvm::Value *ElemPtr, *TmpSize, *TmpPtr; |
5811 | std::tie(args&: ElemPtr, args&: TmpSize, args&: TmpPtr) = CreateArrayForSizeVar(4); |
5812 | |
5813 | // Create a vector of the arguments, as well as a constant value to |
5814 | // express to the runtime the number of variadic arguments. |
5815 | llvm::Value *const Args[] = {Queue, Flags, |
5816 | Range, Kernel, |
5817 | Block, ConstantInt::get(Ty: IntTy, V: NumArgs - 4), |
5818 | ElemPtr}; |
5819 | llvm::Type *const ArgTys[] = { |
5820 | QueueTy, IntTy, RangeTy, GenericVoidPtrTy, |
5821 | GenericVoidPtrTy, IntTy, ElemPtr->getType()}; |
5822 | |
5823 | llvm::FunctionType *FTy = llvm::FunctionType::get(Result: Int32Ty, Params: ArgTys, isVarArg: false); |
5824 | auto Call = RValue::get( |
5825 | V: EmitRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name), args: Args)); |
5826 | if (TmpSize) |
5827 | EmitLifetimeEnd(Size: TmpSize, Addr: TmpPtr); |
5828 | return Call; |
5829 | } |
5830 | // Any call that reaches this point has event arguments. |
5831 | if (NumArgs >= 7) { |
5832 | llvm::PointerType *PtrTy = llvm::PointerType::get( |
5833 | C&: CGM.getLLVMContext(), |
5834 | AddressSpace: CGM.getContext().getTargetAddressSpace(AS: LangAS::opencl_generic)); |
5835 | |
5836 | llvm::Value *NumEvents = |
5837 | Builder.CreateZExtOrTrunc(V: EmitScalarExpr(E: E->getArg(Arg: 3)), DestTy: Int32Ty); |
5838 | |
5839 | // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth arguments |
5840 | // to be a null pointer constant (including the `0` literal), we can take that |
5841 | // into account and emit a null pointer directly. |
5842 | llvm::Value *EventWaitList = nullptr; |
5843 | if (E->getArg(Arg: 4)->isNullPointerConstant( |
5844 | Ctx&: getContext(), NPC: Expr::NPC_ValueDependentIsNotNull)) { |
5845 | EventWaitList = llvm::ConstantPointerNull::get(T: PtrTy); |
5846 | } else { |
5847 | EventWaitList = |
5848 | E->getArg(Arg: 4)->getType()->isArrayType() |
5849 | ? EmitArrayToPointerDecay(Array: E->getArg(Arg: 4)).emitRawPointer(CGF&: *this) |
5850 | : EmitScalarExpr(E: E->getArg(Arg: 4)); |
5851 | // Convert to generic address space. |
5852 | EventWaitList = Builder.CreatePointerCast(V: EventWaitList, DestTy: PtrTy); |
5853 | } |
5854 | llvm::Value *EventRet = nullptr; |
5855 | if (E->getArg(Arg: 5)->isNullPointerConstant( |
5856 | Ctx&: getContext(), NPC: Expr::NPC_ValueDependentIsNotNull)) { |
5857 | EventRet = llvm::ConstantPointerNull::get(T: PtrTy); |
5858 | } else { |
5859 | EventRet = |
5860 | Builder.CreatePointerCast(V: EmitScalarExpr(E: E->getArg(Arg: 5)), DestTy: PtrTy); |
5861 | } |
5862 | |
5863 | auto Info = |
5864 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(CGF&: *this, E: E->getArg(Arg: 6)); |
5865 | llvm::Value *Kernel = |
5866 | Builder.CreatePointerCast(V: Info.KernelHandle, DestTy: GenericVoidPtrTy); |
5867 | llvm::Value *Block = |
5868 | Builder.CreatePointerCast(V: Info.BlockArg, DestTy: GenericVoidPtrTy); |
5869 | |
5870 | std::vector<llvm::Type *> ArgTys = { |
5871 | QueueTy, Int32Ty, RangeTy, Int32Ty, |
5872 | PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy}; |
5873 | |
5874 | std::vector<llvm::Value *> Args = {Queue, Flags, Range, |
5875 | NumEvents, EventWaitList, EventRet, |
5876 | Kernel, Block}; |
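// Illustrative sketch (not from the original source): a call such as
//   enqueue_kernel(q, flags, ndr, 2, wait_list, &evt, ^(void){ ... });
// maps onto the __enqueue_kernel_basic_events signature below as
//   (q, flags, ndr, 2, wait_list, &evt, kernel, block);
// if the block also takes local pointer arguments, the events+varargs
// variant additionally receives the argument count and the sizes array.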
5877 | |
5878 | if (NumArgs == 7) { |
5879 | // Has events but no variadics. |
5880 | Name = "__enqueue_kernel_basic_events"; |
5881 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
5882 | Result: Int32Ty, Params: llvm::ArrayRef<llvm::Type *>(ArgTys), isVarArg: false); |
5883 | return RValue::get( |
5884 | V: EmitRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name), |
5885 | args: llvm::ArrayRef<llvm::Value *>(Args))); |
5886 | } |
5887 | // Has event info and variadics |
5888 | // Pass the number of variadics to the runtime function too. |
5889 | Args.push_back(x: ConstantInt::get(Ty: Int32Ty, V: NumArgs - 7)); |
5890 | ArgTys.push_back(x: Int32Ty); |
5891 | Name = "__enqueue_kernel_events_varargs"; |
5892 | |
5893 | llvm::Value *ElemPtr, *TmpSize, *TmpPtr; |
5894 | std::tie(args&: ElemPtr, args&: TmpSize, args&: TmpPtr) = CreateArrayForSizeVar(7); |
5895 | Args.push_back(x: ElemPtr); |
5896 | ArgTys.push_back(x: ElemPtr->getType()); |
5897 | |
5898 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
5899 | Result: Int32Ty, Params: llvm::ArrayRef<llvm::Type *>(ArgTys), isVarArg: false); |
5900 | auto Call = |
5901 | RValue::get(V: EmitRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name), |
5902 | args: llvm::ArrayRef<llvm::Value *>(Args))); |
5903 | if (TmpSize) |
5904 | EmitLifetimeEnd(Size: TmpSize, Addr: TmpPtr); |
5905 | return Call; |
5906 | } |
5907 | llvm_unreachable("Unexpected enqueue_kernel signature"); |
5908 | } |
5909 | // OpenCL v2.0 s6.13.17.6 - Kernel query functions need a bitcast of the |
5910 | // block parameter. |
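// Illustrative example (not from the original source):
//   uint wgs = get_kernel_work_group_size(^(local void *p){ ... });
// is lowered to __get_kernel_work_group_size_impl(kernel, block), with both
// operands cast to generic void pointers first.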
5911 | case Builtin::BIget_kernel_work_group_size: { |
5912 | llvm::Type *GenericVoidPtrTy = Builder.getPtrTy( |
5913 | AddrSpace: getContext().getTargetAddressSpace(AS: LangAS::opencl_generic)); |
5914 | auto Info = |
5915 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(CGF&: *this, E: E->getArg(Arg: 0)); |
5916 | Value *Kernel = |
5917 | Builder.CreatePointerCast(V: Info.KernelHandle, DestTy: GenericVoidPtrTy); |
5918 | Value *Arg = Builder.CreatePointerCast(V: Info.BlockArg, DestTy: GenericVoidPtrTy); |
5919 | return RValue::get(V: EmitRuntimeCall( |
5920 | callee: CGM.CreateRuntimeFunction( |
5921 | Ty: llvm::FunctionType::get(Result: IntTy, Params: {GenericVoidPtrTy, GenericVoidPtrTy}, |
5922 | isVarArg: false), |
5923 | Name: "__get_kernel_work_group_size_impl"), |
5924 | args: {Kernel, Arg})); |
5925 | } |
5926 | case Builtin::BIget_kernel_preferred_work_group_size_multiple: { |
5927 | llvm::Type *GenericVoidPtrTy = Builder.getPtrTy( |
5928 | AddrSpace: getContext().getTargetAddressSpace(AS: LangAS::opencl_generic)); |
5929 | auto Info = |
5930 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(CGF&: *this, E: E->getArg(Arg: 0)); |
5931 | Value *Kernel = |
5932 | Builder.CreatePointerCast(V: Info.KernelHandle, DestTy: GenericVoidPtrTy); |
5933 | Value *Arg = Builder.CreatePointerCast(V: Info.BlockArg, DestTy: GenericVoidPtrTy); |
5934 | return RValue::get(V: EmitRuntimeCall( |
5935 | callee: CGM.CreateRuntimeFunction( |
5936 | Ty: llvm::FunctionType::get(Result: IntTy, Params: {GenericVoidPtrTy, GenericVoidPtrTy}, |
5937 | isVarArg: false), |
5938 | Name: "__get_kernel_preferred_work_group_size_multiple_impl"), |
5939 | args: {Kernel, Arg})); |
5940 | } |
5941 | case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: |
5942 | case Builtin::BIget_kernel_sub_group_count_for_ndrange: { |
5943 | llvm::Type *GenericVoidPtrTy = Builder.getPtrTy( |
5944 | AddrSpace: getContext().getTargetAddressSpace(AS: LangAS::opencl_generic)); |
5945 | LValue NDRangeL = EmitAggExprToLValue(E: E->getArg(Arg: 0)); |
5946 | llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(CGF&: *this); |
5947 | auto Info = |
5948 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(CGF&: *this, E: E->getArg(Arg: 1)); |
5949 | Value *Kernel = |
5950 | Builder.CreatePointerCast(V: Info.KernelHandle, DestTy: GenericVoidPtrTy); |
5951 | Value *Block = Builder.CreatePointerCast(V: Info.BlockArg, DestTy: GenericVoidPtrTy); |
5952 | const char *Name = |
5953 | BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange |
5954 | ? "__get_kernel_max_sub_group_size_for_ndrange_impl" |
5955 | : "__get_kernel_sub_group_count_for_ndrange_impl"; |
5956 | return RValue::get(V: EmitRuntimeCall( |
5957 | callee: CGM.CreateRuntimeFunction( |
5958 | Ty: llvm::FunctionType::get( |
5959 | Result: IntTy, Params: {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy}, |
5960 | isVarArg: false), |
5961 | Name), |
5962 | args: {NDRange, Kernel, Block})); |
5963 | } |
5964 | case Builtin::BI__builtin_store_half: |
5965 | case Builtin::BI__builtin_store_halff: { |
5966 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 0)); |
5967 | Address Address = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
5968 | Value *HalfVal = Builder.CreateFPTrunc(V: Val, DestTy: Builder.getHalfTy()); |
5969 | Builder.CreateStore(Val: HalfVal, Addr: Address); |
5970 | return RValue::get(V: nullptr); |
5971 | } |
5972 | case Builtin::BI__builtin_load_half: { |
5973 | Address Address = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
5974 | Value *HalfVal = Builder.CreateLoad(Addr: Address); |
5975 | return RValue::get(V: Builder.CreateFPExt(V: HalfVal, DestTy: Builder.getDoubleTy())); |
5976 | } |
5977 | case Builtin::BI__builtin_load_halff: { |
5978 | Address Address = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
5979 | Value *HalfVal = Builder.CreateLoad(Addr: Address); |
5980 | return RValue::get(V: Builder.CreateFPExt(V: HalfVal, DestTy: Builder.getFloatTy())); |
5981 | } |
5982 | case Builtin::BI__builtin_printf: |
5983 | case Builtin::BIprintf: |
5984 | if (getTarget().getTriple().isNVPTX() || |
5985 | getTarget().getTriple().isAMDGCN() || |
5986 | (getTarget().getTriple().isSPIRV() && |
5987 | getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) { |
5988 | if (getLangOpts().OpenMPIsTargetDevice) |
5989 | return EmitOpenMPDevicePrintfCallExpr(E); |
5990 | if (getTarget().getTriple().isNVPTX()) |
5991 | return EmitNVPTXDevicePrintfCallExpr(E); |
5992 | if ((getTarget().getTriple().isAMDGCN() || |
5993 | getTarget().getTriple().isSPIRV()) && |
5994 | getLangOpts().HIP) |
5995 | return EmitAMDGPUDevicePrintfCallExpr(E); |
5996 | } |
5997 | |
5998 | break; |
5999 | case Builtin::BI__builtin_canonicalize: |
6000 | case Builtin::BI__builtin_canonicalizef: |
6001 | case Builtin::BI__builtin_canonicalizef16: |
6002 | case Builtin::BI__builtin_canonicalizel: |
6003 | return RValue::get( |
6004 | V: emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, IntrinsicID: Intrinsic::canonicalize)); |
6005 | |
6006 | case Builtin::BI__builtin_thread_pointer: { |
6007 | if (!getContext().getTargetInfo().isTLSSupported()) |
6008 | CGM.ErrorUnsupported(S: E, Type: "__builtin_thread_pointer"); |
6009 | // Fall through - it's already mapped to the intrinsic by ClangBuiltin. |
6010 | break; |
6011 | } |
6012 | case Builtin::BI__builtin_os_log_format: |
6013 | return emitBuiltinOSLogFormat(E: *E); |
6014 | |
6015 | case Builtin::BI__xray_customevent: { |
6016 | if (!ShouldXRayInstrumentFunction()) |
6017 | return RValue::getIgnored(); |
6018 | |
6019 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( |
6020 | K: XRayInstrKind::Custom)) |
6021 | return RValue::getIgnored(); |
6022 | |
6023 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) |
6024 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents()) |
6025 | return RValue::getIgnored(); |
6026 | |
6027 | Function *F = CGM.getIntrinsic(IID: Intrinsic::xray_customevent); |
6028 | auto FTy = F->getFunctionType(); |
6029 | auto Arg0 = E->getArg(Arg: 0); |
6030 | auto Arg0Val = EmitScalarExpr(E: Arg0); |
6031 | auto Arg0Ty = Arg0->getType(); |
6032 | auto PTy0 = FTy->getParamType(i: 0); |
6033 | if (PTy0 != Arg0Val->getType()) { |
6034 | if (Arg0Ty->isArrayType()) |
6035 | Arg0Val = EmitArrayToPointerDecay(Array: Arg0).emitRawPointer(CGF&: *this); |
6036 | else |
6037 | Arg0Val = Builder.CreatePointerCast(V: Arg0Val, DestTy: PTy0); |
6038 | } |
6039 | auto Arg1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
6040 | auto PTy1 = FTy->getParamType(i: 1); |
6041 | if (PTy1 != Arg1->getType()) |
6042 | Arg1 = Builder.CreateTruncOrBitCast(V: Arg1, DestTy: PTy1); |
6043 | return RValue::get(V: Builder.CreateCall(Callee: F, Args: {Arg0Val, Arg1})); |
6044 | } |
6045 | |
6046 | case Builtin::BI__xray_typedevent: { |
6047 | // TODO: There should be a way to always emit events even if the current |
6048 | // function is not instrumented. Losing events in a stream can cripple |
6049 | // a trace. |
6050 | if (!ShouldXRayInstrumentFunction()) |
6051 | return RValue::getIgnored(); |
6052 | |
6053 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( |
6054 | K: XRayInstrKind::Typed)) |
6055 | return RValue::getIgnored(); |
6056 | |
6057 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) |
6058 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents()) |
6059 | return RValue::getIgnored(); |
6060 | |
6061 | Function *F = CGM.getIntrinsic(IID: Intrinsic::xray_typedevent); |
6062 | auto FTy = F->getFunctionType(); |
6063 | auto Arg0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
6064 | auto PTy0 = FTy->getParamType(i: 0); |
6065 | if (PTy0 != Arg0->getType()) |
6066 | Arg0 = Builder.CreateTruncOrBitCast(V: Arg0, DestTy: PTy0); |
6067 | auto Arg1 = E->getArg(Arg: 1); |
6068 | auto Arg1Val = EmitScalarExpr(E: Arg1); |
6069 | auto Arg1Ty = Arg1->getType(); |
6070 | auto PTy1 = FTy->getParamType(i: 1); |
6071 | if (PTy1 != Arg1Val->getType()) { |
6072 | if (Arg1Ty->isArrayType()) |
6073 | Arg1Val = EmitArrayToPointerDecay(Array: Arg1).emitRawPointer(CGF&: *this); |
6074 | else |
6075 | Arg1Val = Builder.CreatePointerCast(V: Arg1Val, DestTy: PTy1); |
6076 | } |
6077 | auto Arg2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
6078 | auto PTy2 = FTy->getParamType(i: 2); |
6079 | if (PTy2 != Arg2->getType()) |
6080 | Arg2 = Builder.CreateTruncOrBitCast(V: Arg2, DestTy: PTy2); |
6081 | return RValue::get(V: Builder.CreateCall(Callee: F, Args: {Arg0, Arg1Val, Arg2})); |
6082 | } |
6083 | |
6084 | case Builtin::BI__builtin_ms_va_start: |
6085 | case Builtin::BI__builtin_ms_va_end: |
6086 | return RValue::get( |
6087 | V: EmitVAStartEnd(ArgValue: EmitMSVAListRef(E: E->getArg(Arg: 0)).emitRawPointer(CGF&: *this), |
6088 | IsStart: BuiltinID == Builtin::BI__builtin_ms_va_start)); |
6089 | |
6090 | case Builtin::BI__builtin_ms_va_copy: { |
6091 | // Lower this manually. We can't reliably determine whether or not any |
6092 | // given va_copy() is for a Win64 va_list from the calling convention |
6093 | // alone, because it's legal to do this from a System V ABI function. |
6094 | // With opaque pointer types, we won't have enough information in LLVM |
6095 | // IR to determine this from the argument types, either. Best to do it |
6096 | // now, while we have enough information. |
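// (Sketch: on Win64 a va_list is just a char *, so va_copy reduces to the
// single pointer load/store emitted below.)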
6097 | Address DestAddr = EmitMSVAListRef(E: E->getArg(Arg: 0)); |
6098 | Address SrcAddr = EmitMSVAListRef(E: E->getArg(Arg: 1)); |
6099 | |
6100 | DestAddr = DestAddr.withElementType(ElemTy: Int8PtrTy); |
6101 | SrcAddr = SrcAddr.withElementType(ElemTy: Int8PtrTy); |
6102 | |
6103 | Value *ArgPtr = Builder.CreateLoad(Addr: SrcAddr, Name: "ap.val"); |
6104 | return RValue::get(V: Builder.CreateStore(Val: ArgPtr, Addr: DestAddr)); |
6105 | } |
6106 | |
6107 | case Builtin::BI__builtin_get_device_side_mangled_name: { |
6108 | auto Name = CGM.getCUDARuntime().getDeviceSideName( |
6109 | ND: cast<DeclRefExpr>(Val: E->getArg(Arg: 0)->IgnoreImpCasts())->getDecl()); |
6110 | auto Str = CGM.GetAddrOfConstantCString(Str: Name, GlobalName: ""); |
6111 | return RValue::get(V: Str.getPointer()); |
6112 | } |
6113 | } |
6114 | |
6115 | // If this is an alias for a lib function (e.g. __builtin_sin), emit |
6116 | // the call using the normal call path, but using the unmangled |
6117 | // version of the function name. |
6118 | if (getContext().BuiltinInfo.isLibFunction(ID: BuiltinID)) |
6119 | return emitLibraryCall(CGF&: *this, FD, E, |
6120 | calleeValue: CGM.getBuiltinLibFunction(FD, BuiltinID)); |
6121 | |
6122 | // If this is a predefined lib function (e.g. malloc), emit the call |
6123 | // using exactly the normal call path. |
6124 | if (getContext().BuiltinInfo.isPredefinedLibFunction(ID: BuiltinID)) |
6125 | return emitLibraryCall(CGF&: *this, FD, E, calleeValue: CGM.getRawFunctionPointer(GD: FD)); |
6126 | |
6127 | // Check that a call to a target specific builtin has the correct target |
6128 | // features. |
6129 | // This check is done down here so that non-target-specific builtins skip it; |
6130 | // however, if generic builtins start to require generic target features then |
6131 | // we can move this up to the beginning of the function. |
6132 | checkTargetFeatures(E, TargetDecl: FD); |
6133 | |
6134 | if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(ID: BuiltinID)) |
6135 | LargestVectorWidth = std::max(a: LargestVectorWidth, b: VectorWidth); |
6136 | |
6137 | // See if we have a target specific intrinsic. |
6138 | StringRef Name = getContext().BuiltinInfo.getName(ID: BuiltinID); |
6139 | Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic; |
6140 | StringRef Prefix = |
6141 | llvm::Triple::getArchTypePrefix(Kind: getTarget().getTriple().getArch()); |
6142 | if (!Prefix.empty()) { |
6143 | IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix: Prefix.data(), BuiltinName: Name); |
6144 | if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" && |
6145 | getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA) |
6146 | IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix: "amdgcn", BuiltinName: Name); |
6147 | // NOTE: we don't need to perform a compatibility flag check here since the |
6148 | // intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters the |
6149 | // MS builtins via ALL_MS_LANGUAGES, so they were already filtered out earlier. |
6150 | if (IntrinsicID == Intrinsic::not_intrinsic) |
6151 | IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix: Prefix.data(), BuiltinName: Name); |
6152 | } |
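// Illustrative example (assumption, not taken from this file): when targeting
// amdgcn, Prefix is "amdgcn", so a builtin such as __builtin_amdgcn_s_barrier
// resolves to Intrinsic::amdgcn_s_barrier through its ClangBuiltin<> entry in
// IntrinsicsAMDGPU.td.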
6153 | |
6154 | if (IntrinsicID != Intrinsic::not_intrinsic) { |
6155 | SmallVector<Value*, 16> Args; |
6156 | |
6157 | // Find out if any arguments are required to be integer constant |
6158 | // expressions. |
6159 | unsigned ICEArguments = 0; |
6160 | ASTContext::GetBuiltinTypeError Error; |
6161 | getContext().GetBuiltinType(ID: BuiltinID, Error, IntegerConstantArgs: &ICEArguments); |
6162 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
6163 | |
6164 | Function *F = CGM.getIntrinsic(IID: IntrinsicID); |
6165 | llvm::FunctionType *FTy = F->getFunctionType(); |
6166 | |
6167 | for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { |
6168 | Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, Idx: i, E); |
6169 | // If the intrinsic arg type is different from the builtin arg type |
6170 | // we need to do a bit cast. |
6171 | llvm::Type *PTy = FTy->getParamType(i); |
6172 | if (PTy != ArgValue->getType()) { |
6173 | // XXX - vector of pointers? |
6174 | if (auto *PtrTy = dyn_cast<llvm::PointerType>(Val: PTy)) { |
6175 | if (PtrTy->getAddressSpace() != |
6176 | ArgValue->getType()->getPointerAddressSpace()) { |
6177 | ArgValue = Builder.CreateAddrSpaceCast( |
6178 | V: ArgValue, DestTy: llvm::PointerType::get(C&: getLLVMContext(), |
6179 | AddressSpace: PtrTy->getAddressSpace())); |
6180 | } |
6181 | } |
6182 | |
6183 | // Cast a vector type (e.g., v256i32) to x86_amx; this only happens |
6184 | // in AMX intrinsics. |
6185 | if (PTy->isX86_AMXTy()) |
6186 | ArgValue = Builder.CreateIntrinsic(ID: Intrinsic::x86_cast_vector_to_tile, |
6187 | Types: {ArgValue->getType()}, Args: {ArgValue}); |
6188 | else |
6189 | ArgValue = Builder.CreateBitCast(V: ArgValue, DestTy: PTy); |
6190 | } |
6191 | |
6192 | Args.push_back(Elt: ArgValue); |
6193 | } |
6194 | |
6195 | Value *V = Builder.CreateCall(Callee: F, Args); |
6196 | QualType BuiltinRetType = E->getType(); |
6197 | |
6198 | llvm::Type *RetTy = VoidTy; |
6199 | if (!BuiltinRetType->isVoidType()) |
6200 | RetTy = ConvertType(T: BuiltinRetType); |
6201 | |
6202 | if (RetTy != V->getType()) { |
6203 | // XXX - vector of pointers? |
6204 | if (auto *PtrTy = dyn_cast<llvm::PointerType>(Val: RetTy)) { |
6205 | if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) { |
6206 | V = Builder.CreateAddrSpaceCast( |
6207 | V, DestTy: llvm::PointerType::get(C&: getLLVMContext(), |
6208 | AddressSpace: PtrTy->getAddressSpace())); |
6209 | } |
6210 | } |
6211 | |
6212 | // Cast x86_amx to a vector type (e.g., v256i32); this only happens |
6213 | // in AMX intrinsics. |
6214 | if (V->getType()->isX86_AMXTy()) |
6215 | V = Builder.CreateIntrinsic(ID: Intrinsic::x86_cast_tile_to_vector, Types: {RetTy}, |
6216 | Args: {V}); |
6217 | else |
6218 | V = Builder.CreateBitCast(V, DestTy: RetTy); |
6219 | } |
6220 | |
6221 | if (RetTy->isVoidTy()) |
6222 | return RValue::get(V: nullptr); |
6223 | |
6224 | return RValue::get(V); |
6225 | } |
6226 | |
6227 | // Some target-specific builtins can have aggregate return values, e.g. |
6228 | // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force |
6229 | // ReturnValue to be non-null, so that the target-specific emission code can |
6230 | // always just emit into it. |
6231 | TypeEvaluationKind EvalKind = getEvaluationKind(T: E->getType()); |
6232 | if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) { |
6233 | Address DestPtr = CreateMemTemp(T: E->getType(), Name: "agg.tmp"); |
6234 | ReturnValue = ReturnValueSlot(DestPtr, false); |
6235 | } |
6236 | |
6237 | // Now see if we can emit a target-specific builtin. |
6238 | if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { |
6239 | switch (EvalKind) { |
6240 | case TEK_Scalar: |
6241 | if (V->getType()->isVoidTy()) |
6242 | return RValue::get(V: nullptr); |
6243 | return RValue::get(V); |
6244 | case TEK_Aggregate: |
6245 | return RValue::getAggregate(addr: ReturnValue.getAddress(), |
6246 | isVolatile: ReturnValue.isVolatile()); |
6247 | case TEK_Complex: |
6248 | llvm_unreachable("No current target builtin returns complex"); |
6249 | } |
6250 | llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr"); |
6251 | } |
6252 | |
6253 | // EmitHLSLBuiltinExpr will check getLangOpts().HLSL |
6254 | if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E)) |
6255 | return RValue::get(V); |
6256 | |
6257 | if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice) |
6258 | return EmitHipStdParUnsupportedBuiltin(CGF: this, FD); |
6259 | |
6260 | ErrorUnsupported(S: E, Type: "builtin function"); |
6261 | |
6262 | // Unknown builtin, for now just dump it out and return undef. |
6263 | return GetUndefRValue(Ty: E->getType()); |
6264 | } |
6265 | |
6266 | static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, |
6267 | unsigned BuiltinID, const CallExpr *E, |
6268 | ReturnValueSlot ReturnValue, |
6269 | llvm::Triple::ArchType Arch) { |
6270 | // When compiling in HipStdPar mode we have to be conservative in rejecting |
6271 | // target specific features in the FE, and defer the possible error to the |
6272 | // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is |
6273 | // referenced by an accelerator executable function, we emit an error. |
6274 | // Returning nullptr here leads to the builtin being handled in |
6275 | // EmitHipStdParUnsupportedBuiltin. |
6276 | if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice && |
6277 | Arch != CGF->getTarget().getTriple().getArch()) |
6278 | return nullptr; |
6279 | |
6280 | switch (Arch) { |
6281 | case llvm::Triple::arm: |
6282 | case llvm::Triple::armeb: |
6283 | case llvm::Triple::thumb: |
6284 | case llvm::Triple::thumbeb: |
6285 | return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch); |
6286 | case llvm::Triple::aarch64: |
6287 | case llvm::Triple::aarch64_32: |
6288 | case llvm::Triple::aarch64_be: |
6289 | return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch); |
6290 | case llvm::Triple::bpfeb: |
6291 | case llvm::Triple::bpfel: |
6292 | return CGF->EmitBPFBuiltinExpr(BuiltinID, E); |
6293 | case llvm::Triple::x86: |
6294 | case llvm::Triple::x86_64: |
6295 | return CGF->EmitX86BuiltinExpr(BuiltinID, E); |
6296 | case llvm::Triple::ppc: |
6297 | case llvm::Triple::ppcle: |
6298 | case llvm::Triple::ppc64: |
6299 | case llvm::Triple::ppc64le: |
6300 | return CGF->EmitPPCBuiltinExpr(BuiltinID, E); |
6301 | case llvm::Triple::r600: |
6302 | case llvm::Triple::amdgcn: |
6303 | return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E); |
6304 | case llvm::Triple::systemz: |
6305 | return CGF->EmitSystemZBuiltinExpr(BuiltinID, E); |
6306 | case llvm::Triple::nvptx: |
6307 | case llvm::Triple::nvptx64: |
6308 | return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E); |
6309 | case llvm::Triple::wasm32: |
6310 | case llvm::Triple::wasm64: |
6311 | return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E); |
6312 | case llvm::Triple::hexagon: |
6313 | return CGF->EmitHexagonBuiltinExpr(BuiltinID, E); |
6314 | case llvm::Triple::riscv32: |
6315 | case llvm::Triple::riscv64: |
6316 | return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue); |
6317 | case llvm::Triple::spirv64: |
6318 | if (CGF->getTarget().getTriple().getOS() != llvm::Triple::OSType::AMDHSA) |
6319 | return nullptr; |
6320 | return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E); |
6321 | default: |
6322 | return nullptr; |
6323 | } |
6324 | } |
6325 | |
6326 | Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID, |
6327 | const CallExpr *E, |
6328 | ReturnValueSlot ReturnValue) { |
6329 | if (getContext().BuiltinInfo.isAuxBuiltinID(ID: BuiltinID)) { |
6330 | assert(getContext().getAuxTargetInfo() && "Missing aux target info"); |
6331 | return EmitTargetArchBuiltinExpr( |
6332 | CGF: this, BuiltinID: getContext().BuiltinInfo.getAuxBuiltinID(ID: BuiltinID), E, |
6333 | ReturnValue, Arch: getContext().getAuxTargetInfo()->getTriple().getArch()); |
6334 | } |
6335 | |
6336 | return EmitTargetArchBuiltinExpr(CGF: this, BuiltinID, E, ReturnValue, |
6337 | Arch: getTarget().getTriple().getArch()); |
6338 | } |
6339 | |
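// GetNeonType maps a NeonTypeFlags element kind to the corresponding fixed
// vector type, doubling the lane count when the quad flag is set. For
// example, Int8 yields <8 x i8> (<16 x i8> for quad types) and Float32 yields
// <2 x float> (<4 x float> for quad types).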
6340 | static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF, |
6341 | NeonTypeFlags TypeFlags, |
6342 | bool HasLegalHalfType = true, |
6343 | bool V1Ty = false, |
6344 | bool AllowBFloatArgsAndRet = true) { |
6345 | int IsQuad = TypeFlags.isQuad(); |
6346 | switch (TypeFlags.getEltType()) { |
6347 | case NeonTypeFlags::Int8: |
6348 | case NeonTypeFlags::Poly8: |
6349 | return llvm::FixedVectorType::get(ElementType: CGF->Int8Ty, NumElts: V1Ty ? 1 : (8 << IsQuad)); |
6350 | case NeonTypeFlags::Int16: |
6351 | case NeonTypeFlags::Poly16: |
6352 | return llvm::FixedVectorType::get(ElementType: CGF->Int16Ty, NumElts: V1Ty ? 1 : (4 << IsQuad)); |
6353 | case NeonTypeFlags::BFloat16: |
6354 | if (AllowBFloatArgsAndRet) |
6355 | return llvm::FixedVectorType::get(ElementType: CGF->BFloatTy, NumElts: V1Ty ? 1 : (4 << IsQuad)); |
6356 | else |
6357 | return llvm::FixedVectorType::get(ElementType: CGF->Int16Ty, NumElts: V1Ty ? 1 : (4 << IsQuad)); |
6358 | case NeonTypeFlags::Float16: |
6359 | if (HasLegalHalfType) |
6360 | return llvm::FixedVectorType::get(ElementType: CGF->HalfTy, NumElts: V1Ty ? 1 : (4 << IsQuad)); |
6361 | else |
6362 | return llvm::FixedVectorType::get(ElementType: CGF->Int16Ty, NumElts: V1Ty ? 1 : (4 << IsQuad)); |
6363 | case NeonTypeFlags::Int32: |
6364 | return llvm::FixedVectorType::get(ElementType: CGF->Int32Ty, NumElts: V1Ty ? 1 : (2 << IsQuad)); |
6365 | case NeonTypeFlags::Int64: |
6366 | case NeonTypeFlags::Poly64: |
6367 | return llvm::FixedVectorType::get(ElementType: CGF->Int64Ty, NumElts: V1Ty ? 1 : (1 << IsQuad)); |
6368 | case NeonTypeFlags::Poly128: |
6369 | // FIXME: i128 and f128 don't get full support in Clang and LLVM. |
6370 | // A lot of the i128 and f128 API is missing, |
6371 | // so we use v16i8 to represent poly128 and let it get pattern matched. |
6372 | return llvm::FixedVectorType::get(ElementType: CGF->Int8Ty, NumElts: 16); |
6373 | case NeonTypeFlags::Float32: |
6374 | return llvm::FixedVectorType::get(ElementType: CGF->FloatTy, NumElts: V1Ty ? 1 : (2 << IsQuad)); |
6375 | case NeonTypeFlags::Float64: |
6376 | return llvm::FixedVectorType::get(ElementType: CGF->DoubleTy, NumElts: V1Ty ? 1 : (1 << IsQuad)); |
6377 | } |
6378 | llvm_unreachable("Unknown vector element type!"); |
6379 | } |
6380 | |
6381 | static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF, |
6382 | NeonTypeFlags IntTypeFlags) { |
6383 | int IsQuad = IntTypeFlags.isQuad(); |
6384 | switch (IntTypeFlags.getEltType()) { |
6385 | case NeonTypeFlags::Int16: |
6386 | return llvm::FixedVectorType::get(ElementType: CGF->HalfTy, NumElts: (4 << IsQuad)); |
6387 | case NeonTypeFlags::Int32: |
6388 | return llvm::FixedVectorType::get(ElementType: CGF->FloatTy, NumElts: (2 << IsQuad)); |
6389 | case NeonTypeFlags::Int64: |
6390 | return llvm::FixedVectorType::get(ElementType: CGF->DoubleTy, NumElts: (1 << IsQuad)); |
6391 | default: |
6392 | llvm_unreachable("Type can't be converted to floating-point!"); |
6393 | } |
6394 | } |
6395 | |
6396 | Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C, |
6397 | const ElementCount &Count) { |
6398 | Value *SV = llvm::ConstantVector::getSplat(EC: Count, Elt: C); |
6399 | return Builder.CreateShuffleVector(V1: V, V2: V, Mask: SV, Name: "lane"); |
6400 | } |
6401 | |
6402 | Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) { |
6403 | ElementCount EC = cast<llvm::VectorType>(Val: V->getType())->getElementCount(); |
6404 | return EmitNeonSplat(V, C, Count: EC); |
6405 | } |
6406 | |
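// EmitNeonCall bitcasts each operand to the parameter type the intrinsic
// expects before emitting the call; when `shift` is non-zero, the operand at
// that index is instead replaced by a splatted shift-amount constant (negated
// for right shifts) via EmitNeonShiftVector.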
6407 | Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops, |
6408 | const char *name, |
6409 | unsigned shift, bool rightshift) { |
6410 | unsigned j = 0; |
6411 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); |
6412 | ai != ae; ++ai, ++j) { |
6413 | if (F->isConstrainedFPIntrinsic()) |
6414 | if (ai->getType()->isMetadataTy()) |
6415 | continue; |
6416 | if (shift > 0 && shift == j) |
6417 | Ops[j] = EmitNeonShiftVector(V: Ops[j], Ty: ai->getType(), negateForRightShift: rightshift); |
6418 | else |
6419 | Ops[j] = Builder.CreateBitCast(V: Ops[j], DestTy: ai->getType(), Name: name); |
6420 | } |
6421 | |
6422 | if (F->isConstrainedFPIntrinsic()) |
6423 | return Builder.CreateConstrainedFPCall(Callee: F, Args: Ops, Name: name); |
6424 | else |
6425 | return Builder.CreateCall(Callee: F, Args: Ops, Name: name); |
6426 | } |
6427 | |
6428 | Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty, |
6429 | bool neg) { |
6430 | int SV = cast<ConstantInt>(Val: V)->getSExtValue(); |
6431 | return ConstantInt::get(Ty, V: neg ? -SV : SV); |
6432 | } |
6433 | |
6434 | // Right-shift a vector by a constant. |
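// Worked example: for a <4 x i32> vector, a requested shift of 32 (the full
// element width) cannot be expressed as lshr/ashr, so the code below returns
// a zero vector for unsigned shifts and clamps the amount to 31 for signed
// shifts.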
6435 | Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift, |
6436 | llvm::Type *Ty, bool usgn, |
6437 | const char *name) { |
6438 | llvm::VectorType *VTy = cast<llvm::VectorType>(Val: Ty); |
6439 | |
6440 | int ShiftAmt = cast<ConstantInt>(Val: Shift)->getSExtValue(); |
6441 | int EltSize = VTy->getScalarSizeInBits(); |
6442 | |
6443 | Vec = Builder.CreateBitCast(V: Vec, DestTy: Ty); |
6444 | |
6445 | // lshr/ashr are undefined when the shift amount is equal to the vector |
6446 | // element size. |
6447 | if (ShiftAmt == EltSize) { |
6448 | if (usgn) { |
6449 | // Right-shifting an unsigned value by its size yields 0. |
6450 | return llvm::ConstantAggregateZero::get(Ty: VTy); |
6451 | } else { |
6452 | // Right-shifting a signed value by its size is equivalent |
6453 | // to a shift of size-1. |
6454 | --ShiftAmt; |
6455 | Shift = ConstantInt::get(Ty: VTy->getElementType(), V: ShiftAmt); |
6456 | } |
6457 | } |
6458 | |
6459 | Shift = EmitNeonShiftVector(V: Shift, Ty, neg: false); |
6460 | if (usgn) |
6461 | return Builder.CreateLShr(LHS: Vec, RHS: Shift, Name: name); |
6462 | else |
6463 | return Builder.CreateAShr(LHS: Vec, RHS: Shift, Name: name); |
6464 | } |
6465 | |
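// A rough guide to the flags below (inferred from their use in this file, not
// a normative description): they form the TypeModifier word of the intrinsic
// tables and describe how to derive the LLVM intrinsic's overloaded types and
// signature from the builtin call, e.g. whether the return type and/or the
// leading argument types participate and whether an unsigned alternative
// intrinsic exists.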
6466 | enum { |
6467 | AddRetType = (1 << 0), |
6468 | Add1ArgType = (1 << 1), |
6469 | Add2ArgTypes = (1 << 2), |
6470 | |
6471 | VectorizeRetType = (1 << 3), |
6472 | VectorizeArgTypes = (1 << 4), |
6473 | |
6474 | InventFloatType = (1 << 5), |
6475 | UnsignedAlts = (1 << 6), |
6476 | |
6477 | Use64BitVectors = (1 << 7), |
6478 | Use128BitVectors = (1 << 8), |
6479 | |
6480 | Vectorize1ArgType = Add1ArgType | VectorizeArgTypes, |
6481 | VectorRet = AddRetType | VectorizeRetType, |
6482 | VectorRetGetArgs01 = |
6483 | AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes, |
6484 | FpCmpzModifiers = |
6485 | AddRetType | VectorizeRetType | Add1ArgType | InventFloatType |
6486 | }; |
6487 | |
6488 | namespace { |
6489 | struct ARMVectorIntrinsicInfo { |
6490 | const char *NameHint; |
6491 | unsigned BuiltinID; |
6492 | unsigned LLVMIntrinsic; |
6493 | unsigned AltLLVMIntrinsic; |
6494 | uint64_t TypeModifier; |
6495 | |
6496 | bool operator<(unsigned RHSBuiltinID) const { |
6497 | return BuiltinID < RHSBuiltinID; |
6498 | } |
6499 | bool operator<(const ARMVectorIntrinsicInfo &TE) const { |
6500 | return BuiltinID < TE.BuiltinID; |
6501 | } |
6502 | }; |
6503 | } // end anonymous namespace |
6504 | |
6505 | #define NEONMAP0(NameBase) \ |
6506 | { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 } |
6507 | |
6508 | #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ |
6509 | { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \ |
6510 | Intrinsic::LLVMIntrinsic, 0, TypeModifier } |
6511 | |
6512 | #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \ |
6513 | { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \ |
6514 | Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \ |
6515 | TypeModifier } |
6516 | |
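// For reference, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }
// The tables below are assumed to stay sorted by builtin ID (hence the
// roughly alphabetical order) so that lookups can binary-search them.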
6517 | static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap[] = { |
6518 | NEONMAP1(__a32_vcvt_bf16_f32, arm_neon_vcvtfp2bf, 0), |
6519 | NEONMAP0(splat_lane_v), |
6520 | NEONMAP0(splat_laneq_v), |
6521 | NEONMAP0(splatq_lane_v), |
6522 | NEONMAP0(splatq_laneq_v), |
6523 | NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), |
6524 | NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), |
6525 | NEONMAP1(vabs_v, arm_neon_vabs, 0), |
6526 | NEONMAP1(vabsq_v, arm_neon_vabs, 0), |
6527 | NEONMAP0(vadd_v), |
6528 | NEONMAP0(vaddhn_v), |
6529 | NEONMAP0(vaddq_v), |
6530 | NEONMAP1(vaesdq_u8, arm_neon_aesd, 0), |
6531 | NEONMAP1(vaeseq_u8, arm_neon_aese, 0), |
6532 | NEONMAP1(vaesimcq_u8, arm_neon_aesimc, 0), |
6533 | NEONMAP1(vaesmcq_u8, arm_neon_aesmc, 0), |
6534 | NEONMAP1(vbfdot_f32, arm_neon_bfdot, 0), |
6535 | NEONMAP1(vbfdotq_f32, arm_neon_bfdot, 0), |
6536 | NEONMAP1(vbfmlalbq_f32, arm_neon_bfmlalb, 0), |
6537 | NEONMAP1(vbfmlaltq_f32, arm_neon_bfmlalt, 0), |
6538 | NEONMAP1(vbfmmlaq_f32, arm_neon_bfmmla, 0), |
6539 | NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType), |
6540 | NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType), |
6541 | NEONMAP1(vcadd_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType), |
6542 | NEONMAP1(vcadd_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType), |
6543 | NEONMAP1(vcadd_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType), |
6544 | NEONMAP1(vcadd_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType), |
6545 | NEONMAP1(vcaddq_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType), |
6546 | NEONMAP1(vcaddq_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType), |
6547 | NEONMAP1(vcaddq_rot270_f64, arm_neon_vcadd_rot270, Add1ArgType), |
6548 | NEONMAP1(vcaddq_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType), |
6549 | NEONMAP1(vcaddq_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType), |
6550 | NEONMAP1(vcaddq_rot90_f64, arm_neon_vcadd_rot90, Add1ArgType), |
6551 | NEONMAP1(vcage_v, arm_neon_vacge, 0), |
6552 | NEONMAP1(vcageq_v, arm_neon_vacge, 0), |
6553 | NEONMAP1(vcagt_v, arm_neon_vacgt, 0), |
6554 | NEONMAP1(vcagtq_v, arm_neon_vacgt, 0), |
6555 | NEONMAP1(vcale_v, arm_neon_vacge, 0), |
6556 | NEONMAP1(vcaleq_v, arm_neon_vacge, 0), |
6557 | NEONMAP1(vcalt_v, arm_neon_vacgt, 0), |
6558 | NEONMAP1(vcaltq_v, arm_neon_vacgt, 0), |
6559 | NEONMAP0(vceqz_v), |
6560 | NEONMAP0(vceqzq_v), |
6561 | NEONMAP0(vcgez_v), |
6562 | NEONMAP0(vcgezq_v), |
6563 | NEONMAP0(vcgtz_v), |
6564 | NEONMAP0(vcgtzq_v), |
6565 | NEONMAP0(vclez_v), |
6566 | NEONMAP0(vclezq_v), |
6567 | NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType), |
6568 | NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType), |
6569 | NEONMAP0(vcltz_v), |
6570 | NEONMAP0(vcltzq_v), |
6571 | NEONMAP1(vclz_v, ctlz, Add1ArgType), |
6572 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), |
6573 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), |
6574 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), |
6575 | NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0), |
6576 | NEONMAP0(vcvt_f16_s16), |
6577 | NEONMAP0(vcvt_f16_u16), |
6578 | NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0), |
6579 | NEONMAP0(vcvt_f32_v), |
6580 | NEONMAP1(vcvt_n_f16_s16, arm_neon_vcvtfxs2fp, 0), |
6581 | NEONMAP1(vcvt_n_f16_u16, arm_neon_vcvtfxu2fp, 0), |
6582 | NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
6583 | NEONMAP1(vcvt_n_s16_f16, arm_neon_vcvtfp2fxs, 0), |
6584 | NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0), |
6585 | NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0), |
6586 | NEONMAP1(vcvt_n_u16_f16, arm_neon_vcvtfp2fxu, 0), |
6587 | NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0), |
6588 | NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0), |
6589 | NEONMAP0(vcvt_s16_f16), |
6590 | NEONMAP0(vcvt_s32_v), |
6591 | NEONMAP0(vcvt_s64_v), |
6592 | NEONMAP0(vcvt_u16_f16), |
6593 | NEONMAP0(vcvt_u32_v), |
6594 | NEONMAP0(vcvt_u64_v), |
6595 | NEONMAP1(vcvta_s16_f16, arm_neon_vcvtas, 0), |
6596 | NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0), |
6597 | NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0), |
6598 | NEONMAP1(vcvta_u16_f16, arm_neon_vcvtau, 0), |
6599 | NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0), |
6600 | NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0), |
6601 | NEONMAP1(vcvtaq_s16_f16, arm_neon_vcvtas, 0), |
6602 | NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0), |
6603 | NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0), |
6604 | NEONMAP1(vcvtaq_u16_f16, arm_neon_vcvtau, 0), |
6605 | NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0), |
6606 | NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0), |
6607 | NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0), |
6608 | NEONMAP1(vcvtm_s16_f16, arm_neon_vcvtms, 0), |
6609 | NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0), |
6610 | NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0), |
6611 | NEONMAP1(vcvtm_u16_f16, arm_neon_vcvtmu, 0), |
6612 | NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0), |
6613 | NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0), |
6614 | NEONMAP1(vcvtmq_s16_f16, arm_neon_vcvtms, 0), |
6615 | NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0), |
6616 | NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0), |
6617 | NEONMAP1(vcvtmq_u16_f16, arm_neon_vcvtmu, 0), |
6618 | NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0), |
6619 | NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0), |
6620 | NEONMAP1(vcvtn_s16_f16, arm_neon_vcvtns, 0), |
6621 | NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0), |
6622 | NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0), |
6623 | NEONMAP1(vcvtn_u16_f16, arm_neon_vcvtnu, 0), |
6624 | NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0), |
6625 | NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0), |
6626 | NEONMAP1(vcvtnq_s16_f16, arm_neon_vcvtns, 0), |
6627 | NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0), |
6628 | NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0), |
6629 | NEONMAP1(vcvtnq_u16_f16, arm_neon_vcvtnu, 0), |
6630 | NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0), |
6631 | NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0), |
6632 | NEONMAP1(vcvtp_s16_f16, arm_neon_vcvtps, 0), |
6633 | NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0), |
6634 | NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0), |
6635 | NEONMAP1(vcvtp_u16_f16, arm_neon_vcvtpu, 0), |
6636 | NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0), |
6637 | NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0), |
6638 | NEONMAP1(vcvtpq_s16_f16, arm_neon_vcvtps, 0), |
6639 | NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0), |
6640 | NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0), |
6641 | NEONMAP1(vcvtpq_u16_f16, arm_neon_vcvtpu, 0), |
6642 | NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0), |
6643 | NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0), |
6644 | NEONMAP0(vcvtq_f16_s16), |
6645 | NEONMAP0(vcvtq_f16_u16), |
6646 | NEONMAP0(vcvtq_f32_v), |
6647 | NEONMAP1(vcvtq_n_f16_s16, arm_neon_vcvtfxs2fp, 0), |
6648 | NEONMAP1(vcvtq_n_f16_u16, arm_neon_vcvtfxu2fp, 0), |
6649 | NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
6650 | NEONMAP1(vcvtq_n_s16_f16, arm_neon_vcvtfp2fxs, 0), |
6651 | NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0), |
6652 | NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0), |
6653 | NEONMAP1(vcvtq_n_u16_f16, arm_neon_vcvtfp2fxu, 0), |
6654 | NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0), |
6655 | NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0), |
6656 | NEONMAP0(vcvtq_s16_f16), |
6657 | NEONMAP0(vcvtq_s32_v), |
6658 | NEONMAP0(vcvtq_s64_v), |
6659 | NEONMAP0(vcvtq_u16_f16), |
6660 | NEONMAP0(vcvtq_u32_v), |
6661 | NEONMAP0(vcvtq_u64_v), |
6662 | NEONMAP1(vdot_s32, arm_neon_sdot, 0), |
6663 | NEONMAP1(vdot_u32, arm_neon_udot, 0), |
6664 | NEONMAP1(vdotq_s32, arm_neon_sdot, 0), |
6665 | NEONMAP1(vdotq_u32, arm_neon_udot, 0), |
6666 | NEONMAP0(vext_v), |
6667 | NEONMAP0(vextq_v), |
6668 | NEONMAP0(vfma_v), |
6669 | NEONMAP0(vfmaq_v), |
6670 | NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), |
6671 | NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), |
6672 | NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), |
6673 | NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), |
6674 | NEONMAP0(vld1_dup_v), |
6675 | NEONMAP1(vld1_v, arm_neon_vld1, 0), |
6676 | NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0), |
6677 | NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0), |
6678 | NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0), |
6679 | NEONMAP0(vld1q_dup_v), |
6680 | NEONMAP1(vld1q_v, arm_neon_vld1, 0), |
6681 | NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0), |
6682 | NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0), |
6683 | NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0), |
6684 | NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0), |
6685 | NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0), |
6686 | NEONMAP1(vld2_v, arm_neon_vld2, 0), |
6687 | NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0), |
6688 | NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0), |
6689 | NEONMAP1(vld2q_v, arm_neon_vld2, 0), |
6690 | NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0), |
6691 | NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0), |
6692 | NEONMAP1(vld3_v, arm_neon_vld3, 0), |
6693 | NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0), |
6694 | NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0), |
6695 | NEONMAP1(vld3q_v, arm_neon_vld3, 0), |
6696 | NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0), |
6697 | NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0), |
6698 | NEONMAP1(vld4_v, arm_neon_vld4, 0), |
6699 | NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0), |
6700 | NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0), |
6701 | NEONMAP1(vld4q_v, arm_neon_vld4, 0), |
6702 | NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), |
6703 | NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType), |
6704 | NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType), |
6705 | NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), |
6706 | NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), |
6707 | NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType), |
6708 | NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType), |
6709 | NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), |
6710 | NEONMAP1(vmmlaq_s32, arm_neon_smmla, 0), |
6711 | NEONMAP1(vmmlaq_u32, arm_neon_ummla, 0), |
6712 | NEONMAP0(vmovl_v), |
6713 | NEONMAP0(vmovn_v), |
6714 | NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType), |
6715 | NEONMAP0(vmull_v), |
6716 | NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType), |
6717 | NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), |
6718 | NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), |
6719 | NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType), |
6720 | NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), |
6721 | NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), |
6722 | NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType), |
6723 | NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts), |
6724 | NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts), |
6725 | NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType), |
6726 | NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType), |
6727 | NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), |
6728 | NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), |
6729 | NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0), |
6730 | NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0), |
6731 | NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType), |
6732 | NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType), |
6733 | NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType), |
6734 | NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts), |
6735 | NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType), |
6736 | NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType), |
6737 | NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType), |
6738 | NEONMAP1(vqrdmlah_s16, arm_neon_vqrdmlah, Add1ArgType), |
6739 | NEONMAP1(vqrdmlah_s32, arm_neon_vqrdmlah, Add1ArgType), |
6740 | NEONMAP1(vqrdmlahq_s16, arm_neon_vqrdmlah, Add1ArgType), |
6741 | NEONMAP1(vqrdmlahq_s32, arm_neon_vqrdmlah, Add1ArgType), |
6742 | NEONMAP1(vqrdmlsh_s16, arm_neon_vqrdmlsh, Add1ArgType), |
6743 | NEONMAP1(vqrdmlsh_s32, arm_neon_vqrdmlsh, Add1ArgType), |
6744 | NEONMAP1(vqrdmlshq_s16, arm_neon_vqrdmlsh, Add1ArgType), |
6745 | NEONMAP1(vqrdmlshq_s32, arm_neon_vqrdmlsh, Add1ArgType), |
6746 | NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType), |
6747 | NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType), |
6748 | NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), |
6749 | NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), |
6750 | NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), |
6751 | NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), |
6752 | NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), |
6753 | NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), |
6754 | NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0), |
6755 | NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0), |
6756 | NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), |
6757 | NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), |
6758 | NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType), |
6759 | NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), |
6760 | NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), |
6761 | NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType), |
6762 | NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType), |
6763 | NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), |
6764 | NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), |
6765 | NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType), |
6766 | NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType), |
6767 | NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType), |
6768 | NEONMAP0(vrndi_v), |
6769 | NEONMAP0(vrndiq_v), |
6770 | NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType), |
6771 | NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType), |
6772 | NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType), |
6773 | NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType), |
6774 | NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType), |
6775 | NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType), |
6776 | NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType), |
6777 | NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType), |
6778 | NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType), |
6779 | NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), |
6780 | NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), |
6781 | NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), |
6782 | NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), |
6783 | NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), |
6784 | NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), |
6785 | NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType), |
6786 | NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType), |
6787 | NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType), |
6788 | NEONMAP1(vsha1su0q_u32, arm_neon_sha1su0, 0), |
6789 | NEONMAP1(vsha1su1q_u32, arm_neon_sha1su1, 0), |
6790 | NEONMAP1(vsha256h2q_u32, arm_neon_sha256h2, 0), |
6791 | NEONMAP1(vsha256hq_u32, arm_neon_sha256h, 0), |
6792 | NEONMAP1(vsha256su0q_u32, arm_neon_sha256su0, 0), |
6793 | NEONMAP1(vsha256su1q_u32, arm_neon_sha256su1, 0), |
6794 | NEONMAP0(vshl_n_v), |
6795 | NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), |
6796 | NEONMAP0(vshll_n_v), |
6797 | NEONMAP0(vshlq_n_v), |
6798 | NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), |
6799 | NEONMAP0(vshr_n_v), |
6800 | NEONMAP0(vshrn_n_v), |
6801 | NEONMAP0(vshrq_n_v), |
6802 | NEONMAP1(vst1_v, arm_neon_vst1, 0), |
6803 | NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0), |
6804 | NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0), |
6805 | NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0), |
6806 | NEONMAP1(vst1q_v, arm_neon_vst1, 0), |
6807 | NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0), |
6808 | NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0), |
6809 | NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0), |
6810 | NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0), |
6811 | NEONMAP1(vst2_v, arm_neon_vst2, 0), |
6812 | NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0), |
6813 | NEONMAP1(vst2q_v, arm_neon_vst2, 0), |
6814 | NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0), |
6815 | NEONMAP1(vst3_v, arm_neon_vst3, 0), |
6816 | NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0), |
6817 | NEONMAP1(vst3q_v, arm_neon_vst3, 0), |
6818 | NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0), |
6819 | NEONMAP1(vst4_v, arm_neon_vst4, 0), |
6820 | NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0), |
6821 | NEONMAP1(vst4q_v, arm_neon_vst4, 0), |
6822 | NEONMAP0(vsubhn_v), |
6823 | NEONMAP0(vtrn_v), |
6824 | NEONMAP0(vtrnq_v), |
6825 | NEONMAP0(vtst_v), |
6826 | NEONMAP0(vtstq_v), |
6827 | NEONMAP1(vusdot_s32, arm_neon_usdot, 0), |
6828 | NEONMAP1(vusdotq_s32, arm_neon_usdot, 0), |
6829 | NEONMAP1(vusmmlaq_s32, arm_neon_usmmla, 0), |
6830 | NEONMAP0(vuzp_v), |
6831 | NEONMAP0(vuzpq_v), |
6832 | NEONMAP0(vzip_v), |
6833 | NEONMAP0(vzipq_v) |
6834 | }; |
6835 | |
6836 | static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = { |
6837 | NEONMAP1(__a64_vcvtq_low_bf16_f32, aarch64_neon_bfcvtn, 0), |
6838 | NEONMAP0(splat_lane_v), |
6839 | NEONMAP0(splat_laneq_v), |
6840 | NEONMAP0(splatq_lane_v), |
6841 | NEONMAP0(splatq_laneq_v), |
6842 | NEONMAP1(vabs_v, aarch64_neon_abs, 0), |
6843 | NEONMAP1(vabsq_v, aarch64_neon_abs, 0), |
6844 | NEONMAP0(vadd_v), |
6845 | NEONMAP0(vaddhn_v), |
6846 | NEONMAP0(vaddq_p128), |
6847 | NEONMAP0(vaddq_v), |
6848 | NEONMAP1(vaesdq_u8, aarch64_crypto_aesd, 0), |
6849 | NEONMAP1(vaeseq_u8, aarch64_crypto_aese, 0), |
6850 | NEONMAP1(vaesimcq_u8, aarch64_crypto_aesimc, 0), |
6851 | NEONMAP1(vaesmcq_u8, aarch64_crypto_aesmc, 0), |
6852 | NEONMAP2(vbcaxq_s16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), |
6853 | NEONMAP2(vbcaxq_s32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), |
6854 | NEONMAP2(vbcaxq_s64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), |
6855 | NEONMAP2(vbcaxq_s8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), |
6856 | NEONMAP2(vbcaxq_u16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), |
6857 | NEONMAP2(vbcaxq_u32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), |
6858 | NEONMAP2(vbcaxq_u64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), |
6859 | NEONMAP2(vbcaxq_u8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), |
6860 | NEONMAP1(vbfdot_f32, aarch64_neon_bfdot, 0), |
6861 | NEONMAP1(vbfdotq_f32, aarch64_neon_bfdot, 0), |
6862 | NEONMAP1(vbfmlalbq_f32, aarch64_neon_bfmlalb, 0), |
6863 | NEONMAP1(vbfmlaltq_f32, aarch64_neon_bfmlalt, 0), |
6864 | NEONMAP1(vbfmmlaq_f32, aarch64_neon_bfmmla, 0), |
6865 | NEONMAP1(vcadd_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType), |
6866 | NEONMAP1(vcadd_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType), |
6867 | NEONMAP1(vcadd_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType), |
6868 | NEONMAP1(vcadd_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType), |
6869 | NEONMAP1(vcaddq_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType), |
6870 | NEONMAP1(vcaddq_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType), |
6871 | NEONMAP1(vcaddq_rot270_f64, aarch64_neon_vcadd_rot270, Add1ArgType), |
6872 | NEONMAP1(vcaddq_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType), |
6873 | NEONMAP1(vcaddq_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType), |
6874 | NEONMAP1(vcaddq_rot90_f64, aarch64_neon_vcadd_rot90, Add1ArgType), |
6875 | NEONMAP1(vcage_v, aarch64_neon_facge, 0), |
6876 | NEONMAP1(vcageq_v, aarch64_neon_facge, 0), |
6877 | NEONMAP1(vcagt_v, aarch64_neon_facgt, 0), |
6878 | NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0), |
6879 | NEONMAP1(vcale_v, aarch64_neon_facge, 0), |
6880 | NEONMAP1(vcaleq_v, aarch64_neon_facge, 0), |
6881 | NEONMAP1(vcalt_v, aarch64_neon_facgt, 0), |
6882 | NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0), |
6883 | NEONMAP0(vceqz_v), |
6884 | NEONMAP0(vceqzq_v), |
6885 | NEONMAP0(vcgez_v), |
6886 | NEONMAP0(vcgezq_v), |
6887 | NEONMAP0(vcgtz_v), |
6888 | NEONMAP0(vcgtzq_v), |
6889 | NEONMAP0(vclez_v), |
6890 | NEONMAP0(vclezq_v), |
6891 | NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType), |
6892 | NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType), |
6893 | NEONMAP0(vcltz_v), |
6894 | NEONMAP0(vcltzq_v), |
6895 | NEONMAP1(vclz_v, ctlz, Add1ArgType), |
6896 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), |
6897 | NEONMAP1(vcmla_f16, aarch64_neon_vcmla_rot0, Add1ArgType), |
6898 | NEONMAP1(vcmla_f32, aarch64_neon_vcmla_rot0, Add1ArgType), |
6899 | NEONMAP1(vcmla_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType), |
6900 | NEONMAP1(vcmla_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType), |
6901 | NEONMAP1(vcmla_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType), |
6902 | NEONMAP1(vcmla_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType), |
6903 | NEONMAP1(vcmla_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType), |
6904 | NEONMAP1(vcmla_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType), |
6905 | NEONMAP1(vcmlaq_f16, aarch64_neon_vcmla_rot0, Add1ArgType), |
6906 | NEONMAP1(vcmlaq_f32, aarch64_neon_vcmla_rot0, Add1ArgType), |
6907 | NEONMAP1(vcmlaq_f64, aarch64_neon_vcmla_rot0, Add1ArgType), |
6908 | NEONMAP1(vcmlaq_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType), |
6909 | NEONMAP1(vcmlaq_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType), |
6910 | NEONMAP1(vcmlaq_rot180_f64, aarch64_neon_vcmla_rot180, Add1ArgType), |
6911 | NEONMAP1(vcmlaq_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType), |
6912 | NEONMAP1(vcmlaq_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType), |
6913 | NEONMAP1(vcmlaq_rot270_f64, aarch64_neon_vcmla_rot270, Add1ArgType), |
6914 | NEONMAP1(vcmlaq_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType), |
6915 | NEONMAP1(vcmlaq_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType), |
6916 | NEONMAP1(vcmlaq_rot90_f64, aarch64_neon_vcmla_rot90, Add1ArgType), |
6917 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), |
6918 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), |
6919 | NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0), |
6920 | NEONMAP0(vcvt_f16_s16), |
6921 | NEONMAP0(vcvt_f16_u16), |
6922 | NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0), |
6923 | NEONMAP0(vcvt_f32_v), |
6924 | NEONMAP1(vcvt_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0), |
6925 | NEONMAP1(vcvt_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0), |
6926 | NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
6927 | NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
6928 | NEONMAP1(vcvt_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0), |
6929 | NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), |
6930 | NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), |
6931 | NEONMAP1(vcvt_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0), |
6932 | NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), |
6933 | NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), |
6934 | NEONMAP0(vcvtq_f16_s16), |
6935 | NEONMAP0(vcvtq_f16_u16), |
6936 | NEONMAP0(vcvtq_f32_v), |
6937 | NEONMAP1(vcvtq_high_bf16_f32, aarch64_neon_bfcvtn2, 0), |
6938 | NEONMAP1(vcvtq_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0), |
6939 | NEONMAP1(vcvtq_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0), |
6940 | NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
6941 | NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
6942 | NEONMAP1(vcvtq_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0), |
6943 | NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), |
6944 | NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), |
6945 | NEONMAP1(vcvtq_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0), |
6946 | NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), |
6947 | NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), |
6948 | NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType), |
6949 | NEONMAP1(vdot_s32, aarch64_neon_sdot, 0), |
6950 | NEONMAP1(vdot_u32, aarch64_neon_udot, 0), |
6951 | NEONMAP1(vdotq_s32, aarch64_neon_sdot, 0), |
6952 | NEONMAP1(vdotq_u32, aarch64_neon_udot, 0), |
6953 | NEONMAP2(veor3q_s16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), |
6954 | NEONMAP2(veor3q_s32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), |
6955 | NEONMAP2(veor3q_s64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), |
6956 | NEONMAP2(veor3q_s8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), |
6957 | NEONMAP2(veor3q_u16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), |
6958 | NEONMAP2(veor3q_u32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), |
6959 | NEONMAP2(veor3q_u64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), |
6960 | NEONMAP2(veor3q_u8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), |
6961 | NEONMAP0(vext_v), |
6962 | NEONMAP0(vextq_v), |
6963 | NEONMAP0(vfma_v), |
6964 | NEONMAP0(vfmaq_v), |
6965 | NEONMAP1(vfmlal_high_f16, aarch64_neon_fmlal2, 0), |
6966 | NEONMAP1(vfmlal_low_f16, aarch64_neon_fmlal, 0), |
6967 | NEONMAP1(vfmlalq_high_f16, aarch64_neon_fmlal2, 0), |
6968 | NEONMAP1(vfmlalq_low_f16, aarch64_neon_fmlal, 0), |
6969 | NEONMAP1(vfmlsl_high_f16, aarch64_neon_fmlsl2, 0), |
6970 | NEONMAP1(vfmlsl_low_f16, aarch64_neon_fmlsl, 0), |
6971 | NEONMAP1(vfmlslq_high_f16, aarch64_neon_fmlsl2, 0), |
6972 | NEONMAP1(vfmlslq_low_f16, aarch64_neon_fmlsl, 0), |
6973 | NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), |
6974 | NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), |
6975 | NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), |
6976 | NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), |
6977 | NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0), |
6978 | NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0), |
6979 | NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0), |
6980 | NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0), |
6981 | NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0), |
6982 | NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0), |
6983 | NEONMAP1(vmmlaq_s32, aarch64_neon_smmla, 0), |
6984 | NEONMAP1(vmmlaq_u32, aarch64_neon_ummla, 0), |
6985 | NEONMAP0(vmovl_v), |
6986 | NEONMAP0(vmovn_v), |
6987 | NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType), |
6988 | NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType), |
6989 | NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType), |
6990 | NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), |
6991 | NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), |
6992 | NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType), |
6993 | NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType), |
6994 | NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType), |
6995 | NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), |
6996 | NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), |
6997 | NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0), |
6998 | NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0), |
6999 | NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0), |
7000 | NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0), |
7001 | NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType), |
7002 | NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0), |
7003 | NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0), |
7004 | NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType), |
7005 | NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType), |
7006 | NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts), |
7007 | NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType), |
7008 | NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType), |
7009 | NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType), |
7010 | NEONMAP1(vqrdmlah_s16, aarch64_neon_sqrdmlah, Add1ArgType), |
7011 | NEONMAP1(vqrdmlah_s32, aarch64_neon_sqrdmlah, Add1ArgType), |
7012 | NEONMAP1(vqrdmlahq_s16, aarch64_neon_sqrdmlah, Add1ArgType), |
7013 | NEONMAP1(vqrdmlahq_s32, aarch64_neon_sqrdmlah, Add1ArgType), |
7014 | NEONMAP1(vqrdmlsh_s16, aarch64_neon_sqrdmlsh, Add1ArgType), |
7015 | NEONMAP1(vqrdmlsh_s32, aarch64_neon_sqrdmlsh, Add1ArgType), |
7016 | NEONMAP1(vqrdmlshq_s16, aarch64_neon_sqrdmlsh, Add1ArgType), |
7017 | NEONMAP1(vqrdmlshq_s32, aarch64_neon_sqrdmlsh, Add1ArgType), |
7018 | NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0), |
7019 | NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), |
7020 | NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType), |
7021 | NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0), |
7022 | NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), |
7023 | NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType), |
7024 | NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), |
7025 | NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), |
7026 | NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), |
7027 | NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), |
7028 | NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), |
7029 | NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), |
7030 | NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0), |
7031 | NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0), |
7032 | NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), |
7033 | NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), |
7034 | NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType), |
7035 | NEONMAP1(vrax1q_u64, aarch64_crypto_rax1, 0), |
7036 | NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), |
7037 | NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), |
7038 | NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType), |
7039 | NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType), |
7040 | NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), |
7041 | NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), |
7042 | NEONMAP1(vrnd32x_f32, aarch64_neon_frint32x, Add1ArgType), |
7043 | NEONMAP1(vrnd32x_f64, aarch64_neon_frint32x, Add1ArgType), |
7044 | NEONMAP1(vrnd32xq_f32, aarch64_neon_frint32x, Add1ArgType), |
7045 | NEONMAP1(vrnd32xq_f64, aarch64_neon_frint32x, Add1ArgType), |
7046 | NEONMAP1(vrnd32z_f32, aarch64_neon_frint32z, Add1ArgType), |
7047 | NEONMAP1(vrnd32z_f64, aarch64_neon_frint32z, Add1ArgType), |
7048 | NEONMAP1(vrnd32zq_f32, aarch64_neon_frint32z, Add1ArgType), |
7049 | NEONMAP1(vrnd32zq_f64, aarch64_neon_frint32z, Add1ArgType), |
7050 | NEONMAP1(vrnd64x_f32, aarch64_neon_frint64x, Add1ArgType), |
7051 | NEONMAP1(vrnd64x_f64, aarch64_neon_frint64x, Add1ArgType), |
7052 | NEONMAP1(vrnd64xq_f32, aarch64_neon_frint64x, Add1ArgType), |
7053 | NEONMAP1(vrnd64xq_f64, aarch64_neon_frint64x, Add1ArgType), |
7054 | NEONMAP1(vrnd64z_f32, aarch64_neon_frint64z, Add1ArgType), |
7055 | NEONMAP1(vrnd64z_f64, aarch64_neon_frint64z, Add1ArgType), |
7056 | NEONMAP1(vrnd64zq_f32, aarch64_neon_frint64z, Add1ArgType), |
7057 | NEONMAP1(vrnd64zq_f64, aarch64_neon_frint64z, Add1ArgType), |
7058 | NEONMAP0(vrndi_v), |
7059 | NEONMAP0(vrndiq_v), |
7060 | NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), |
7061 | NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), |
7062 | NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), |
7063 | NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), |
7064 | NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), |
7065 | NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), |
7066 | NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType), |
7067 | NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType), |
7068 | NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType), |
7069 | NEONMAP1(vsha1su0q_u32, aarch64_crypto_sha1su0, 0), |
7070 | NEONMAP1(vsha1su1q_u32, aarch64_crypto_sha1su1, 0), |
7071 | NEONMAP1(vsha256h2q_u32, aarch64_crypto_sha256h2, 0), |
7072 | NEONMAP1(vsha256hq_u32, aarch64_crypto_sha256h, 0), |
7073 | NEONMAP1(vsha256su0q_u32, aarch64_crypto_sha256su0, 0), |
7074 | NEONMAP1(vsha256su1q_u32, aarch64_crypto_sha256su1, 0), |
7075 | NEONMAP1(vsha512h2q_u64, aarch64_crypto_sha512h2, 0), |
7076 | NEONMAP1(vsha512hq_u64, aarch64_crypto_sha512h, 0), |
7077 | NEONMAP1(vsha512su0q_u64, aarch64_crypto_sha512su0, 0), |
7078 | NEONMAP1(vsha512su1q_u64, aarch64_crypto_sha512su1, 0), |
7079 | NEONMAP0(vshl_n_v), |
7080 | NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), |
7081 | NEONMAP0(vshll_n_v), |
7082 | NEONMAP0(vshlq_n_v), |
7083 | NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), |
7084 | NEONMAP0(vshr_n_v), |
7085 | NEONMAP0(vshrn_n_v), |
7086 | NEONMAP0(vshrq_n_v), |
7087 | NEONMAP1(vsm3partw1q_u32, aarch64_crypto_sm3partw1, 0), |
7088 | NEONMAP1(vsm3partw2q_u32, aarch64_crypto_sm3partw2, 0), |
7089 | NEONMAP1(vsm3ss1q_u32, aarch64_crypto_sm3ss1, 0), |
7090 | NEONMAP1(vsm3tt1aq_u32, aarch64_crypto_sm3tt1a, 0), |
7091 | NEONMAP1(vsm3tt1bq_u32, aarch64_crypto_sm3tt1b, 0), |
7092 | NEONMAP1(vsm3tt2aq_u32, aarch64_crypto_sm3tt2a, 0), |
7093 | NEONMAP1(vsm3tt2bq_u32, aarch64_crypto_sm3tt2b, 0), |
7094 | NEONMAP1(vsm4ekeyq_u32, aarch64_crypto_sm4ekey, 0), |
7095 | NEONMAP1(vsm4eq_u32, aarch64_crypto_sm4e, 0), |
7096 | NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0), |
7097 | NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0), |
7098 | NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0), |
7099 | NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0), |
7100 | NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0), |
7101 | NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0), |
7102 | NEONMAP0(vsubhn_v), |
7103 | NEONMAP0(vtst_v), |
7104 | NEONMAP0(vtstq_v), |
7105 | NEONMAP1(vusdot_s32, aarch64_neon_usdot, 0), |
7106 | NEONMAP1(vusdotq_s32, aarch64_neon_usdot, 0), |
7107 | NEONMAP1(vusmmlaq_s32, aarch64_neon_usmmla, 0), |
7108 | NEONMAP1(vxarq_u64, aarch64_crypto_xar, 0), |
7109 | }; |
7110 | |
7111 | static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = { |
7112 | NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType), |
7113 | NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType), |
7114 | NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType), |
7115 | NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), |
7116 | NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), |
7117 | NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), |
7118 | NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), |
7119 | NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), |
7120 | NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), |
7121 | NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
7122 | NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), |
7123 | NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType), |
7124 | NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), |
7125 | NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType), |
7126 | NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
7127 | NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
7128 | NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType), |
7129 | NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType), |
7130 | NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), |
7131 | NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), |
7132 | NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType), |
7133 | NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType), |
7134 | NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), |
7135 | NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), |
7136 | NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
7137 | NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
7138 | NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
7139 | NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
7140 | NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
7141 | NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
7142 | NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
7143 | NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
7144 | NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
7145 | NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
7146 | NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0), |
7147 | NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
7148 | NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
7149 | NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
7150 | NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
7151 | NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
7152 | NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
7153 | NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
7154 | NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
7155 | NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
7156 | NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
7157 | NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
7158 | NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
7159 | NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
7160 | NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
7161 | NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
7162 | NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
7163 | NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
7164 | NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
7165 | NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0), |
7166 | NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
7167 | NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
7168 | NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
7169 | NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
7170 | NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), |
7171 | NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), |
7172 | NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
7173 | NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
7174 | NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), |
7175 | NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), |
7176 | NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
7177 | NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
7178 | NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
7179 | NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
7180 | NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), |
7181 | NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), |
7182 | NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
7183 | NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), |
7184 | NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), |
7185 | NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), |
7186 | NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0), |
7187 | NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType), |
7188 | NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType), |
7189 | NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
7190 | NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
7191 | NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
7192 | NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
7193 | NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
7194 | NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
7195 | NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
7196 | NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
7197 | NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), |
7198 | NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
7199 | NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), |
7200 | NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType), |
7201 | NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), |
7202 | NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType), |
7203 | NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), |
7204 | NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), |
7205 | NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType), |
7206 | NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType), |
7207 | NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), |
7208 | NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), |
7209 | NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType), |
7210 | NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType), |
7211 | NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors), |
7212 | NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType), |
7213 | NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors), |
7214 | NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0), |
7215 | NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType), |
7216 | NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType), |
7217 | NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), |
7218 | NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), |
7219 | NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), |
7220 | NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), |
7221 | NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType), |
7222 | NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), |
7223 | NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), |
7224 | NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), |
7225 | NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType), |
7226 | NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), |
7227 | NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType), |
7228 | NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, Vectorize1ArgType | Use64BitVectors), |
7229 | NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType), |
7230 | NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, Vectorize1ArgType | Use64BitVectors), |
7231 | NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType), |
7232 | NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors), |
7233 | NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType), |
7234 | NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), |
7235 | NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), |
7236 | NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType), |
7237 | NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType), |
7238 | NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), |
7239 | NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), |
7240 | NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType), |
7241 | NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType), |
7242 | NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType), |
7243 | NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType), |
7244 | NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), |
7245 | NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), |
7246 | NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), |
7247 | NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), |
7248 | NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType), |
7249 | NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), |
7250 | NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), |
7251 | NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
7252 | NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
7253 | NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
7254 | NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
7255 | NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType), |
7256 | NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType), |
7257 | NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
7258 | NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
7259 | NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
7260 | NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
7261 | NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType), |
7262 | NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType), |
7263 | NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType), |
7264 | NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType), |
7265 | NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), |
7266 | NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), |
7267 | NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType), |
7268 | NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType), |
7269 | NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType), |
7270 | NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), |
7271 | NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), |
7272 | NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), |
7273 | NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), |
7274 | NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType), |
7275 | NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), |
7276 | NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), |
7277 | NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), |
7278 | NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), |
7279 | NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType), |
7280 | NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType), |
7281 | NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), |
7282 | NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), |
7283 | NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType), |
7284 | NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType), |
7285 | NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType), |
7286 | NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType), |
7287 | NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType), |
7288 | NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType), |
7289 | NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType), |
7290 | NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType), |
7291 | NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType), |
7292 | NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType), |
7293 | NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType), |
7294 | NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType), |
7295 | NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0), |
7296 | NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0), |
7297 | NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0), |
7298 | NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0), |
7299 | NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType), |
7300 | NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType), |
7301 | NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType), |
7302 | NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType), |
7303 | NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), |
7304 | NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType), |
7305 | NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), |
7306 | NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType), |
7307 | NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType), |
7308 | NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType), |
7309 | NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), |
7310 | NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType), |
7311 | NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), |
7312 | NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType), |
7313 | // FP16 scalar intrinsics go here. |
7314 | NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType), |
7315 | NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
7316 | NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
7317 | NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
7318 | NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
7319 | NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
7320 | NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
7321 | NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
7322 | NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
7323 | NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
7324 | NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
7325 | NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
7326 | NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
7327 | NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
7328 | NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
7329 | NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
7330 | NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
7331 | NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
7332 | NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
7333 | NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
7334 | NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
7335 | NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
7336 | NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
7337 | NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
7338 | NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
7339 | NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
7340 | NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
7341 | NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
7342 | NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
7343 | NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType), |
7344 | NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType), |
7345 | NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType), |
7346 | NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType), |
7347 | NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType), |
7348 | }; |
7349 | |
7350 | // Some intrinsics are equivalent for codegen. |
7351 | static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = { |
7352 | { NEON::BI__builtin_neon_splat_lane_bf16, NEON::BI__builtin_neon_splat_lane_v, }, |
7353 | { NEON::BI__builtin_neon_splat_laneq_bf16, NEON::BI__builtin_neon_splat_laneq_v, }, |
7354 | { NEON::BI__builtin_neon_splatq_lane_bf16, NEON::BI__builtin_neon_splatq_lane_v, }, |
7355 | { NEON::BI__builtin_neon_splatq_laneq_bf16, NEON::BI__builtin_neon_splatq_laneq_v, }, |
7356 | { NEON::BI__builtin_neon_vabd_f16, NEON::BI__builtin_neon_vabd_v, }, |
7357 | { NEON::BI__builtin_neon_vabdq_f16, NEON::BI__builtin_neon_vabdq_v, }, |
7358 | { NEON::BI__builtin_neon_vabs_f16, NEON::BI__builtin_neon_vabs_v, }, |
7359 | { NEON::BI__builtin_neon_vabsq_f16, NEON::BI__builtin_neon_vabsq_v, }, |
7360 | { NEON::BI__builtin_neon_vcage_f16, NEON::BI__builtin_neon_vcage_v, }, |
7361 | { NEON::BI__builtin_neon_vcageq_f16, NEON::BI__builtin_neon_vcageq_v, }, |
7362 | { NEON::BI__builtin_neon_vcagt_f16, NEON::BI__builtin_neon_vcagt_v, }, |
7363 | { NEON::BI__builtin_neon_vcagtq_f16, NEON::BI__builtin_neon_vcagtq_v, }, |
7364 | { NEON::BI__builtin_neon_vcale_f16, NEON::BI__builtin_neon_vcale_v, }, |
7365 | { NEON::BI__builtin_neon_vcaleq_f16, NEON::BI__builtin_neon_vcaleq_v, }, |
7366 | { NEON::BI__builtin_neon_vcalt_f16, NEON::BI__builtin_neon_vcalt_v, }, |
7367 | { NEON::BI__builtin_neon_vcaltq_f16, NEON::BI__builtin_neon_vcaltq_v, }, |
7368 | { NEON::BI__builtin_neon_vceqz_f16, NEON::BI__builtin_neon_vceqz_v, }, |
7369 | { NEON::BI__builtin_neon_vceqzq_f16, NEON::BI__builtin_neon_vceqzq_v, }, |
7370 | { NEON::BI__builtin_neon_vcgez_f16, NEON::BI__builtin_neon_vcgez_v, }, |
7371 | { NEON::BI__builtin_neon_vcgezq_f16, NEON::BI__builtin_neon_vcgezq_v, }, |
7372 | { NEON::BI__builtin_neon_vcgtz_f16, NEON::BI__builtin_neon_vcgtz_v, }, |
7373 | { NEON::BI__builtin_neon_vcgtzq_f16, NEON::BI__builtin_neon_vcgtzq_v, }, |
7374 | { NEON::BI__builtin_neon_vclez_f16, NEON::BI__builtin_neon_vclez_v, }, |
7375 | { NEON::BI__builtin_neon_vclezq_f16, NEON::BI__builtin_neon_vclezq_v, }, |
7376 | { NEON::BI__builtin_neon_vcltz_f16, NEON::BI__builtin_neon_vcltz_v, }, |
7377 | { NEON::BI__builtin_neon_vcltzq_f16, NEON::BI__builtin_neon_vcltzq_v, }, |
7378 | { NEON::BI__builtin_neon_vfma_f16, NEON::BI__builtin_neon_vfma_v, }, |
7379 | { NEON::BI__builtin_neon_vfma_lane_f16, NEON::BI__builtin_neon_vfma_lane_v, }, |
7380 | { NEON::BI__builtin_neon_vfma_laneq_f16, NEON::BI__builtin_neon_vfma_laneq_v, }, |
7381 | { NEON::BI__builtin_neon_vfmaq_f16, NEON::BI__builtin_neon_vfmaq_v, }, |
7382 | { NEON::BI__builtin_neon_vfmaq_lane_f16, NEON::BI__builtin_neon_vfmaq_lane_v, }, |
7383 | { NEON::BI__builtin_neon_vfmaq_laneq_f16, NEON::BI__builtin_neon_vfmaq_laneq_v, }, |
7384 | { NEON::BI__builtin_neon_vld1_bf16_x2, NEON::BI__builtin_neon_vld1_x2_v }, |
7385 | { NEON::BI__builtin_neon_vld1_bf16_x3, NEON::BI__builtin_neon_vld1_x3_v }, |
7386 | { NEON::BI__builtin_neon_vld1_bf16_x4, NEON::BI__builtin_neon_vld1_x4_v }, |
7387 | { NEON::BI__builtin_neon_vld1_bf16, NEON::BI__builtin_neon_vld1_v }, |
7388 | { NEON::BI__builtin_neon_vld1_dup_bf16, NEON::BI__builtin_neon_vld1_dup_v }, |
7389 | { NEON::BI__builtin_neon_vld1_lane_bf16, NEON::BI__builtin_neon_vld1_lane_v }, |
7390 | { NEON::BI__builtin_neon_vld1q_bf16_x2, NEON::BI__builtin_neon_vld1q_x2_v }, |
7391 | { NEON::BI__builtin_neon_vld1q_bf16_x3, NEON::BI__builtin_neon_vld1q_x3_v }, |
7392 | { NEON::BI__builtin_neon_vld1q_bf16_x4, NEON::BI__builtin_neon_vld1q_x4_v }, |
7393 | { NEON::BI__builtin_neon_vld1q_bf16, NEON::BI__builtin_neon_vld1q_v }, |
7394 | { NEON::BI__builtin_neon_vld1q_dup_bf16, NEON::BI__builtin_neon_vld1q_dup_v }, |
7395 | { NEON::BI__builtin_neon_vld1q_lane_bf16, NEON::BI__builtin_neon_vld1q_lane_v }, |
7396 | { NEON::BI__builtin_neon_vld2_bf16, NEON::BI__builtin_neon_vld2_v }, |
7397 | { NEON::BI__builtin_neon_vld2_dup_bf16, NEON::BI__builtin_neon_vld2_dup_v }, |
7398 | { NEON::BI__builtin_neon_vld2_lane_bf16, NEON::BI__builtin_neon_vld2_lane_v }, |
7399 | { NEON::BI__builtin_neon_vld2q_bf16, NEON::BI__builtin_neon_vld2q_v }, |
7400 | { NEON::BI__builtin_neon_vld2q_dup_bf16, NEON::BI__builtin_neon_vld2q_dup_v }, |
7401 | { NEON::BI__builtin_neon_vld2q_lane_bf16, NEON::BI__builtin_neon_vld2q_lane_v }, |
7402 | { NEON::BI__builtin_neon_vld3_bf16, NEON::BI__builtin_neon_vld3_v }, |
7403 | { NEON::BI__builtin_neon_vld3_dup_bf16, NEON::BI__builtin_neon_vld3_dup_v }, |
7404 | { NEON::BI__builtin_neon_vld3_lane_bf16, NEON::BI__builtin_neon_vld3_lane_v }, |
7405 | { NEON::BI__builtin_neon_vld3q_bf16, NEON::BI__builtin_neon_vld3q_v }, |
7406 | { NEON::BI__builtin_neon_vld3q_dup_bf16, NEON::BI__builtin_neon_vld3q_dup_v }, |
7407 | { NEON::BI__builtin_neon_vld3q_lane_bf16, NEON::BI__builtin_neon_vld3q_lane_v }, |
7408 | { NEON::BI__builtin_neon_vld4_bf16, NEON::BI__builtin_neon_vld4_v }, |
7409 | { NEON::BI__builtin_neon_vld4_dup_bf16, NEON::BI__builtin_neon_vld4_dup_v }, |
7410 | { NEON::BI__builtin_neon_vld4_lane_bf16, NEON::BI__builtin_neon_vld4_lane_v }, |
7411 | { NEON::BI__builtin_neon_vld4q_bf16, NEON::BI__builtin_neon_vld4q_v }, |
7412 | { NEON::BI__builtin_neon_vld4q_dup_bf16, NEON::BI__builtin_neon_vld4q_dup_v }, |
7413 | { NEON::BI__builtin_neon_vld4q_lane_bf16, NEON::BI__builtin_neon_vld4q_lane_v }, |
7414 | { NEON::BI__builtin_neon_vmax_f16, NEON::BI__builtin_neon_vmax_v, }, |
7415 | { NEON::BI__builtin_neon_vmaxnm_f16, NEON::BI__builtin_neon_vmaxnm_v, }, |
7416 | { NEON::BI__builtin_neon_vmaxnmq_f16, NEON::BI__builtin_neon_vmaxnmq_v, }, |
7417 | { NEON::BI__builtin_neon_vmaxq_f16, NEON::BI__builtin_neon_vmaxq_v, }, |
7418 | { NEON::BI__builtin_neon_vmin_f16, NEON::BI__builtin_neon_vmin_v, }, |
7419 | { NEON::BI__builtin_neon_vminnm_f16, NEON::BI__builtin_neon_vminnm_v, }, |
7420 | { NEON::BI__builtin_neon_vminnmq_f16, NEON::BI__builtin_neon_vminnmq_v, }, |
7421 | { NEON::BI__builtin_neon_vminq_f16, NEON::BI__builtin_neon_vminq_v, }, |
7422 | { NEON::BI__builtin_neon_vmulx_f16, NEON::BI__builtin_neon_vmulx_v, }, |
7423 | { NEON::BI__builtin_neon_vmulxq_f16, NEON::BI__builtin_neon_vmulxq_v, }, |
7424 | { NEON::BI__builtin_neon_vpadd_f16, NEON::BI__builtin_neon_vpadd_v, }, |
7425 | { NEON::BI__builtin_neon_vpaddq_f16, NEON::BI__builtin_neon_vpaddq_v, }, |
7426 | { NEON::BI__builtin_neon_vpmax_f16, NEON::BI__builtin_neon_vpmax_v, }, |
7427 | { NEON::BI__builtin_neon_vpmaxnm_f16, NEON::BI__builtin_neon_vpmaxnm_v, }, |
7428 | { NEON::BI__builtin_neon_vpmaxnmq_f16, NEON::BI__builtin_neon_vpmaxnmq_v, }, |
7429 | { NEON::BI__builtin_neon_vpmaxq_f16, NEON::BI__builtin_neon_vpmaxq_v, }, |
7430 | { NEON::BI__builtin_neon_vpmin_f16, NEON::BI__builtin_neon_vpmin_v, }, |
7431 | { NEON::BI__builtin_neon_vpminnm_f16, NEON::BI__builtin_neon_vpminnm_v, }, |
7432 | { NEON::BI__builtin_neon_vpminnmq_f16, NEON::BI__builtin_neon_vpminnmq_v, }, |
7433 | { NEON::BI__builtin_neon_vpminq_f16, NEON::BI__builtin_neon_vpminq_v, }, |
7434 | { NEON::BI__builtin_neon_vrecpe_f16, NEON::BI__builtin_neon_vrecpe_v, }, |
7435 | { NEON::BI__builtin_neon_vrecpeq_f16, NEON::BI__builtin_neon_vrecpeq_v, }, |
7436 | { NEON::BI__builtin_neon_vrecps_f16, NEON::BI__builtin_neon_vrecps_v, }, |
7437 | { NEON::BI__builtin_neon_vrecpsq_f16, NEON::BI__builtin_neon_vrecpsq_v, }, |
7438 | { NEON::BI__builtin_neon_vrnd_f16, NEON::BI__builtin_neon_vrnd_v, }, |
7439 | { NEON::BI__builtin_neon_vrnda_f16, NEON::BI__builtin_neon_vrnda_v, }, |
7440 | { NEON::BI__builtin_neon_vrndaq_f16, NEON::BI__builtin_neon_vrndaq_v, }, |
7441 | { NEON::BI__builtin_neon_vrndi_f16, NEON::BI__builtin_neon_vrndi_v, }, |
7442 | { NEON::BI__builtin_neon_vrndiq_f16, NEON::BI__builtin_neon_vrndiq_v, }, |
7443 | { NEON::BI__builtin_neon_vrndm_f16, NEON::BI__builtin_neon_vrndm_v, }, |
7444 | { NEON::BI__builtin_neon_vrndmq_f16, NEON::BI__builtin_neon_vrndmq_v, }, |
7445 | { NEON::BI__builtin_neon_vrndn_f16, NEON::BI__builtin_neon_vrndn_v, }, |
7446 | { NEON::BI__builtin_neon_vrndnq_f16, NEON::BI__builtin_neon_vrndnq_v, }, |
7447 | { NEON::BI__builtin_neon_vrndp_f16, NEON::BI__builtin_neon_vrndp_v, }, |
7448 | { NEON::BI__builtin_neon_vrndpq_f16, NEON::BI__builtin_neon_vrndpq_v, }, |
7449 | { NEON::BI__builtin_neon_vrndq_f16, NEON::BI__builtin_neon_vrndq_v, }, |
7450 | { NEON::BI__builtin_neon_vrndx_f16, NEON::BI__builtin_neon_vrndx_v, }, |
7451 | { NEON::BI__builtin_neon_vrndxq_f16, NEON::BI__builtin_neon_vrndxq_v, }, |
7452 | { NEON::BI__builtin_neon_vrsqrte_f16, NEON::BI__builtin_neon_vrsqrte_v, }, |
7453 | { NEON::BI__builtin_neon_vrsqrteq_f16, NEON::BI__builtin_neon_vrsqrteq_v, }, |
7454 | { NEON::BI__builtin_neon_vrsqrts_f16, NEON::BI__builtin_neon_vrsqrts_v, }, |
7455 | { NEON::BI__builtin_neon_vrsqrtsq_f16, NEON::BI__builtin_neon_vrsqrtsq_v, }, |
7456 | { NEON::BI__builtin_neon_vsqrt_f16, NEON::BI__builtin_neon_vsqrt_v, }, |
7457 | { NEON::BI__builtin_neon_vsqrtq_f16, NEON::BI__builtin_neon_vsqrtq_v, }, |
7458 | { NEON::BI__builtin_neon_vst1_bf16_x2, NEON::BI__builtin_neon_vst1_x2_v }, |
7459 | { NEON::BI__builtin_neon_vst1_bf16_x3, NEON::BI__builtin_neon_vst1_x3_v }, |
7460 | { NEON::BI__builtin_neon_vst1_bf16_x4, NEON::BI__builtin_neon_vst1_x4_v }, |
7461 | { NEON::BI__builtin_neon_vst1_bf16, NEON::BI__builtin_neon_vst1_v }, |
7462 | { NEON::BI__builtin_neon_vst1_lane_bf16, NEON::BI__builtin_neon_vst1_lane_v }, |
7463 | { NEON::BI__builtin_neon_vst1q_bf16_x2, NEON::BI__builtin_neon_vst1q_x2_v }, |
7464 | { NEON::BI__builtin_neon_vst1q_bf16_x3, NEON::BI__builtin_neon_vst1q_x3_v }, |
7465 | { NEON::BI__builtin_neon_vst1q_bf16_x4, NEON::BI__builtin_neon_vst1q_x4_v }, |
7466 | { NEON::BI__builtin_neon_vst1q_bf16, NEON::BI__builtin_neon_vst1q_v }, |
7467 | { NEON::BI__builtin_neon_vst1q_lane_bf16, NEON::BI__builtin_neon_vst1q_lane_v }, |
7468 | { NEON::BI__builtin_neon_vst2_bf16, NEON::BI__builtin_neon_vst2_v }, |
7469 | { NEON::BI__builtin_neon_vst2_lane_bf16, NEON::BI__builtin_neon_vst2_lane_v }, |
7470 | { NEON::BI__builtin_neon_vst2q_bf16, NEON::BI__builtin_neon_vst2q_v }, |
7471 | { NEON::BI__builtin_neon_vst2q_lane_bf16, NEON::BI__builtin_neon_vst2q_lane_v }, |
7472 | { NEON::BI__builtin_neon_vst3_bf16, NEON::BI__builtin_neon_vst3_v }, |
7473 | { NEON::BI__builtin_neon_vst3_lane_bf16, NEON::BI__builtin_neon_vst3_lane_v }, |
7474 | { NEON::BI__builtin_neon_vst3q_bf16, NEON::BI__builtin_neon_vst3q_v }, |
7475 | { NEON::BI__builtin_neon_vst3q_lane_bf16, NEON::BI__builtin_neon_vst3q_lane_v }, |
7476 | { NEON::BI__builtin_neon_vst4_bf16, NEON::BI__builtin_neon_vst4_v }, |
7477 | { NEON::BI__builtin_neon_vst4_lane_bf16, NEON::BI__builtin_neon_vst4_lane_v }, |
7478 | { NEON::BI__builtin_neon_vst4q_bf16, NEON::BI__builtin_neon_vst4q_v }, |
7479 | { NEON::BI__builtin_neon_vst4q_lane_bf16, NEON::BI__builtin_neon_vst4q_lane_v }, |
7480 | // The mangling rules cause us to have one ID for each type for vldap1(q)_lane |
7481 | // and vstl1(q)_lane, but codegen is equivalent for all of them. Choose an |
7482 | // arbitrary one to be handled as the canonical variation. |
7483 | { NEON::BI__builtin_neon_vldap1_lane_u64, NEON::BI__builtin_neon_vldap1_lane_s64 }, |
7484 | { NEON::BI__builtin_neon_vldap1_lane_f64, NEON::BI__builtin_neon_vldap1_lane_s64 }, |
7485 | { NEON::BI__builtin_neon_vldap1_lane_p64, NEON::BI__builtin_neon_vldap1_lane_s64 }, |
7486 | { NEON::BI__builtin_neon_vldap1q_lane_u64, NEON::BI__builtin_neon_vldap1q_lane_s64 }, |
7487 | { NEON::BI__builtin_neon_vldap1q_lane_f64, NEON::BI__builtin_neon_vldap1q_lane_s64 }, |
7488 | { NEON::BI__builtin_neon_vldap1q_lane_p64, NEON::BI__builtin_neon_vldap1q_lane_s64 }, |
7489 | { NEON::BI__builtin_neon_vstl1_lane_u64, NEON::BI__builtin_neon_vstl1_lane_s64 }, |
7490 | { NEON::BI__builtin_neon_vstl1_lane_f64, NEON::BI__builtin_neon_vstl1_lane_s64 }, |
7491 | { NEON::BI__builtin_neon_vstl1_lane_p64, NEON::BI__builtin_neon_vstl1_lane_s64 }, |
7492 | { NEON::BI__builtin_neon_vstl1q_lane_u64, NEON::BI__builtin_neon_vstl1q_lane_s64 }, |
7493 | { NEON::BI__builtin_neon_vstl1q_lane_f64, NEON::BI__builtin_neon_vstl1q_lane_s64 }, |
7494 | { NEON::BI__builtin_neon_vstl1q_lane_p64, NEON::BI__builtin_neon_vstl1q_lane_s64 }, |
7495 | }; |
7496 | |
7497 | #undef NEONMAP0 |
7498 | #undef NEONMAP1 |
7499 | #undef NEONMAP2 |
7500 | |
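// SVEMAP1 maps an SVE builtin directly onto an LLVM intrinsic; SVEMAP2 leaves
// the LLVMIntrinsic field as 0 for builtins whose codegen is handled case by
// case rather than through a one-to-one intrinsic mapping.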
7501 | #define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ |
7502 | { \ |
7503 | #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ |
7504 | TypeModifier \ |
7505 | } |
7506 | |
7507 | #define SVEMAP2(NameBase, TypeModifier) \ |
7508 | { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier } |
7509 | static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = { |
7510 | #define GET_SVE_LLVM_INTRINSIC_MAP |
7511 | #include "clang/Basic/arm_sve_builtin_cg.inc" |
7512 | #include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def" |
7513 | #undef GET_SVE_LLVM_INTRINSIC_MAP |
7514 | }; |
7515 | |
7516 | #undef SVEMAP1 |
7517 | #undef SVEMAP2 |
7518 | |
7519 | #define SMEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ |
7520 | { \ |
7521 | #NameBase, SME::BI__builtin_sme_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ |
7522 | TypeModifier \ |
7523 | } |
7524 | |
7525 | #define SMEMAP2(NameBase, TypeModifier) \ |
7526 | { #NameBase, SME::BI__builtin_sme_##NameBase, 0, 0, TypeModifier } |
7527 | static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = { |
7528 | #define GET_SME_LLVM_INTRINSIC_MAP |
7529 | #include "clang/Basic/arm_sme_builtin_cg.inc" |
7530 | #undef GET_SME_LLVM_INTRINSIC_MAP |
7531 | }; |
7532 | |
7533 | #undef SMEMAP1 |
7534 | #undef SMEMAP2 |
7535 | |
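// findARMVectorIntrinsicInMap binary-searches these tables by builtin ID; in
// asserts builds, these flags cache the one-time check that each table is
// actually sorted.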
7536 | static bool NEONSIMDIntrinsicsProvenSorted = false; |
7537 | |
7538 | static bool AArch64SIMDIntrinsicsProvenSorted = false; |
7539 | static bool AArch64SISDIntrinsicsProvenSorted = false; |
7540 | static bool AArch64SVEIntrinsicsProvenSorted = false; |
7541 | static bool AArch64SMEIntrinsicsProvenSorted = false; |
7542 | |
7543 | static const ARMVectorIntrinsicInfo * |
7544 | findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap, |
7545 | unsigned BuiltinID, bool &MapProvenSorted) { |
7546 | |
7547 | #ifndef NDEBUG |
7548 | if (!MapProvenSorted) { |
7549 | assert(llvm::is_sorted(IntrinsicMap)); |
7550 | MapProvenSorted = true; |
7551 | } |
7552 | #endif |
7553 | |
7554 | const ARMVectorIntrinsicInfo *Builtin = |
7555 | llvm::lower_bound(Range&: IntrinsicMap, Value&: BuiltinID); |
7556 | |
7557 | if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID) |
7558 | return Builtin; |
7559 | |
7560 | return nullptr; |
7561 | } |
7562 | |
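// Build the overload type list for a NEON LLVM intrinsic from the type
// modifier flags and return the corresponding intrinsic declaration.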
7563 | Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID, |
7564 | unsigned Modifier, |
7565 | llvm::Type *ArgType, |
7566 | const CallExpr *E) { |
7567 | int VectorSize = 0; |
7568 | if (Modifier & Use64BitVectors) |
7569 | VectorSize = 64; |
7570 | else if (Modifier & Use128BitVectors) |
7571 | VectorSize = 128; |
7572 | |
7573 | // Return type. |
7574 | SmallVector<llvm::Type *, 3> Tys; |
7575 | if (Modifier & AddRetType) { |
7576 | llvm::Type *Ty = ConvertType(T: E->getCallReturnType(Ctx: getContext())); |
7577 | if (Modifier & VectorizeRetType) |
7578 | Ty = llvm::FixedVectorType::get( |
7579 | ElementType: Ty, NumElts: VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1); |
7580 | |
7581 | Tys.push_back(Elt: Ty); |
7582 | } |
7583 | |
7584 | // Arguments. |
7585 | if (Modifier & VectorizeArgTypes) { |
7586 | int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1; |
7587 | ArgType = llvm::FixedVectorType::get(ElementType: ArgType, NumElts: Elts); |
7588 | } |
7589 | |
7590 | if (Modifier & (Add1ArgType | Add2ArgTypes)) |
7591 | Tys.push_back(Elt: ArgType); |
7592 | |
7593 | if (Modifier & Add2ArgTypes) |
7594 | Tys.push_back(Elt: ArgType); |
7595 | |
7596 | if (Modifier & InventFloatType) |
7597 | Tys.push_back(Elt: FloatTy); |
7598 | |
7599 | return CGM.getIntrinsic(IID: IntrinsicID, Tys); |
7600 | } |
7601 | |
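// Emit a NEON scalar (SISD) builtin: scalar operands are inserted into lane 0
// of a vector of the type the intrinsic expects, and the scalar result is
// extracted back out of the returned vector when needed.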
7602 | static Value *EmitCommonNeonSISDBuiltinExpr( |
7603 | CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo, |
7604 | SmallVectorImpl<Value *> &Ops, const CallExpr *E) { |
7605 | unsigned BuiltinID = SISDInfo.BuiltinID; |
7606 | unsigned int Int = SISDInfo.LLVMIntrinsic; |
7607 | unsigned Modifier = SISDInfo.TypeModifier; |
7608 | const char *s = SISDInfo.NameHint; |
7609 | |
7610 | switch (BuiltinID) { |
7611 | case NEON::BI__builtin_neon_vcled_s64: |
7612 | case NEON::BI__builtin_neon_vcled_u64: |
7613 | case NEON::BI__builtin_neon_vcles_f32: |
7614 | case NEON::BI__builtin_neon_vcled_f64: |
7615 | case NEON::BI__builtin_neon_vcltd_s64: |
7616 | case NEON::BI__builtin_neon_vcltd_u64: |
7617 | case NEON::BI__builtin_neon_vclts_f32: |
7618 | case NEON::BI__builtin_neon_vcltd_f64: |
7619 | case NEON::BI__builtin_neon_vcales_f32: |
7620 | case NEON::BI__builtin_neon_vcaled_f64: |
7621 | case NEON::BI__builtin_neon_vcalts_f32: |
7622 | case NEON::BI__builtin_neon_vcaltd_f64: |
7623 | // Only one direction of comparisons actually exists: cmle is really cmge |
7624 | // with swapped operands. The table gives us the right intrinsic, but we |
7625 | // still need to do the swap. |
7626 | std::swap(a&: Ops[0], b&: Ops[1]); |
7627 | break; |
7628 | } |
7629 | |
7630 | assert(Int && "Generic code assumes a valid intrinsic" ); |
7631 | |
7632 | // Determine the type(s) of this overloaded AArch64 intrinsic. |
7633 | const Expr *Arg = E->getArg(Arg: 0); |
7634 | llvm::Type *ArgTy = CGF.ConvertType(T: Arg->getType()); |
7635 | Function *F = CGF.LookupNeonLLVMIntrinsic(IntrinsicID: Int, Modifier, ArgType: ArgTy, E); |
7636 | |
7637 | int j = 0; |
7638 | ConstantInt *C0 = ConstantInt::get(Ty: CGF.SizeTy, V: 0); |
7639 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); |
7640 | ai != ae; ++ai, ++j) { |
7641 | llvm::Type *ArgTy = ai->getType(); |
7642 | if (Ops[j]->getType()->getPrimitiveSizeInBits() == |
7643 | ArgTy->getPrimitiveSizeInBits()) |
7644 | continue; |
7645 | |
7646 | assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy()); |
7647 | // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate |
7648 | // it before inserting. |
7649 | Ops[j] = CGF.Builder.CreateTruncOrBitCast( |
7650 | V: Ops[j], DestTy: cast<llvm::VectorType>(Val: ArgTy)->getElementType()); |
7651 | Ops[j] = |
7652 | CGF.Builder.CreateInsertElement(Vec: PoisonValue::get(T: ArgTy), NewElt: Ops[j], Idx: C0); |
7653 | } |
7654 | |
7655 | Value *Result = CGF.EmitNeonCall(F, Ops, name: s); |
7656 | llvm::Type *ResultType = CGF.ConvertType(T: E->getType()); |
7657 | if (ResultType->getPrimitiveSizeInBits().getFixedValue() < |
7658 | Result->getType()->getPrimitiveSizeInBits().getFixedValue()) |
7659 | return CGF.Builder.CreateExtractElement(Vec: Result, Idx: C0); |
7660 | |
7661 | return CGF.Builder.CreateBitCast(V: Result, DestTy: ResultType, Name: s); |
7662 | } |
7663 | |
7664 | Value *CodeGenFunction::EmitCommonNeonBuiltinExpr( |
7665 | unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic, |
7666 | const char *NameHint, unsigned Modifier, const CallExpr *E, |
7667 | SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1, |
7668 | llvm::Triple::ArchType Arch) { |
7669 | // Get the last argument, which specifies the vector type. |
7670 | const Expr *Arg = E->getArg(Arg: E->getNumArgs() - 1); |
7671 | std::optional<llvm::APSInt> NeonTypeConst = |
7672 | Arg->getIntegerConstantExpr(Ctx: getContext()); |
7673 | if (!NeonTypeConst) |
7674 | return nullptr; |
7675 | |
7676 | // Determine the type of this overloaded NEON intrinsic. |
7677 | NeonTypeFlags Type(NeonTypeConst->getZExtValue()); |
7678 | bool Usgn = Type.isUnsigned(); |
7679 | bool Quad = Type.isQuad(); |
7680 | const bool HasLegalHalfType = getTarget().hasLegalHalfType(); |
7681 | const bool AllowBFloatArgsAndRet = |
7682 | getTargetHooks().getABIInfo().allowBFloatArgsAndRet(); |
7683 | |
7684 | llvm::FixedVectorType *VTy = |
7685 | GetNeonType(CGF: this, TypeFlags: Type, HasLegalHalfType, V1Ty: false, AllowBFloatArgsAndRet); |
7686 | llvm::Type *Ty = VTy; |
7687 | if (!Ty) |
7688 | return nullptr; |
7689 | |
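// Materialize an address's alignment as an i32 constant, for intrinsics that
// take an explicit alignment operand.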
7690 | auto getAlignmentValue32 = [&](Address addr) -> Value* { |
7691 | return Builder.getInt32(C: addr.getAlignment().getQuantity()); |
7692 | }; |
7693 | |
7694 | unsigned Int = LLVMIntrinsic; |
7695 | if ((Modifier & UnsignedAlts) && !Usgn) |
7696 | Int = AltLLVMIntrinsic; |
7697 | |
7698 | switch (BuiltinID) { |
7699 | default: break; |
7700 | case NEON::BI__builtin_neon_splat_lane_v: |
7701 | case NEON::BI__builtin_neon_splat_laneq_v: |
7702 | case NEON::BI__builtin_neon_splatq_lane_v: |
7703 | case NEON::BI__builtin_neon_splatq_laneq_v: { |
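// splatq_lane produces a result twice as wide as its 64-bit source, and
// splat_laneq produces one half as wide as its 128-bit source, so scale the
// number of lanes in the splat accordingly.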
7704 | auto NumElements = VTy->getElementCount(); |
7705 | if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v) |
7706 | NumElements = NumElements * 2; |
7707 | if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v) |
7708 | NumElements = NumElements.divideCoefficientBy(RHS: 2); |
7709 | |
7710 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: VTy); |
7711 | return EmitNeonSplat(V: Ops[0], C: cast<ConstantInt>(Val: Ops[1]), Count: NumElements); |
7712 | } |
7713 | case NEON::BI__builtin_neon_vpadd_v: |
7714 | case NEON::BI__builtin_neon_vpaddq_v: |
7715 | // addp is not fp/int overloaded; use faddp for floating-point elements. |
7716 | if (VTy->getElementType()->isFloatingPointTy() && |
7717 | Int == Intrinsic::aarch64_neon_addp) |
7718 | Int = Intrinsic::aarch64_neon_faddp; |
7719 | break; |
7720 | case NEON::BI__builtin_neon_vabs_v: |
7721 | case NEON::BI__builtin_neon_vabsq_v: |
7722 | if (VTy->getElementType()->isFloatingPointTy()) |
7723 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::fabs, Tys: Ty), Ops, name: "vabs" ); |
7724 | return EmitNeonCall(F: CGM.getIntrinsic(IID: LLVMIntrinsic, Tys: Ty), Ops, name: "vabs" ); |
7725 | case NEON::BI__builtin_neon_vadd_v: |
7726 | case NEON::BI__builtin_neon_vaddq_v: { |
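// Addition of polynomial types is carry-less (addition in GF(2)), so it
// lowers to a bitwise XOR of the operands.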
7727 | llvm::Type *VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: Quad ? 16 : 8); |
7728 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: VTy); |
7729 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: VTy); |
7730 | Ops[0] = Builder.CreateXor(LHS: Ops[0], RHS: Ops[1]); |
7731 | return Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
7732 | } |
7733 | case NEON::BI__builtin_neon_vaddhn_v: { |
7734 | llvm::FixedVectorType *SrcTy = |
7735 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
7736 | |
7737 | // %sum = add <4 x i32> %lhs, %rhs |
7738 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: SrcTy); |
7739 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: SrcTy); |
7740 | Ops[0] = Builder.CreateAdd(LHS: Ops[0], RHS: Ops[1], Name: "vaddhn" ); |
7741 | |
7742 | // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16> |
7743 | Constant *ShiftAmt = |
7744 | ConstantInt::get(Ty: SrcTy, V: SrcTy->getScalarSizeInBits() / 2); |
7745 | Ops[0] = Builder.CreateLShr(LHS: Ops[0], RHS: ShiftAmt, Name: "vaddhn" ); |
7746 | |
7747 | // %res = trunc <4 x i32> %high to <4 x i16> |
7748 | return Builder.CreateTrunc(V: Ops[0], DestTy: VTy, Name: "vaddhn" ); |
7749 | } |
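// The absolute-compare "less" forms are emitted as the corresponding
// "greater" intrinsics with the operands swapped.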
7750 | case NEON::BI__builtin_neon_vcale_v: |
7751 | case NEON::BI__builtin_neon_vcaleq_v: |
7752 | case NEON::BI__builtin_neon_vcalt_v: |
7753 | case NEON::BI__builtin_neon_vcaltq_v: |
7754 | std::swap(a&: Ops[0], b&: Ops[1]); |
7755 | [[fallthrough]]; |
7756 | case NEON::BI__builtin_neon_vcage_v: |
7757 | case NEON::BI__builtin_neon_vcageq_v: |
7758 | case NEON::BI__builtin_neon_vcagt_v: |
7759 | case NEON::BI__builtin_neon_vcagtq_v: { |
7760 | llvm::Type *Ty; |
7761 | switch (VTy->getScalarSizeInBits()) { |
7762 | default: llvm_unreachable("unexpected type" ); |
7763 | case 32: |
7764 | Ty = FloatTy; |
7765 | break; |
7766 | case 64: |
7767 | Ty = DoubleTy; |
7768 | break; |
7769 | case 16: |
7770 | Ty = HalfTy; |
7771 | break; |
7772 | } |
7773 | auto *VecFlt = llvm::FixedVectorType::get(ElementType: Ty, NumElts: VTy->getNumElements()); |
7774 | llvm::Type *Tys[] = { VTy, VecFlt }; |
7775 | Function *F = CGM.getIntrinsic(IID: LLVMIntrinsic, Tys); |
7776 | return EmitNeonCall(F, Ops, name: NameHint); |
7777 | } |
7778 | case NEON::BI__builtin_neon_vceqz_v: |
7779 | case NEON::BI__builtin_neon_vceqzq_v: |
7780 | return EmitAArch64CompareBuiltinExpr(Op: Ops[0], Ty, Fp: ICmpInst::FCMP_OEQ, |
7781 | Ip: ICmpInst::ICMP_EQ, Name: "vceqz" ); |
7782 | case NEON::BI__builtin_neon_vcgez_v: |
7783 | case NEON::BI__builtin_neon_vcgezq_v: |
7784 | return EmitAArch64CompareBuiltinExpr(Op: Ops[0], Ty, Fp: ICmpInst::FCMP_OGE, |
7785 | Ip: ICmpInst::ICMP_SGE, Name: "vcgez" ); |
7786 | case NEON::BI__builtin_neon_vclez_v: |
7787 | case NEON::BI__builtin_neon_vclezq_v: |
7788 | return EmitAArch64CompareBuiltinExpr(Op: Ops[0], Ty, Fp: ICmpInst::FCMP_OLE, |
7789 | Ip: ICmpInst::ICMP_SLE, Name: "vclez" ); |
7790 | case NEON::BI__builtin_neon_vcgtz_v: |
7791 | case NEON::BI__builtin_neon_vcgtzq_v: |
7792 | return EmitAArch64CompareBuiltinExpr(Op: Ops[0], Ty, Fp: ICmpInst::FCMP_OGT, |
7793 | Ip: ICmpInst::ICMP_SGT, Name: "vcgtz" ); |
7794 | case NEON::BI__builtin_neon_vcltz_v: |
7795 | case NEON::BI__builtin_neon_vcltzq_v: |
7796 | return EmitAArch64CompareBuiltinExpr(Op: Ops[0], Ty, Fp: ICmpInst::FCMP_OLT, |
7797 | Ip: ICmpInst::ICMP_SLT, Name: "vcltz" ); |
7798 | case NEON::BI__builtin_neon_vclz_v: |
7799 | case NEON::BI__builtin_neon_vclzq_v: |
7800 | // We generate a target-independent intrinsic, which needs a second argument |
7801 | // indicating whether or not clz of zero is undefined; on ARM it isn't. |
7802 | Ops.push_back(Elt: Builder.getInt1(V: getTarget().isCLZForZeroUndef())); |
7803 | break; |
7804 | case NEON::BI__builtin_neon_vcvt_f32_v: |
7805 | case NEON::BI__builtin_neon_vcvtq_f32_v: |
7806 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
7807 | Ty = GetNeonType(CGF: this, TypeFlags: NeonTypeFlags(NeonTypeFlags::Float32, false, Quad), |
7808 | HasLegalHalfType); |
7809 | return Usgn ? Builder.CreateUIToFP(V: Ops[0], DestTy: Ty, Name: "vcvt" ) |
7810 | : Builder.CreateSIToFP(V: Ops[0], DestTy: Ty, Name: "vcvt" ); |
7811 | case NEON::BI__builtin_neon_vcvt_f16_s16: |
7812 | case NEON::BI__builtin_neon_vcvt_f16_u16: |
7813 | case NEON::BI__builtin_neon_vcvtq_f16_s16: |
7814 | case NEON::BI__builtin_neon_vcvtq_f16_u16: |
7815 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
7816 | Ty = GetNeonType(CGF: this, TypeFlags: NeonTypeFlags(NeonTypeFlags::Float16, false, Quad), |
7817 | HasLegalHalfType); |
7818 | return Usgn ? Builder.CreateUIToFP(V: Ops[0], DestTy: Ty, Name: "vcvt" ) |
7819 | : Builder.CreateSIToFP(V: Ops[0], DestTy: Ty, Name: "vcvt" ); |
7820 | case NEON::BI__builtin_neon_vcvt_n_f16_s16: |
7821 | case NEON::BI__builtin_neon_vcvt_n_f16_u16: |
7822 | case NEON::BI__builtin_neon_vcvtq_n_f16_s16: |
7823 | case NEON::BI__builtin_neon_vcvtq_n_f16_u16: { |
7824 | llvm::Type *Tys[2] = { GetFloatNeonType(CGF: this, IntTypeFlags: Type), Ty }; |
7825 | Function *F = CGM.getIntrinsic(IID: Int, Tys); |
7826 | return EmitNeonCall(F, Ops, name: "vcvt_n" ); |
7827 | } |
7828 | case NEON::BI__builtin_neon_vcvt_n_f32_v: |
7829 | case NEON::BI__builtin_neon_vcvt_n_f64_v: |
7830 | case NEON::BI__builtin_neon_vcvtq_n_f32_v: |
7831 | case NEON::BI__builtin_neon_vcvtq_n_f64_v: { |
7832 | llvm::Type *Tys[2] = { GetFloatNeonType(CGF: this, IntTypeFlags: Type), Ty }; |
7833 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
7834 | Function *F = CGM.getIntrinsic(IID: Int, Tys); |
7835 | return EmitNeonCall(F, Ops, name: "vcvt_n" ); |
7836 | } |
7837 | case NEON::BI__builtin_neon_vcvt_n_s16_f16: |
7838 | case NEON::BI__builtin_neon_vcvt_n_s32_v: |
7839 | case NEON::BI__builtin_neon_vcvt_n_u16_f16: |
7840 | case NEON::BI__builtin_neon_vcvt_n_u32_v: |
7841 | case NEON::BI__builtin_neon_vcvt_n_s64_v: |
7842 | case NEON::BI__builtin_neon_vcvt_n_u64_v: |
7843 | case NEON::BI__builtin_neon_vcvtq_n_s16_f16: |
7844 | case NEON::BI__builtin_neon_vcvtq_n_s32_v: |
7845 | case NEON::BI__builtin_neon_vcvtq_n_u16_f16: |
7846 | case NEON::BI__builtin_neon_vcvtq_n_u32_v: |
7847 | case NEON::BI__builtin_neon_vcvtq_n_s64_v: |
7848 | case NEON::BI__builtin_neon_vcvtq_n_u64_v: { |
7849 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(CGF: this, IntTypeFlags: Type) }; |
7850 | Function *F = CGM.getIntrinsic(IID: LLVMIntrinsic, Tys); |
7851 | return EmitNeonCall(F, Ops, name: "vcvt_n" ); |
7852 | } |
7853 | case NEON::BI__builtin_neon_vcvt_s32_v: |
7854 | case NEON::BI__builtin_neon_vcvt_u32_v: |
7855 | case NEON::BI__builtin_neon_vcvt_s64_v: |
7856 | case NEON::BI__builtin_neon_vcvt_u64_v: |
7857 | case NEON::BI__builtin_neon_vcvt_s16_f16: |
7858 | case NEON::BI__builtin_neon_vcvt_u16_f16: |
7859 | case NEON::BI__builtin_neon_vcvtq_s32_v: |
7860 | case NEON::BI__builtin_neon_vcvtq_u32_v: |
7861 | case NEON::BI__builtin_neon_vcvtq_s64_v: |
7862 | case NEON::BI__builtin_neon_vcvtq_u64_v: |
7863 | case NEON::BI__builtin_neon_vcvtq_s16_f16: |
7864 | case NEON::BI__builtin_neon_vcvtq_u16_f16: { |
7865 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: GetFloatNeonType(CGF: this, IntTypeFlags: Type)); |
7866 | return Usgn ? Builder.CreateFPToUI(V: Ops[0], DestTy: Ty, Name: "vcvt" ) |
7867 | : Builder.CreateFPToSI(V: Ops[0], DestTy: Ty, Name: "vcvt" ); |
7868 | } |
7869 | case NEON::BI__builtin_neon_vcvta_s16_f16: |
7870 | case NEON::BI__builtin_neon_vcvta_s32_v: |
7871 | case NEON::BI__builtin_neon_vcvta_s64_v: |
7872 | case NEON::BI__builtin_neon_vcvta_u16_f16: |
7873 | case NEON::BI__builtin_neon_vcvta_u32_v: |
7874 | case NEON::BI__builtin_neon_vcvta_u64_v: |
7875 | case NEON::BI__builtin_neon_vcvtaq_s16_f16: |
7876 | case NEON::BI__builtin_neon_vcvtaq_s32_v: |
7877 | case NEON::BI__builtin_neon_vcvtaq_s64_v: |
7878 | case NEON::BI__builtin_neon_vcvtaq_u16_f16: |
7879 | case NEON::BI__builtin_neon_vcvtaq_u32_v: |
7880 | case NEON::BI__builtin_neon_vcvtaq_u64_v: |
7881 | case NEON::BI__builtin_neon_vcvtn_s16_f16: |
7882 | case NEON::BI__builtin_neon_vcvtn_s32_v: |
7883 | case NEON::BI__builtin_neon_vcvtn_s64_v: |
7884 | case NEON::BI__builtin_neon_vcvtn_u16_f16: |
7885 | case NEON::BI__builtin_neon_vcvtn_u32_v: |
7886 | case NEON::BI__builtin_neon_vcvtn_u64_v: |
7887 | case NEON::BI__builtin_neon_vcvtnq_s16_f16: |
7888 | case NEON::BI__builtin_neon_vcvtnq_s32_v: |
7889 | case NEON::BI__builtin_neon_vcvtnq_s64_v: |
7890 | case NEON::BI__builtin_neon_vcvtnq_u16_f16: |
7891 | case NEON::BI__builtin_neon_vcvtnq_u32_v: |
7892 | case NEON::BI__builtin_neon_vcvtnq_u64_v: |
7893 | case NEON::BI__builtin_neon_vcvtp_s16_f16: |
7894 | case NEON::BI__builtin_neon_vcvtp_s32_v: |
7895 | case NEON::BI__builtin_neon_vcvtp_s64_v: |
7896 | case NEON::BI__builtin_neon_vcvtp_u16_f16: |
7897 | case NEON::BI__builtin_neon_vcvtp_u32_v: |
7898 | case NEON::BI__builtin_neon_vcvtp_u64_v: |
7899 | case NEON::BI__builtin_neon_vcvtpq_s16_f16: |
7900 | case NEON::BI__builtin_neon_vcvtpq_s32_v: |
7901 | case NEON::BI__builtin_neon_vcvtpq_s64_v: |
7902 | case NEON::BI__builtin_neon_vcvtpq_u16_f16: |
7903 | case NEON::BI__builtin_neon_vcvtpq_u32_v: |
7904 | case NEON::BI__builtin_neon_vcvtpq_u64_v: |
7905 | case NEON::BI__builtin_neon_vcvtm_s16_f16: |
7906 | case NEON::BI__builtin_neon_vcvtm_s32_v: |
7907 | case NEON::BI__builtin_neon_vcvtm_s64_v: |
7908 | case NEON::BI__builtin_neon_vcvtm_u16_f16: |
7909 | case NEON::BI__builtin_neon_vcvtm_u32_v: |
7910 | case NEON::BI__builtin_neon_vcvtm_u64_v: |
7911 | case NEON::BI__builtin_neon_vcvtmq_s16_f16: |
7912 | case NEON::BI__builtin_neon_vcvtmq_s32_v: |
7913 | case NEON::BI__builtin_neon_vcvtmq_s64_v: |
7914 | case NEON::BI__builtin_neon_vcvtmq_u16_f16: |
7915 | case NEON::BI__builtin_neon_vcvtmq_u32_v: |
7916 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { |
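// Floating-point to integer conversions with an explicit rounding mode:
// 'a' rounds to nearest with ties away from zero, 'n' to nearest even,
// 'p' toward +infinity and 'm' toward -infinity. These map directly to
// target intrinsics.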
7917 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(CGF: this, IntTypeFlags: Type) }; |
7918 | return EmitNeonCall(F: CGM.getIntrinsic(IID: LLVMIntrinsic, Tys), Ops, name: NameHint); |
7919 | } |
7920 | case NEON::BI__builtin_neon_vcvtx_f32_v: { |
7921 | llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty}; |
return EmitNeonCall(F: CGM.getIntrinsic(IID: LLVMIntrinsic, Tys), Ops, name: NameHint);
}
7925 | case NEON::BI__builtin_neon_vext_v: |
7926 | case NEON::BI__builtin_neon_vextq_v: { |
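// vext extracts a window of elements from the concatenation of the two
// inputs, starting at the lane given by the immediate; model it as a
// shufflevector with indices CV, CV+1, ...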
7927 | int CV = cast<ConstantInt>(Val: Ops[2])->getSExtValue(); |
7928 | SmallVector<int, 16> Indices; |
7929 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
7930 | Indices.push_back(Elt: i+CV); |
7931 | |
7932 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
7933 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
7934 | return Builder.CreateShuffleVector(V1: Ops[0], V2: Ops[1], Mask: Indices, Name: "vext" ); |
7935 | } |
7936 | case NEON::BI__builtin_neon_vfma_v: |
7937 | case NEON::BI__builtin_neon_vfmaq_v: { |
7938 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
7939 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
7940 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: Ty); |
7941 | |
// The NEON intrinsic puts the accumulator first, unlike llvm.fma.
7943 | return emitCallMaybeConstrainedFPBuiltin( |
7944 | CGF&: *this, IntrinsicID: Intrinsic::fma, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_fma, Ty, |
7945 | Args: {Ops[1], Ops[2], Ops[0]}); |
7946 | } |
7947 | case NEON::BI__builtin_neon_vld1_v: |
7948 | case NEON::BI__builtin_neon_vld1q_v: { |
7949 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
7950 | Ops.push_back(Elt: getAlignmentValue32(PtrOp0)); |
7951 | return EmitNeonCall(F: CGM.getIntrinsic(IID: LLVMIntrinsic, Tys), Ops, name: "vld1" ); |
7952 | } |
7953 | case NEON::BI__builtin_neon_vld1_x2_v: |
7954 | case NEON::BI__builtin_neon_vld1q_x2_v: |
7955 | case NEON::BI__builtin_neon_vld1_x3_v: |
7956 | case NEON::BI__builtin_neon_vld1q_x3_v: |
7957 | case NEON::BI__builtin_neon_vld1_x4_v: |
7958 | case NEON::BI__builtin_neon_vld1q_x4_v: { |
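// vld1_xN loads N consecutive vectors; the intrinsic returns an aggregate
// that is then stored through the sret pointer passed in Ops[0].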
7959 | llvm::Type *Tys[2] = {VTy, UnqualPtrTy}; |
7960 | Function *F = CGM.getIntrinsic(IID: LLVMIntrinsic, Tys); |
7961 | Ops[1] = Builder.CreateCall(Callee: F, Args: Ops[1], Name: "vld1xN" ); |
7962 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
7963 | } |
7964 | case NEON::BI__builtin_neon_vld2_v: |
7965 | case NEON::BI__builtin_neon_vld2q_v: |
7966 | case NEON::BI__builtin_neon_vld3_v: |
7967 | case NEON::BI__builtin_neon_vld3q_v: |
7968 | case NEON::BI__builtin_neon_vld4_v: |
7969 | case NEON::BI__builtin_neon_vld4q_v: |
7970 | case NEON::BI__builtin_neon_vld2_dup_v: |
7971 | case NEON::BI__builtin_neon_vld2q_dup_v: |
7972 | case NEON::BI__builtin_neon_vld3_dup_v: |
7973 | case NEON::BI__builtin_neon_vld3q_dup_v: |
7974 | case NEON::BI__builtin_neon_vld4_dup_v: |
7975 | case NEON::BI__builtin_neon_vld4q_dup_v: { |
7976 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
7977 | Function *F = CGM.getIntrinsic(IID: LLVMIntrinsic, Tys); |
7978 | Value *Align = getAlignmentValue32(PtrOp1); |
7979 | Ops[1] = Builder.CreateCall(Callee: F, Args: {Ops[1], Align}, Name: NameHint); |
7980 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
7981 | } |
7982 | case NEON::BI__builtin_neon_vld1_dup_v: |
7983 | case NEON::BI__builtin_neon_vld1q_dup_v: { |
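// Load a single element and splat it to every lane of the result vector.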
7984 | Value *V = PoisonValue::get(T: Ty); |
7985 | PtrOp0 = PtrOp0.withElementType(ElemTy: VTy->getElementType()); |
7986 | LoadInst *Ld = Builder.CreateLoad(Addr: PtrOp0); |
7987 | llvm::Constant *CI = ConstantInt::get(Ty: SizeTy, V: 0); |
7988 | Ops[0] = Builder.CreateInsertElement(Vec: V, NewElt: Ld, Idx: CI); |
7989 | return EmitNeonSplat(V: Ops[0], C: CI); |
7990 | } |
7991 | case NEON::BI__builtin_neon_vld2_lane_v: |
7992 | case NEON::BI__builtin_neon_vld2q_lane_v: |
7993 | case NEON::BI__builtin_neon_vld3_lane_v: |
7994 | case NEON::BI__builtin_neon_vld3q_lane_v: |
7995 | case NEON::BI__builtin_neon_vld4_lane_v: |
7996 | case NEON::BI__builtin_neon_vld4q_lane_v: { |
7997 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
7998 | Function *F = CGM.getIntrinsic(IID: LLVMIntrinsic, Tys); |
7999 | for (unsigned I = 2; I < Ops.size() - 1; ++I) |
8000 | Ops[I] = Builder.CreateBitCast(V: Ops[I], DestTy: Ty); |
8001 | Ops.push_back(Elt: getAlignmentValue32(PtrOp1)); |
8002 | Ops[1] = Builder.CreateCall(Callee: F, Args: ArrayRef(Ops).slice(N: 1), Name: NameHint); |
8003 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
8004 | } |
8005 | case NEON::BI__builtin_neon_vmovl_v: { |
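// Lengthening move: widen each element to twice its width via sign or
// zero extension.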
8006 | llvm::FixedVectorType *DTy = |
8007 | llvm::FixedVectorType::getTruncatedElementVectorType(VTy); |
8008 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: DTy); |
8009 | if (Usgn) |
8010 | return Builder.CreateZExt(V: Ops[0], DestTy: Ty, Name: "vmovl" ); |
8011 | return Builder.CreateSExt(V: Ops[0], DestTy: Ty, Name: "vmovl" ); |
8012 | } |
8013 | case NEON::BI__builtin_neon_vmovn_v: { |
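// Narrowing move: truncate each element of the wider source vector to
// half its width.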
8014 | llvm::FixedVectorType *QTy = |
8015 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
8016 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: QTy); |
8017 | return Builder.CreateTrunc(V: Ops[0], DestTy: Ty, Name: "vmovn" ); |
8018 | } |
8019 | case NEON::BI__builtin_neon_vmull_v: |
// FIXME: the integer vmull operations could be emitted in terms of pure
// LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
// hoisting the exts outside loops. Until GlobalISel can see through such
// movement, this leads to bad codegen, so we need an intrinsic for now.
8025 | Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls; |
8026 | Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int; |
8027 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vmull" ); |
8028 | case NEON::BI__builtin_neon_vpadal_v: |
8029 | case NEON::BI__builtin_neon_vpadalq_v: { |
8030 | // The source operand type has twice as many elements of half the size. |
8031 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); |
8032 | llvm::Type *EltTy = |
8033 | llvm::IntegerType::get(C&: getLLVMContext(), NumBits: EltBits / 2); |
8034 | auto *NarrowTy = |
8035 | llvm::FixedVectorType::get(ElementType: EltTy, NumElts: VTy->getNumElements() * 2); |
8036 | llvm::Type *Tys[2] = { Ty, NarrowTy }; |
8037 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: NameHint); |
8038 | } |
8039 | case NEON::BI__builtin_neon_vpaddl_v: |
8040 | case NEON::BI__builtin_neon_vpaddlq_v: { |
8041 | // The source operand type has twice as many elements of half the size. |
8042 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); |
8043 | llvm::Type *EltTy = llvm::IntegerType::get(C&: getLLVMContext(), NumBits: EltBits / 2); |
8044 | auto *NarrowTy = |
8045 | llvm::FixedVectorType::get(ElementType: EltTy, NumElts: VTy->getNumElements() * 2); |
8046 | llvm::Type *Tys[2] = { Ty, NarrowTy }; |
8047 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vpaddl" ); |
8048 | } |
8049 | case NEON::BI__builtin_neon_vqdmlal_v: |
8050 | case NEON::BI__builtin_neon_vqdmlsl_v: { |
8051 | SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end()); |
8052 | Ops[1] = |
8053 | EmitNeonCall(F: CGM.getIntrinsic(IID: LLVMIntrinsic, Tys: Ty), Ops&: MulOps, name: "vqdmlal" ); |
8054 | Ops.resize(N: 2); |
8055 | return EmitNeonCall(F: CGM.getIntrinsic(IID: AltLLVMIntrinsic, Tys: Ty), Ops, name: NameHint); |
8056 | } |
8057 | case NEON::BI__builtin_neon_vqdmulhq_lane_v: |
8058 | case NEON::BI__builtin_neon_vqdmulh_lane_v: |
8059 | case NEON::BI__builtin_neon_vqrdmulhq_lane_v: |
8060 | case NEON::BI__builtin_neon_vqrdmulh_lane_v: { |
8061 | auto *RTy = cast<llvm::FixedVectorType>(Val: Ty); |
8062 | if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || |
8063 | BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v) |
8064 | RTy = llvm::FixedVectorType::get(ElementType: RTy->getElementType(), |
8065 | NumElts: RTy->getNumElements() * 2); |
8066 | llvm::Type *Tys[2] = { |
8067 | RTy, GetNeonType(CGF: this, TypeFlags: NeonTypeFlags(Type.getEltType(), false, |
8068 | /*isQuad*/ false))}; |
8069 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: NameHint); |
8070 | } |
8071 | case NEON::BI__builtin_neon_vqdmulhq_laneq_v: |
8072 | case NEON::BI__builtin_neon_vqdmulh_laneq_v: |
8073 | case NEON::BI__builtin_neon_vqrdmulhq_laneq_v: |
8074 | case NEON::BI__builtin_neon_vqrdmulh_laneq_v: { |
8075 | llvm::Type *Tys[2] = { |
8076 | Ty, GetNeonType(CGF: this, TypeFlags: NeonTypeFlags(Type.getEltType(), false, |
8077 | /*isQuad*/ true))}; |
8078 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: NameHint); |
8079 | } |
8080 | case NEON::BI__builtin_neon_vqshl_n_v: |
8081 | case NEON::BI__builtin_neon_vqshlq_n_v: |
8082 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vqshl_n" , |
8083 | shift: 1, rightshift: false); |
8084 | case NEON::BI__builtin_neon_vqshlu_n_v: |
8085 | case NEON::BI__builtin_neon_vqshluq_n_v: |
8086 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vqshlu_n" , |
8087 | shift: 1, rightshift: false); |
8088 | case NEON::BI__builtin_neon_vrecpe_v: |
8089 | case NEON::BI__builtin_neon_vrecpeq_v: |
8090 | case NEON::BI__builtin_neon_vrsqrte_v: |
8091 | case NEON::BI__builtin_neon_vrsqrteq_v: |
8092 | Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic; |
8093 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: NameHint); |
8094 | case NEON::BI__builtin_neon_vrndi_v: |
8095 | case NEON::BI__builtin_neon_vrndiq_v: |
8096 | Int = Builder.getIsFPConstrained() |
8097 | ? Intrinsic::experimental_constrained_nearbyint |
8098 | : Intrinsic::nearbyint; |
8099 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: NameHint); |
8100 | case NEON::BI__builtin_neon_vrshr_n_v: |
8101 | case NEON::BI__builtin_neon_vrshrq_n_v: |
8102 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrshr_n" , |
8103 | shift: 1, rightshift: true); |
8104 | case NEON::BI__builtin_neon_vsha512hq_u64: |
8105 | case NEON::BI__builtin_neon_vsha512h2q_u64: |
8106 | case NEON::BI__builtin_neon_vsha512su0q_u64: |
8107 | case NEON::BI__builtin_neon_vsha512su1q_u64: { |
8108 | Function *F = CGM.getIntrinsic(IID: Int); |
8109 | return EmitNeonCall(F, Ops, name: "" ); |
8110 | } |
8111 | case NEON::BI__builtin_neon_vshl_n_v: |
8112 | case NEON::BI__builtin_neon_vshlq_n_v: |
8113 | Ops[1] = EmitNeonShiftVector(V: Ops[1], Ty, neg: false); |
8114 | return Builder.CreateShl(LHS: Builder.CreateBitCast(V: Ops[0],DestTy: Ty), RHS: Ops[1], |
8115 | Name: "vshl_n" ); |
8116 | case NEON::BI__builtin_neon_vshll_n_v: { |
8117 | llvm::FixedVectorType *SrcTy = |
8118 | llvm::FixedVectorType::getTruncatedElementVectorType(VTy); |
8119 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: SrcTy); |
8120 | if (Usgn) |
8121 | Ops[0] = Builder.CreateZExt(V: Ops[0], DestTy: VTy); |
8122 | else |
8123 | Ops[0] = Builder.CreateSExt(V: Ops[0], DestTy: VTy); |
8124 | Ops[1] = EmitNeonShiftVector(V: Ops[1], Ty: VTy, neg: false); |
8125 | return Builder.CreateShl(LHS: Ops[0], RHS: Ops[1], Name: "vshll_n" ); |
8126 | } |
8127 | case NEON::BI__builtin_neon_vshrn_n_v: { |
8128 | llvm::FixedVectorType *SrcTy = |
8129 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
8130 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: SrcTy); |
8131 | Ops[1] = EmitNeonShiftVector(V: Ops[1], Ty: SrcTy, neg: false); |
8132 | if (Usgn) |
8133 | Ops[0] = Builder.CreateLShr(LHS: Ops[0], RHS: Ops[1]); |
8134 | else |
8135 | Ops[0] = Builder.CreateAShr(LHS: Ops[0], RHS: Ops[1]); |
8136 | return Builder.CreateTrunc(V: Ops[0], DestTy: Ty, Name: "vshrn_n" ); |
8137 | } |
8138 | case NEON::BI__builtin_neon_vshr_n_v: |
8139 | case NEON::BI__builtin_neon_vshrq_n_v: |
8140 | return EmitNeonRShiftImm(Vec: Ops[0], Shift: Ops[1], Ty, usgn: Usgn, name: "vshr_n" ); |
8141 | case NEON::BI__builtin_neon_vst1_v: |
8142 | case NEON::BI__builtin_neon_vst1q_v: |
8143 | case NEON::BI__builtin_neon_vst2_v: |
8144 | case NEON::BI__builtin_neon_vst2q_v: |
8145 | case NEON::BI__builtin_neon_vst3_v: |
8146 | case NEON::BI__builtin_neon_vst3q_v: |
8147 | case NEON::BI__builtin_neon_vst4_v: |
8148 | case NEON::BI__builtin_neon_vst4q_v: |
8149 | case NEON::BI__builtin_neon_vst2_lane_v: |
8150 | case NEON::BI__builtin_neon_vst2q_lane_v: |
8151 | case NEON::BI__builtin_neon_vst3_lane_v: |
8152 | case NEON::BI__builtin_neon_vst3q_lane_v: |
8153 | case NEON::BI__builtin_neon_vst4_lane_v: |
8154 | case NEON::BI__builtin_neon_vst4q_lane_v: { |
8155 | llvm::Type *Tys[] = {Int8PtrTy, Ty}; |
8156 | Ops.push_back(Elt: getAlignmentValue32(PtrOp0)); |
8157 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "" ); |
8158 | } |
8159 | case NEON::BI__builtin_neon_vsm3partw1q_u32: |
8160 | case NEON::BI__builtin_neon_vsm3partw2q_u32: |
8161 | case NEON::BI__builtin_neon_vsm3ss1q_u32: |
8162 | case NEON::BI__builtin_neon_vsm4ekeyq_u32: |
8163 | case NEON::BI__builtin_neon_vsm4eq_u32: { |
8164 | Function *F = CGM.getIntrinsic(IID: Int); |
8165 | return EmitNeonCall(F, Ops, name: "" ); |
8166 | } |
8167 | case NEON::BI__builtin_neon_vsm3tt1aq_u32: |
8168 | case NEON::BI__builtin_neon_vsm3tt1bq_u32: |
8169 | case NEON::BI__builtin_neon_vsm3tt2aq_u32: |
8170 | case NEON::BI__builtin_neon_vsm3tt2bq_u32: { |
8171 | Function *F = CGM.getIntrinsic(IID: Int); |
8172 | Ops[3] = Builder.CreateZExt(V: Ops[3], DestTy: Int64Ty); |
8173 | return EmitNeonCall(F, Ops, name: "" ); |
8174 | } |
8175 | case NEON::BI__builtin_neon_vst1_x2_v: |
8176 | case NEON::BI__builtin_neon_vst1q_x2_v: |
8177 | case NEON::BI__builtin_neon_vst1_x3_v: |
8178 | case NEON::BI__builtin_neon_vst1q_x3_v: |
8179 | case NEON::BI__builtin_neon_vst1_x4_v: |
8180 | case NEON::BI__builtin_neon_vst1q_x4_v: { |
// TODO: Currently in AArch32 mode the pointer operand comes first, whereas
// in AArch64 it comes last. We may want to standardize on one or the other.
8183 | if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be || |
8184 | Arch == llvm::Triple::aarch64_32) { |
8185 | llvm::Type *Tys[2] = {VTy, UnqualPtrTy}; |
8186 | std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end()); |
8187 | return EmitNeonCall(F: CGM.getIntrinsic(IID: LLVMIntrinsic, Tys), Ops, name: "" ); |
8188 | } |
8189 | llvm::Type *Tys[2] = {UnqualPtrTy, VTy}; |
8190 | return EmitNeonCall(F: CGM.getIntrinsic(IID: LLVMIntrinsic, Tys), Ops, name: "" ); |
8191 | } |
8192 | case NEON::BI__builtin_neon_vsubhn_v: { |
8193 | llvm::FixedVectorType *SrcTy = |
8194 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
8195 | |
8196 | // %sum = add <4 x i32> %lhs, %rhs |
8197 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: SrcTy); |
8198 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: SrcTy); |
8199 | Ops[0] = Builder.CreateSub(LHS: Ops[0], RHS: Ops[1], Name: "vsubhn" ); |
8200 | |
8201 | // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16> |
8202 | Constant *ShiftAmt = |
8203 | ConstantInt::get(Ty: SrcTy, V: SrcTy->getScalarSizeInBits() / 2); |
8204 | Ops[0] = Builder.CreateLShr(LHS: Ops[0], RHS: ShiftAmt, Name: "vsubhn" ); |
8205 | |
8206 | // %res = trunc <4 x i32> %high to <4 x i16> |
8207 | return Builder.CreateTrunc(V: Ops[0], DestTy: VTy, Name: "vsubhn" ); |
8208 | } |
8209 | case NEON::BI__builtin_neon_vtrn_v: |
8210 | case NEON::BI__builtin_neon_vtrnq_v: { |
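// vtrn produces two result vectors by transposing pairs of adjacent
// elements; each result is built with a shufflevector and stored through
// the sret pointer in Ops[0].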
8211 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
8212 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: Ty); |
8213 | Value *SV = nullptr; |
8214 | |
8215 | for (unsigned vi = 0; vi != 2; ++vi) { |
8216 | SmallVector<int, 16> Indices; |
8217 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
8218 | Indices.push_back(Elt: i+vi); |
8219 | Indices.push_back(Elt: i+e+vi); |
8220 | } |
8221 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ptr: Ops[0], Idx0: vi); |
8222 | SV = Builder.CreateShuffleVector(V1: Ops[1], V2: Ops[2], Mask: Indices, Name: "vtrn" ); |
8223 | SV = Builder.CreateDefaultAlignedStore(Val: SV, Addr); |
8224 | } |
8225 | return SV; |
8226 | } |
8227 | case NEON::BI__builtin_neon_vtst_v: |
8228 | case NEON::BI__builtin_neon_vtstq_v: { |
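// vtst performs a bitwise test: each lane becomes all ones if (a & b) is
// non-zero and all zeros otherwise.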
8229 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
8230 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
8231 | Ops[0] = Builder.CreateAnd(LHS: Ops[0], RHS: Ops[1]); |
8232 | Ops[0] = Builder.CreateICmp(P: ICmpInst::ICMP_NE, LHS: Ops[0], |
8233 | RHS: ConstantAggregateZero::get(Ty)); |
8234 | return Builder.CreateSExt(V: Ops[0], DestTy: Ty, Name: "vtst" ); |
8235 | } |
8236 | case NEON::BI__builtin_neon_vuzp_v: |
8237 | case NEON::BI__builtin_neon_vuzpq_v: { |
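// vuzp de-interleaves the inputs: the first result takes the even-indexed
// elements, the second the odd-indexed ones. Both results are stored
// through the sret pointer in Ops[0].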
8238 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
8239 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: Ty); |
8240 | Value *SV = nullptr; |
8241 | |
8242 | for (unsigned vi = 0; vi != 2; ++vi) { |
8243 | SmallVector<int, 16> Indices; |
8244 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
8245 | Indices.push_back(Elt: 2*i+vi); |
8246 | |
8247 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ptr: Ops[0], Idx0: vi); |
8248 | SV = Builder.CreateShuffleVector(V1: Ops[1], V2: Ops[2], Mask: Indices, Name: "vuzp" ); |
8249 | SV = Builder.CreateDefaultAlignedStore(Val: SV, Addr); |
8250 | } |
8251 | return SV; |
8252 | } |
8253 | case NEON::BI__builtin_neon_vxarq_u64: { |
8254 | Function *F = CGM.getIntrinsic(IID: Int); |
8255 | Ops[2] = Builder.CreateZExt(V: Ops[2], DestTy: Int64Ty); |
8256 | return EmitNeonCall(F, Ops, name: "" ); |
8257 | } |
8258 | case NEON::BI__builtin_neon_vzip_v: |
8259 | case NEON::BI__builtin_neon_vzipq_v: { |
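// vzip interleaves the two inputs: the first result zips their low halves,
// the second their high halves. Both results are stored through the sret
// pointer in Ops[0].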
8260 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
8261 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: Ty); |
8262 | Value *SV = nullptr; |
8263 | |
8264 | for (unsigned vi = 0; vi != 2; ++vi) { |
8265 | SmallVector<int, 16> Indices; |
8266 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
8267 | Indices.push_back(Elt: (i + vi*e) >> 1); |
8268 | Indices.push_back(Elt: ((i + vi*e) >> 1)+e); |
8269 | } |
8270 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ptr: Ops[0], Idx0: vi); |
8271 | SV = Builder.CreateShuffleVector(V1: Ops[1], V2: Ops[2], Mask: Indices, Name: "vzip" ); |
8272 | SV = Builder.CreateDefaultAlignedStore(Val: SV, Addr); |
8273 | } |
8274 | return SV; |
8275 | } |
8276 | case NEON::BI__builtin_neon_vdot_s32: |
8277 | case NEON::BI__builtin_neon_vdot_u32: |
8278 | case NEON::BI__builtin_neon_vdotq_s32: |
8279 | case NEON::BI__builtin_neon_vdotq_u32: { |
8280 | auto *InputTy = |
8281 | llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: Ty->getPrimitiveSizeInBits() / 8); |
8282 | llvm::Type *Tys[2] = { Ty, InputTy }; |
8283 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vdot" ); |
8284 | } |
8285 | case NEON::BI__builtin_neon_vfmlal_low_f16: |
8286 | case NEON::BI__builtin_neon_vfmlalq_low_f16: { |
8287 | auto *InputTy = |
8288 | llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: Ty->getPrimitiveSizeInBits() / 16); |
8289 | llvm::Type *Tys[2] = { Ty, InputTy }; |
8290 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vfmlal_low" ); |
8291 | } |
8292 | case NEON::BI__builtin_neon_vfmlsl_low_f16: |
8293 | case NEON::BI__builtin_neon_vfmlslq_low_f16: { |
8294 | auto *InputTy = |
8295 | llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: Ty->getPrimitiveSizeInBits() / 16); |
8296 | llvm::Type *Tys[2] = { Ty, InputTy }; |
8297 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vfmlsl_low" ); |
8298 | } |
8299 | case NEON::BI__builtin_neon_vfmlal_high_f16: |
8300 | case NEON::BI__builtin_neon_vfmlalq_high_f16: { |
8301 | auto *InputTy = |
8302 | llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: Ty->getPrimitiveSizeInBits() / 16); |
8303 | llvm::Type *Tys[2] = { Ty, InputTy }; |
8304 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vfmlal_high" ); |
8305 | } |
8306 | case NEON::BI__builtin_neon_vfmlsl_high_f16: |
8307 | case NEON::BI__builtin_neon_vfmlslq_high_f16: { |
8308 | auto *InputTy = |
8309 | llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: Ty->getPrimitiveSizeInBits() / 16); |
8310 | llvm::Type *Tys[2] = { Ty, InputTy }; |
8311 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vfmlsl_high" ); |
8312 | } |
8313 | case NEON::BI__builtin_neon_vmmlaq_s32: |
8314 | case NEON::BI__builtin_neon_vmmlaq_u32: { |
8315 | auto *InputTy = |
8316 | llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: Ty->getPrimitiveSizeInBits() / 8); |
8317 | llvm::Type *Tys[2] = { Ty, InputTy }; |
8318 | return EmitNeonCall(F: CGM.getIntrinsic(IID: LLVMIntrinsic, Tys), Ops, name: "vmmla" ); |
8319 | } |
8320 | case NEON::BI__builtin_neon_vusmmlaq_s32: { |
8321 | auto *InputTy = |
8322 | llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: Ty->getPrimitiveSizeInBits() / 8); |
8323 | llvm::Type *Tys[2] = { Ty, InputTy }; |
8324 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vusmmla" ); |
8325 | } |
8326 | case NEON::BI__builtin_neon_vusdot_s32: |
8327 | case NEON::BI__builtin_neon_vusdotq_s32: { |
8328 | auto *InputTy = |
8329 | llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: Ty->getPrimitiveSizeInBits() / 8); |
8330 | llvm::Type *Tys[2] = { Ty, InputTy }; |
8331 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vusdot" ); |
8332 | } |
8333 | case NEON::BI__builtin_neon_vbfdot_f32: |
8334 | case NEON::BI__builtin_neon_vbfdotq_f32: { |
8335 | llvm::Type *InputTy = |
8336 | llvm::FixedVectorType::get(ElementType: BFloatTy, NumElts: Ty->getPrimitiveSizeInBits() / 16); |
8337 | llvm::Type *Tys[2] = { Ty, InputTy }; |
8338 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vbfdot" ); |
8339 | } |
8340 | case NEON::BI__builtin_neon___a32_vcvt_bf16_f32: { |
8341 | llvm::Type *Tys[1] = { Ty }; |
8342 | Function *F = CGM.getIntrinsic(IID: Int, Tys); |
8343 | return EmitNeonCall(F, Ops, name: "vcvtfp2bf" ); |
8344 | } |
8345 | |
8346 | } |
8347 | |
8348 | assert(Int && "Expected valid intrinsic number" ); |
8349 | |
8350 | // Determine the type(s) of this overloaded AArch64 intrinsic. |
8351 | Function *F = LookupNeonLLVMIntrinsic(IntrinsicID: Int, Modifier, ArgType: Ty, E); |
8352 | |
8353 | Value *Result = EmitNeonCall(F, Ops, name: NameHint); |
8354 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
// Cast the one-element vector result of the AArch64 intrinsic back to the
// scalar type expected by the builtin.
8357 | return Builder.CreateBitCast(V: Result, DestTy: ResultType, Name: NameHint); |
8358 | } |
8359 | |
8360 | Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr( |
8361 | Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp, |
8362 | const CmpInst::Predicate Ip, const Twine &Name) { |
8363 | llvm::Type *OTy = Op->getType(); |
8364 | |
8365 | // FIXME: this is utterly horrific. We should not be looking at previous |
8366 | // codegen context to find out what needs doing. Unfortunately TableGen |
8367 | // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32 |
8368 | // (etc). |
8369 | if (BitCastInst *BI = dyn_cast<BitCastInst>(Val: Op)) |
8370 | OTy = BI->getOperand(i_nocapture: 0)->getType(); |
8371 | |
8372 | Op = Builder.CreateBitCast(V: Op, DestTy: OTy); |
8373 | if (OTy->getScalarType()->isFloatingPointTy()) { |
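// Equality against zero uses a quiet compare; the ordering predicates use
// signaling compares, matching IEEE 754 semantics where <, <=, >, >= raise
// Invalid on NaN operands while == does not.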
8374 | if (Fp == CmpInst::FCMP_OEQ) |
8375 | Op = Builder.CreateFCmp(P: Fp, LHS: Op, RHS: Constant::getNullValue(Ty: OTy)); |
8376 | else |
8377 | Op = Builder.CreateFCmpS(P: Fp, LHS: Op, RHS: Constant::getNullValue(Ty: OTy)); |
8378 | } else { |
8379 | Op = Builder.CreateICmp(P: Ip, LHS: Op, RHS: Constant::getNullValue(Ty: OTy)); |
8380 | } |
8381 | return Builder.CreateSExt(V: Op, DestTy: Ty, Name); |
8382 | } |
8383 | |
8384 | static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
8385 | Value *ExtOp, Value *IndexOp, |
8386 | llvm::Type *ResTy, unsigned IntID, |
8387 | const char *Name) { |
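// Pack the list of 64-bit table operands into 128-bit register pairs
// (padding an odd trailing operand with zeros), append the index operand
// and emit the requested TBL/TBX intrinsic.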
8388 | SmallVector<Value *, 2> TblOps; |
8389 | if (ExtOp) |
8390 | TblOps.push_back(Elt: ExtOp); |
8391 | |
// Build a vector containing sequential numbers like (0, 1, 2, ..., 15)
8393 | SmallVector<int, 16> Indices; |
8394 | auto *TblTy = cast<llvm::FixedVectorType>(Val: Ops[0]->getType()); |
8395 | for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) { |
8396 | Indices.push_back(Elt: 2*i); |
8397 | Indices.push_back(Elt: 2*i+1); |
8398 | } |
8399 | |
8400 | int PairPos = 0, End = Ops.size() - 1; |
8401 | while (PairPos < End) { |
8402 | TblOps.push_back(Elt: CGF.Builder.CreateShuffleVector(V1: Ops[PairPos], |
8403 | V2: Ops[PairPos+1], Mask: Indices, |
8404 | Name)); |
8405 | PairPos += 2; |
8406 | } |
8407 | |
// If there's an odd number of 64-bit lookup tables, fill the high 64 bits
// of the last 128-bit lookup table with zero.
8410 | if (PairPos == End) { |
8411 | Value *ZeroTbl = ConstantAggregateZero::get(Ty: TblTy); |
8412 | TblOps.push_back(Elt: CGF.Builder.CreateShuffleVector(V1: Ops[PairPos], |
8413 | V2: ZeroTbl, Mask: Indices, Name)); |
8414 | } |
8415 | |
8416 | Function *TblF; |
8417 | TblOps.push_back(Elt: IndexOp); |
8418 | TblF = CGF.CGM.getIntrinsic(IID: IntID, Tys: ResTy); |
8419 | |
8420 | return CGF.EmitNeonCall(F: TblF, Ops&: TblOps, name: Name); |
8421 | } |
8422 | |
8423 | Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) { |
8424 | unsigned Value; |
8425 | switch (BuiltinID) { |
8426 | default: |
8427 | return nullptr; |
8428 | case clang::ARM::BI__builtin_arm_nop: |
8429 | Value = 0; |
8430 | break; |
8431 | case clang::ARM::BI__builtin_arm_yield: |
8432 | case clang::ARM::BI__yield: |
8433 | Value = 1; |
8434 | break; |
8435 | case clang::ARM::BI__builtin_arm_wfe: |
8436 | case clang::ARM::BI__wfe: |
8437 | Value = 2; |
8438 | break; |
8439 | case clang::ARM::BI__builtin_arm_wfi: |
8440 | case clang::ARM::BI__wfi: |
8441 | Value = 3; |
8442 | break; |
8443 | case clang::ARM::BI__builtin_arm_sev: |
8444 | case clang::ARM::BI__sev: |
8445 | Value = 4; |
8446 | break; |
8447 | case clang::ARM::BI__builtin_arm_sevl: |
8448 | case clang::ARM::BI__sevl: |
8449 | Value = 5; |
8450 | break; |
8451 | } |
8452 | |
8453 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::arm_hint), |
8454 | Args: llvm::ConstantInt::get(Ty: Int32Ty, V: Value)); |
8455 | } |
8456 | |
8457 | enum SpecialRegisterAccessKind { |
8458 | NormalRead, |
8459 | VolatileRead, |
8460 | Write, |
8461 | }; |
8462 | |
// Generates the IR for __builtin_read_exec_*.
// Lowers the builtin to the amdgcn_ballot intrinsic.
8465 | static Value *EmitAMDGCNBallotForExec(CodeGenFunction &CGF, const CallExpr *E, |
8466 | llvm::Type *RegisterType, |
8467 | llvm::Type *ValueType, bool isExecHi) { |
8468 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
8469 | CodeGen::CodeGenModule &CGM = CGF.CGM; |
8470 | |
8471 | Function *F = CGM.getIntrinsic(IID: Intrinsic::amdgcn_ballot, Tys: {RegisterType}); |
8472 | llvm::Value *Call = Builder.CreateCall(Callee: F, Args: {Builder.getInt1(V: true)}); |
8473 | |
8474 | if (isExecHi) { |
8475 | Value *Rt2 = Builder.CreateLShr(LHS: Call, RHS: 32); |
8476 | Rt2 = Builder.CreateTrunc(V: Rt2, DestTy: CGF.Int32Ty); |
8477 | return Rt2; |
8478 | } |
8479 | |
8480 | return Call; |
8481 | } |
8482 | |
// Generates the IR for the read/write special register builtin.
// ValueType is the type of the value that is to be written or read;
// RegisterType is the type of the register being written to or read from.
8486 | static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF, |
8487 | const CallExpr *E, |
8488 | llvm::Type *RegisterType, |
8489 | llvm::Type *ValueType, |
8490 | SpecialRegisterAccessKind AccessKind, |
8491 | StringRef SysReg = "" ) { |
// The read/write register intrinsics only support 32-, 64- and 128-bit
// operations.
8493 | assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64) || |
8494 | RegisterType->isIntegerTy(128)) && |
8495 | "Unsupported size for register." ); |
8496 | |
8497 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
8498 | CodeGen::CodeGenModule &CGM = CGF.CGM; |
8499 | LLVMContext &Context = CGM.getLLVMContext(); |
8500 | |
8501 | if (SysReg.empty()) { |
8502 | const Expr *SysRegStrExpr = E->getArg(Arg: 0)->IgnoreParenCasts(); |
8503 | SysReg = cast<clang::StringLiteral>(Val: SysRegStrExpr)->getString(); |
8504 | } |
8505 | |
8506 | llvm::Metadata *Ops[] = { llvm::MDString::get(Context, Str: SysReg) }; |
8507 | llvm::MDNode *RegName = llvm::MDNode::get(Context, MDs: Ops); |
8508 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, MD: RegName); |
8509 | |
8510 | llvm::Type *Types[] = { RegisterType }; |
8511 | |
8512 | bool MixedTypes = RegisterType->isIntegerTy(Bitwidth: 64) && ValueType->isIntegerTy(Bitwidth: 32); |
8513 | assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64)) |
8514 | && "Can't fit 64-bit value in 32-bit register" ); |
8515 | |
8516 | if (AccessKind != Write) { |
8517 | assert(AccessKind == NormalRead || AccessKind == VolatileRead); |
8518 | llvm::Function *F = CGM.getIntrinsic( |
8519 | IID: AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register |
8520 | : llvm::Intrinsic::read_register, |
8521 | Tys: Types); |
8522 | llvm::Value *Call = Builder.CreateCall(Callee: F, Args: Metadata); |
8523 | |
8524 | if (MixedTypes) |
8525 | // Read into 64 bit register and then truncate result to 32 bit. |
8526 | return Builder.CreateTrunc(V: Call, DestTy: ValueType); |
8527 | |
8528 | if (ValueType->isPointerTy()) |
8529 | // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*). |
8530 | return Builder.CreateIntToPtr(V: Call, DestTy: ValueType); |
8531 | |
8532 | return Call; |
8533 | } |
8534 | |
8535 | llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::write_register, Tys: Types); |
8536 | llvm::Value *ArgValue = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
8537 | if (MixedTypes) { |
8538 | // Extend 32 bit write value to 64 bit to pass to write. |
8539 | ArgValue = Builder.CreateZExt(V: ArgValue, DestTy: RegisterType); |
8540 | return Builder.CreateCall(Callee: F, Args: { Metadata, ArgValue }); |
8541 | } |
8542 | |
8543 | if (ValueType->isPointerTy()) { |
// Have a VoidPtrTy ArgValue but the write intrinsic takes an i32/i64.
8545 | ArgValue = Builder.CreatePtrToInt(V: ArgValue, DestTy: RegisterType); |
8546 | return Builder.CreateCall(Callee: F, Args: { Metadata, ArgValue }); |
8547 | } |
8548 | |
8549 | return Builder.CreateCall(Callee: F, Args: { Metadata, ArgValue }); |
8550 | } |
8551 | |
8552 | /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra |
8553 | /// argument that specifies the vector type. |
static bool HasExtraNeonArgument(unsigned BuiltinID) {
8555 | switch (BuiltinID) { |
8556 | default: break; |
8557 | case NEON::BI__builtin_neon_vget_lane_i8: |
8558 | case NEON::BI__builtin_neon_vget_lane_i16: |
8559 | case NEON::BI__builtin_neon_vget_lane_bf16: |
8560 | case NEON::BI__builtin_neon_vget_lane_i32: |
8561 | case NEON::BI__builtin_neon_vget_lane_i64: |
8562 | case NEON::BI__builtin_neon_vget_lane_f32: |
8563 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
8564 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
8565 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
8566 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
8567 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
8568 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
8569 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
8570 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
8571 | case NEON::BI__builtin_neon_vset_lane_i8: |
8572 | case NEON::BI__builtin_neon_vset_lane_i16: |
8573 | case NEON::BI__builtin_neon_vset_lane_bf16: |
8574 | case NEON::BI__builtin_neon_vset_lane_i32: |
8575 | case NEON::BI__builtin_neon_vset_lane_i64: |
8576 | case NEON::BI__builtin_neon_vset_lane_f32: |
8577 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
8578 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
8579 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
8580 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
8581 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
8582 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
8583 | case NEON::BI__builtin_neon_vsha1h_u32: |
8584 | case NEON::BI__builtin_neon_vsha1cq_u32: |
8585 | case NEON::BI__builtin_neon_vsha1pq_u32: |
8586 | case NEON::BI__builtin_neon_vsha1mq_u32: |
8587 | case NEON::BI__builtin_neon_vcvth_bf16_f32: |
8588 | case clang::ARM::BI_MoveToCoprocessor: |
8589 | case clang::ARM::BI_MoveToCoprocessor2: |
8590 | return false; |
8591 | } |
8592 | return true; |
8593 | } |
8594 | |
8595 | Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, |
8596 | const CallExpr *E, |
8597 | ReturnValueSlot ReturnValue, |
8598 | llvm::Triple::ArchType Arch) { |
8599 | if (auto Hint = GetValueForARMHint(BuiltinID)) |
8600 | return Hint; |
8601 | |
8602 | if (BuiltinID == clang::ARM::BI__emit) { |
8603 | bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb; |
8604 | llvm::FunctionType *FTy = |
8605 | llvm::FunctionType::get(Result: VoidTy, /*Variadic=*/isVarArg: false); |
8606 | |
8607 | Expr::EvalResult Result; |
8608 | if (!E->getArg(Arg: 0)->EvaluateAsInt(Result, Ctx: CGM.getContext())) |
8609 | llvm_unreachable("Sema will ensure that the parameter is constant" ); |
8610 | |
8611 | llvm::APSInt Value = Result.Val.getInt(); |
8612 | uint64_t ZExtValue = Value.zextOrTrunc(width: IsThumb ? 16 : 32).getZExtValue(); |
8613 | |
8614 | llvm::InlineAsm *Emit = |
8615 | IsThumb ? InlineAsm::get(Ty: FTy, AsmString: ".inst.n 0x" + utohexstr(X: ZExtValue), Constraints: "" , |
8616 | /*hasSideEffects=*/true) |
8617 | : InlineAsm::get(Ty: FTy, AsmString: ".inst 0x" + utohexstr(X: ZExtValue), Constraints: "" , |
8618 | /*hasSideEffects=*/true); |
8619 | |
8620 | return Builder.CreateCall(Callee: Emit); |
8621 | } |
8622 | |
8623 | if (BuiltinID == clang::ARM::BI__builtin_arm_dbg) { |
8624 | Value *Option = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8625 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::arm_dbg), Args: Option); |
8626 | } |
8627 | |
8628 | if (BuiltinID == clang::ARM::BI__builtin_arm_prefetch) { |
8629 | Value *Address = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8630 | Value *RW = EmitScalarExpr(E: E->getArg(Arg: 1)); |
8631 | Value *IsData = EmitScalarExpr(E: E->getArg(Arg: 2)); |
8632 | |
// Locality is not supported on the ARM target; always use the maximum
// value (3).
8634 | Value *Locality = llvm::ConstantInt::get(Ty: Int32Ty, V: 3); |
8635 | |
8636 | Function *F = CGM.getIntrinsic(IID: Intrinsic::prefetch, Tys: Address->getType()); |
8637 | return Builder.CreateCall(Callee: F, Args: {Address, RW, Locality, IsData}); |
8638 | } |
8639 | |
8640 | if (BuiltinID == clang::ARM::BI__builtin_arm_rbit) { |
8641 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8642 | return Builder.CreateCall( |
8643 | Callee: CGM.getIntrinsic(IID: Intrinsic::bitreverse, Tys: Arg->getType()), Args: Arg, Name: "rbit" ); |
8644 | } |
8645 | |
8646 | if (BuiltinID == clang::ARM::BI__builtin_arm_clz || |
8647 | BuiltinID == clang::ARM::BI__builtin_arm_clz64) { |
8648 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8649 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: Arg->getType()); |
8650 | Value *Res = Builder.CreateCall(Callee: F, Args: {Arg, Builder.getInt1(V: false)}); |
8651 | if (BuiltinID == clang::ARM::BI__builtin_arm_clz64) |
8652 | Res = Builder.CreateTrunc(V: Res, DestTy: Builder.getInt32Ty()); |
8653 | return Res; |
8654 | } |
8655 | |
8656 | |
8657 | if (BuiltinID == clang::ARM::BI__builtin_arm_cls) { |
8658 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8659 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::arm_cls), Args: Arg, Name: "cls" ); |
8660 | } |
8661 | if (BuiltinID == clang::ARM::BI__builtin_arm_cls64) { |
8662 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8663 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::arm_cls64), Args: Arg, |
8664 | Name: "cls" ); |
8665 | } |
8666 | |
8667 | if (BuiltinID == clang::ARM::BI__clear_cache) { |
8668 | assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments" ); |
8669 | const FunctionDecl *FD = E->getDirectCallee(); |
8670 | Value *Ops[2]; |
8671 | for (unsigned i = 0; i < 2; i++) |
8672 | Ops[i] = EmitScalarExpr(E: E->getArg(Arg: i)); |
8673 | llvm::Type *Ty = CGM.getTypes().ConvertType(T: FD->getType()); |
8674 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Val: Ty); |
8675 | StringRef Name = FD->getName(); |
8676 | return EmitNounwindRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name), args: Ops); |
8677 | } |
8678 | |
8679 | if (BuiltinID == clang::ARM::BI__builtin_arm_mcrr || |
8680 | BuiltinID == clang::ARM::BI__builtin_arm_mcrr2) { |
8681 | Function *F; |
8682 | |
8683 | switch (BuiltinID) { |
8684 | default: llvm_unreachable("unexpected builtin" ); |
8685 | case clang::ARM::BI__builtin_arm_mcrr: |
8686 | F = CGM.getIntrinsic(IID: Intrinsic::arm_mcrr); |
8687 | break; |
8688 | case clang::ARM::BI__builtin_arm_mcrr2: |
8689 | F = CGM.getIntrinsic(IID: Intrinsic::arm_mcrr2); |
8690 | break; |
8691 | } |
8692 | |
// The MCRR{2} instruction has 5 operands but the intrinsic has only 4:
// Rt and Rt2 are represented as a single unsigned 64-bit integer in the
// intrinsic definition, so split it into two 32-bit values here.
8699 | |
8700 | Value *Coproc = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8701 | Value *Opc1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
8702 | Value *RtAndRt2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
8703 | Value *CRm = EmitScalarExpr(E: E->getArg(Arg: 3)); |
8704 | |
8705 | Value *C1 = llvm::ConstantInt::get(Ty: Int64Ty, V: 32); |
8706 | Value *Rt = Builder.CreateTruncOrBitCast(V: RtAndRt2, DestTy: Int32Ty); |
8707 | Value *Rt2 = Builder.CreateLShr(LHS: RtAndRt2, RHS: C1); |
8708 | Rt2 = Builder.CreateTruncOrBitCast(V: Rt2, DestTy: Int32Ty); |
8709 | |
8710 | return Builder.CreateCall(Callee: F, Args: {Coproc, Opc1, Rt, Rt2, CRm}); |
8711 | } |
8712 | |
8713 | if (BuiltinID == clang::ARM::BI__builtin_arm_mrrc || |
8714 | BuiltinID == clang::ARM::BI__builtin_arm_mrrc2) { |
8715 | Function *F; |
8716 | |
8717 | switch (BuiltinID) { |
8718 | default: llvm_unreachable("unexpected builtin" ); |
8719 | case clang::ARM::BI__builtin_arm_mrrc: |
8720 | F = CGM.getIntrinsic(IID: Intrinsic::arm_mrrc); |
8721 | break; |
8722 | case clang::ARM::BI__builtin_arm_mrrc2: |
8723 | F = CGM.getIntrinsic(IID: Intrinsic::arm_mrrc2); |
8724 | break; |
8725 | } |
8726 | |
8727 | Value *Coproc = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8728 | Value *Opc1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
8729 | Value *CRm = EmitScalarExpr(E: E->getArg(Arg: 2)); |
8730 | Value *RtAndRt2 = Builder.CreateCall(Callee: F, Args: {Coproc, Opc1, CRm}); |
8731 | |
// The builtin returns an unsigned 64-bit integer; the intrinsic yields it
// as two 32-bit integers that are recombined below.
8734 | |
8735 | Value *Rt = Builder.CreateExtractValue(Agg: RtAndRt2, Idxs: 1); |
8736 | Value *Rt1 = Builder.CreateExtractValue(Agg: RtAndRt2, Idxs: 0); |
8737 | Rt = Builder.CreateZExt(V: Rt, DestTy: Int64Ty); |
8738 | Rt1 = Builder.CreateZExt(V: Rt1, DestTy: Int64Ty); |
8739 | |
8740 | Value *ShiftCast = llvm::ConstantInt::get(Ty: Int64Ty, V: 32); |
8741 | RtAndRt2 = Builder.CreateShl(LHS: Rt, RHS: ShiftCast, Name: "shl" , HasNUW: true); |
8742 | RtAndRt2 = Builder.CreateOr(LHS: RtAndRt2, RHS: Rt1); |
8743 | |
8744 | return Builder.CreateBitCast(V: RtAndRt2, DestTy: ConvertType(T: E->getType())); |
8745 | } |
8746 | |
8747 | if (BuiltinID == clang::ARM::BI__builtin_arm_ldrexd || |
8748 | ((BuiltinID == clang::ARM::BI__builtin_arm_ldrex || |
8749 | BuiltinID == clang::ARM::BI__builtin_arm_ldaex) && |
8750 | getContext().getTypeSize(T: E->getType()) == 64) || |
8751 | BuiltinID == clang::ARM::BI__ldrexd) { |
8752 | Function *F; |
8753 | |
8754 | switch (BuiltinID) { |
8755 | default: llvm_unreachable("unexpected builtin" ); |
8756 | case clang::ARM::BI__builtin_arm_ldaex: |
8757 | F = CGM.getIntrinsic(IID: Intrinsic::arm_ldaexd); |
8758 | break; |
8759 | case clang::ARM::BI__builtin_arm_ldrexd: |
8760 | case clang::ARM::BI__builtin_arm_ldrex: |
8761 | case clang::ARM::BI__ldrexd: |
8762 | F = CGM.getIntrinsic(IID: Intrinsic::arm_ldrexd); |
8763 | break; |
8764 | } |
8765 | |
8766 | Value *LdPtr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8767 | Value *Val = Builder.CreateCall(Callee: F, Args: LdPtr, Name: "ldrexd" ); |
8768 | |
8769 | Value *Val0 = Builder.CreateExtractValue(Agg: Val, Idxs: 1); |
8770 | Value *Val1 = Builder.CreateExtractValue(Agg: Val, Idxs: 0); |
8771 | Val0 = Builder.CreateZExt(V: Val0, DestTy: Int64Ty); |
8772 | Val1 = Builder.CreateZExt(V: Val1, DestTy: Int64Ty); |
8773 | |
8774 | Value *ShiftCst = llvm::ConstantInt::get(Ty: Int64Ty, V: 32); |
8775 | Val = Builder.CreateShl(LHS: Val0, RHS: ShiftCst, Name: "shl" , HasNUW: true /* nuw */); |
8776 | Val = Builder.CreateOr(LHS: Val, RHS: Val1); |
8777 | return Builder.CreateBitCast(V: Val, DestTy: ConvertType(T: E->getType())); |
8778 | } |
8779 | |
8780 | if (BuiltinID == clang::ARM::BI__builtin_arm_ldrex || |
8781 | BuiltinID == clang::ARM::BI__builtin_arm_ldaex) { |
8782 | Value *LoadAddr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8783 | |
8784 | QualType Ty = E->getType(); |
8785 | llvm::Type *RealResTy = ConvertType(T: Ty); |
8786 | llvm::Type *IntTy = |
8787 | llvm::IntegerType::get(C&: getLLVMContext(), NumBits: getContext().getTypeSize(T: Ty)); |
8788 | |
8789 | Function *F = CGM.getIntrinsic( |
8790 | IID: BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex |
8791 | : Intrinsic::arm_ldrex, |
8792 | Tys: UnqualPtrTy); |
8793 | CallInst *Val = Builder.CreateCall(Callee: F, Args: LoadAddr, Name: "ldrex" ); |
8794 | Val->addParamAttr( |
8795 | ArgNo: 0, Attr: Attribute::get(Context&: getLLVMContext(), Kind: Attribute::ElementType, Ty: IntTy)); |
8796 | |
8797 | if (RealResTy->isPointerTy()) |
8798 | return Builder.CreateIntToPtr(V: Val, DestTy: RealResTy); |
8799 | else { |
8800 | llvm::Type *IntResTy = llvm::IntegerType::get( |
8801 | C&: getLLVMContext(), NumBits: CGM.getDataLayout().getTypeSizeInBits(Ty: RealResTy)); |
8802 | return Builder.CreateBitCast(V: Builder.CreateTruncOrBitCast(V: Val, DestTy: IntResTy), |
8803 | DestTy: RealResTy); |
8804 | } |
8805 | } |
8806 | |
8807 | if (BuiltinID == clang::ARM::BI__builtin_arm_strexd || |
8808 | ((BuiltinID == clang::ARM::BI__builtin_arm_stlex || |
8809 | BuiltinID == clang::ARM::BI__builtin_arm_strex) && |
8810 | getContext().getTypeSize(T: E->getArg(Arg: 0)->getType()) == 64)) { |
8811 | Function *F = CGM.getIntrinsic( |
8812 | IID: BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlexd |
8813 | : Intrinsic::arm_strexd); |
8814 | llvm::Type *STy = llvm::StructType::get(elt1: Int32Ty, elts: Int32Ty); |
8815 | |
8816 | Address Tmp = CreateMemTemp(T: E->getArg(Arg: 0)->getType()); |
8817 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8818 | Builder.CreateStore(Val, Addr: Tmp); |
8819 | |
8820 | Address LdPtr = Tmp.withElementType(ElemTy: STy); |
8821 | Val = Builder.CreateLoad(Addr: LdPtr); |
8822 | |
8823 | Value *Arg0 = Builder.CreateExtractValue(Agg: Val, Idxs: 0); |
8824 | Value *Arg1 = Builder.CreateExtractValue(Agg: Val, Idxs: 1); |
8825 | Value *StPtr = EmitScalarExpr(E: E->getArg(Arg: 1)); |
8826 | return Builder.CreateCall(Callee: F, Args: {Arg0, Arg1, StPtr}, Name: "strexd" ); |
8827 | } |
8828 | |
8829 | if (BuiltinID == clang::ARM::BI__builtin_arm_strex || |
8830 | BuiltinID == clang::ARM::BI__builtin_arm_stlex) { |
8831 | Value *StoreVal = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8832 | Value *StoreAddr = EmitScalarExpr(E: E->getArg(Arg: 1)); |
8833 | |
8834 | QualType Ty = E->getArg(Arg: 0)->getType(); |
8835 | llvm::Type *StoreTy = |
8836 | llvm::IntegerType::get(C&: getLLVMContext(), NumBits: getContext().getTypeSize(T: Ty)); |
8837 | |
8838 | if (StoreVal->getType()->isPointerTy()) |
8839 | StoreVal = Builder.CreatePtrToInt(V: StoreVal, DestTy: Int32Ty); |
8840 | else { |
8841 | llvm::Type *IntTy = llvm::IntegerType::get( |
8842 | C&: getLLVMContext(), |
8843 | NumBits: CGM.getDataLayout().getTypeSizeInBits(Ty: StoreVal->getType())); |
8844 | StoreVal = Builder.CreateBitCast(V: StoreVal, DestTy: IntTy); |
8845 | StoreVal = Builder.CreateZExtOrBitCast(V: StoreVal, DestTy: Int32Ty); |
8846 | } |
8847 | |
8848 | Function *F = CGM.getIntrinsic( |
8849 | IID: BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlex |
8850 | : Intrinsic::arm_strex, |
8851 | Tys: StoreAddr->getType()); |
8852 | |
8853 | CallInst *CI = Builder.CreateCall(Callee: F, Args: {StoreVal, StoreAddr}, Name: "strex" ); |
8854 | CI->addParamAttr( |
8855 | ArgNo: 1, Attr: Attribute::get(Context&: getLLVMContext(), Kind: Attribute::ElementType, Ty: StoreTy)); |
8856 | return CI; |
8857 | } |
8858 | |
8859 | if (BuiltinID == clang::ARM::BI__builtin_arm_clrex) { |
8860 | Function *F = CGM.getIntrinsic(IID: Intrinsic::arm_clrex); |
8861 | return Builder.CreateCall(Callee: F); |
8862 | } |
8863 | |
8864 | // CRC32 |
8865 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; |
8866 | switch (BuiltinID) { |
8867 | case clang::ARM::BI__builtin_arm_crc32b: |
8868 | CRCIntrinsicID = Intrinsic::arm_crc32b; break; |
8869 | case clang::ARM::BI__builtin_arm_crc32cb: |
8870 | CRCIntrinsicID = Intrinsic::arm_crc32cb; break; |
8871 | case clang::ARM::BI__builtin_arm_crc32h: |
8872 | CRCIntrinsicID = Intrinsic::arm_crc32h; break; |
8873 | case clang::ARM::BI__builtin_arm_crc32ch: |
8874 | CRCIntrinsicID = Intrinsic::arm_crc32ch; break; |
8875 | case clang::ARM::BI__builtin_arm_crc32w: |
8876 | case clang::ARM::BI__builtin_arm_crc32d: |
8877 | CRCIntrinsicID = Intrinsic::arm_crc32w; break; |
8878 | case clang::ARM::BI__builtin_arm_crc32cw: |
8879 | case clang::ARM::BI__builtin_arm_crc32cd: |
8880 | CRCIntrinsicID = Intrinsic::arm_crc32cw; break; |
8881 | } |
8882 | |
8883 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { |
8884 | Value *Arg0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
8885 | Value *Arg1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
8886 | |
8887 | // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w |
8888 | // intrinsics, hence we need different codegen for these cases. |
8889 | if (BuiltinID == clang::ARM::BI__builtin_arm_crc32d || |
8890 | BuiltinID == clang::ARM::BI__builtin_arm_crc32cd) { |
8891 | Value *C1 = llvm::ConstantInt::get(Ty: Int64Ty, V: 32); |
8892 | Value *Arg1a = Builder.CreateTruncOrBitCast(V: Arg1, DestTy: Int32Ty); |
8893 | Value *Arg1b = Builder.CreateLShr(LHS: Arg1, RHS: C1); |
8894 | Arg1b = Builder.CreateTruncOrBitCast(V: Arg1b, DestTy: Int32Ty); |
8895 | |
8896 | Function *F = CGM.getIntrinsic(IID: CRCIntrinsicID); |
8897 | Value *Res = Builder.CreateCall(Callee: F, Args: {Arg0, Arg1a}); |
8898 | return Builder.CreateCall(Callee: F, Args: {Res, Arg1b}); |
8899 | } else { |
8900 | Arg1 = Builder.CreateZExtOrBitCast(V: Arg1, DestTy: Int32Ty); |
8901 | |
8902 | Function *F = CGM.getIntrinsic(IID: CRCIntrinsicID); |
8903 | return Builder.CreateCall(Callee: F, Args: {Arg0, Arg1}); |
8904 | } |
8905 | } |
8906 | |
8907 | if (BuiltinID == clang::ARM::BI__builtin_arm_rsr || |
8908 | BuiltinID == clang::ARM::BI__builtin_arm_rsr64 || |
8909 | BuiltinID == clang::ARM::BI__builtin_arm_rsrp || |
8910 | BuiltinID == clang::ARM::BI__builtin_arm_wsr || |
8911 | BuiltinID == clang::ARM::BI__builtin_arm_wsr64 || |
8912 | BuiltinID == clang::ARM::BI__builtin_arm_wsrp) { |
8913 | |
8914 | SpecialRegisterAccessKind AccessKind = Write; |
8915 | if (BuiltinID == clang::ARM::BI__builtin_arm_rsr || |
8916 | BuiltinID == clang::ARM::BI__builtin_arm_rsr64 || |
8917 | BuiltinID == clang::ARM::BI__builtin_arm_rsrp) |
8918 | AccessKind = VolatileRead; |
8919 | |
8920 | bool IsPointerBuiltin = BuiltinID == clang::ARM::BI__builtin_arm_rsrp || |
8921 | BuiltinID == clang::ARM::BI__builtin_arm_wsrp; |
8922 | |
8923 | bool Is64Bit = BuiltinID == clang::ARM::BI__builtin_arm_rsr64 || |
8924 | BuiltinID == clang::ARM::BI__builtin_arm_wsr64; |
8925 | |
8926 | llvm::Type *ValueType; |
8927 | llvm::Type *RegisterType; |
8928 | if (IsPointerBuiltin) { |
8929 | ValueType = VoidPtrTy; |
8930 | RegisterType = Int32Ty; |
8931 | } else if (Is64Bit) { |
8932 | ValueType = RegisterType = Int64Ty; |
8933 | } else { |
8934 | ValueType = RegisterType = Int32Ty; |
8935 | } |
8936 | |
8937 | return EmitSpecialRegisterBuiltin(CGF&: *this, E, RegisterType, ValueType, |
8938 | AccessKind); |
8939 | } |
8940 | |
8941 | if (BuiltinID == ARM::BI__builtin_sponentry) { |
8942 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::sponentry, Tys: AllocaInt8PtrTy); |
8943 | return Builder.CreateCall(Callee: F); |
8944 | } |
8945 | |
8946 | // Handle MSVC intrinsics before argument evaluation to prevent double |
8947 | // evaluation. |
8948 | if (std::optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID)) |
8949 | return EmitMSVCBuiltinExpr(BuiltinID: *MsvcIntId, E); |
8950 | |
8951 | // Deal with MVE builtins |
8952 | if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch)) |
8953 | return Result; |
8954 | // Handle CDE builtins |
8955 | if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch)) |
8956 | return Result; |
8957 | |
// Some intrinsics are equivalent; if so, use the base intrinsic ID.
8959 | auto It = llvm::find_if(Range: NEONEquivalentIntrinsicMap, P: [BuiltinID](auto &P) { |
8960 | return P.first == BuiltinID; |
8961 | }); |
8962 | if (It != end(arr: NEONEquivalentIntrinsicMap)) |
8963 | BuiltinID = It->second; |
8964 | |
8965 | // Find out if any arguments are required to be integer constant |
8966 | // expressions. |
8967 | unsigned ICEArguments = 0; |
8968 | ASTContext::GetBuiltinTypeError Error; |
8969 | getContext().GetBuiltinType(ID: BuiltinID, Error, IntegerConstantArgs: &ICEArguments); |
8970 | assert(Error == ASTContext::GE_None && "Should not codegen an error" ); |
8971 | |
8972 | auto getAlignmentValue32 = [&](Address addr) -> Value* { |
8973 | return Builder.getInt32(C: addr.getAlignment().getQuantity()); |
8974 | }; |
8975 | |
8976 | Address PtrOp0 = Address::invalid(); |
8977 | Address PtrOp1 = Address::invalid(); |
8978 | SmallVector<Value*, 4> Ops; |
bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
8980 | unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0); |
8981 | for (unsigned i = 0, e = NumArgs; i != e; i++) { |
8982 | if (i == 0) { |
8983 | switch (BuiltinID) { |
8984 | case NEON::BI__builtin_neon_vld1_v: |
8985 | case NEON::BI__builtin_neon_vld1q_v: |
8986 | case NEON::BI__builtin_neon_vld1q_lane_v: |
8987 | case NEON::BI__builtin_neon_vld1_lane_v: |
8988 | case NEON::BI__builtin_neon_vld1_dup_v: |
8989 | case NEON::BI__builtin_neon_vld1q_dup_v: |
8990 | case NEON::BI__builtin_neon_vst1_v: |
8991 | case NEON::BI__builtin_neon_vst1q_v: |
8992 | case NEON::BI__builtin_neon_vst1q_lane_v: |
8993 | case NEON::BI__builtin_neon_vst1_lane_v: |
8994 | case NEON::BI__builtin_neon_vst2_v: |
8995 | case NEON::BI__builtin_neon_vst2q_v: |
8996 | case NEON::BI__builtin_neon_vst2_lane_v: |
8997 | case NEON::BI__builtin_neon_vst2q_lane_v: |
8998 | case NEON::BI__builtin_neon_vst3_v: |
8999 | case NEON::BI__builtin_neon_vst3q_v: |
9000 | case NEON::BI__builtin_neon_vst3_lane_v: |
9001 | case NEON::BI__builtin_neon_vst3q_lane_v: |
9002 | case NEON::BI__builtin_neon_vst4_v: |
9003 | case NEON::BI__builtin_neon_vst4q_v: |
9004 | case NEON::BI__builtin_neon_vst4_lane_v: |
9005 | case NEON::BI__builtin_neon_vst4q_lane_v: |
9006 | // Get the alignment for the argument in addition to the value; |
9007 | // we'll use it later. |
9008 | PtrOp0 = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
9009 | Ops.push_back(Elt: PtrOp0.emitRawPointer(CGF&: *this)); |
9010 | continue; |
9011 | } |
9012 | } |
9013 | if (i == 1) { |
9014 | switch (BuiltinID) { |
9015 | case NEON::BI__builtin_neon_vld2_v: |
9016 | case NEON::BI__builtin_neon_vld2q_v: |
9017 | case NEON::BI__builtin_neon_vld3_v: |
9018 | case NEON::BI__builtin_neon_vld3q_v: |
9019 | case NEON::BI__builtin_neon_vld4_v: |
9020 | case NEON::BI__builtin_neon_vld4q_v: |
9021 | case NEON::BI__builtin_neon_vld2_lane_v: |
9022 | case NEON::BI__builtin_neon_vld2q_lane_v: |
9023 | case NEON::BI__builtin_neon_vld3_lane_v: |
9024 | case NEON::BI__builtin_neon_vld3q_lane_v: |
9025 | case NEON::BI__builtin_neon_vld4_lane_v: |
9026 | case NEON::BI__builtin_neon_vld4q_lane_v: |
9027 | case NEON::BI__builtin_neon_vld2_dup_v: |
9028 | case NEON::BI__builtin_neon_vld2q_dup_v: |
9029 | case NEON::BI__builtin_neon_vld3_dup_v: |
9030 | case NEON::BI__builtin_neon_vld3q_dup_v: |
9031 | case NEON::BI__builtin_neon_vld4_dup_v: |
9032 | case NEON::BI__builtin_neon_vld4q_dup_v: |
9033 | // Get the alignment for the argument in addition to the value; |
9034 | // we'll use it later. |
9035 | PtrOp1 = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
9036 | Ops.push_back(Elt: PtrOp1.emitRawPointer(CGF&: *this)); |
9037 | continue; |
9038 | } |
9039 | } |
9040 | |
9041 | Ops.push_back(Elt: EmitScalarOrConstFoldImmArg(ICEArguments, Idx: i, E)); |
9042 | } |
9043 | |
9044 | switch (BuiltinID) { |
9045 | default: break; |
9046 | |
9047 | case NEON::BI__builtin_neon_vget_lane_i8: |
9048 | case NEON::BI__builtin_neon_vget_lane_i16: |
9049 | case NEON::BI__builtin_neon_vget_lane_i32: |
9050 | case NEON::BI__builtin_neon_vget_lane_i64: |
9051 | case NEON::BI__builtin_neon_vget_lane_bf16: |
9052 | case NEON::BI__builtin_neon_vget_lane_f32: |
9053 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
9054 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
9055 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
9056 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
9057 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
9058 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
9059 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
9060 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
9061 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: Ops[1], Name: "vget_lane" ); |
9062 | |
9063 | case NEON::BI__builtin_neon_vrndns_f32: { |
9064 | Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
9065 | llvm::Type *Tys[] = {Arg->getType()}; |
9066 | Function *F = CGM.getIntrinsic(IID: Intrinsic::arm_neon_vrintn, Tys); |
9067 | return Builder.CreateCall(Callee: F, Args: {Arg}, Name: "vrndn" ); } |
9068 | |
9069 | case NEON::BI__builtin_neon_vset_lane_i8: |
9070 | case NEON::BI__builtin_neon_vset_lane_i16: |
9071 | case NEON::BI__builtin_neon_vset_lane_i32: |
9072 | case NEON::BI__builtin_neon_vset_lane_i64: |
9073 | case NEON::BI__builtin_neon_vset_lane_bf16: |
9074 | case NEON::BI__builtin_neon_vset_lane_f32: |
9075 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
9076 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
9077 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
9078 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
9079 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
9080 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
9081 | return Builder.CreateInsertElement(Vec: Ops[1], NewElt: Ops[0], Idx: Ops[2], Name: "vset_lane" ); |
9082 | |
9083 | case NEON::BI__builtin_neon_vsha1h_u32: |
9084 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_sha1h), Ops, |
9085 | name: "vsha1h" ); |
case NEON::BI__builtin_neon_vsha1cq_u32:
  return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_sha1c), Ops,
                      name: "vsha1c" );
case NEON::BI__builtin_neon_vsha1pq_u32:
  return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_sha1p), Ops,
                      name: "vsha1p" );
case NEON::BI__builtin_neon_vsha1mq_u32:
  return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_sha1m), Ops,
                      name: "vsha1m" );
9095 | |
9096 | case NEON::BI__builtin_neon_vcvth_bf16_f32: { |
9097 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vcvtbfp2bf), Ops, |
9098 | name: "vcvtbfp2bf" ); |
9099 | } |
9100 | |
9101 | // The ARM _MoveToCoprocessor builtins put the input register value as |
9102 | // the first argument, but the LLVM intrinsic expects it as the third one. |
9103 | case clang::ARM::BI_MoveToCoprocessor: |
9104 | case clang::ARM::BI_MoveToCoprocessor2: { |
9105 | Function *F = CGM.getIntrinsic(IID: BuiltinID == clang::ARM::BI_MoveToCoprocessor |
9106 | ? Intrinsic::arm_mcr |
9107 | : Intrinsic::arm_mcr2); |
9108 | return Builder.CreateCall(Callee: F, Args: {Ops[1], Ops[2], Ops[0], |
9109 | Ops[3], Ops[4], Ops[5]}); |
9110 | } |
9111 | } |
9112 | |
9113 | // Get the last argument, which specifies the vector type. |
9114 | assert(HasExtraArg); |
9115 | const Expr *Arg = E->getArg(Arg: E->getNumArgs()-1); |
9116 | std::optional<llvm::APSInt> Result = |
9117 | Arg->getIntegerConstantExpr(Ctx: getContext()); |
9118 | if (!Result) |
9119 | return nullptr; |
9120 | |
9121 | if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f || |
9122 | BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_d) { |
9123 | // Determine the overloaded type of this builtin. |
9124 | llvm::Type *Ty; |
9125 | if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f) |
9126 | Ty = FloatTy; |
9127 | else |
9128 | Ty = DoubleTy; |
9129 | |
9130 | // Determine whether this is an unsigned conversion or not. |
9131 | bool usgn = Result->getZExtValue() == 1; |
9132 | unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr; |
9133 | |
9134 | // Call the appropriate intrinsic. |
9135 | Function *F = CGM.getIntrinsic(IID: Int, Tys: Ty); |
9136 | return Builder.CreateCall(Callee: F, Args: Ops, Name: "vcvtr" ); |
9137 | } |
9138 | |
9139 | // Determine the type of this overloaded NEON intrinsic. |
9140 | NeonTypeFlags Type = Result->getZExtValue(); |
9141 | bool usgn = Type.isUnsigned(); |
9142 | bool rightShift = false; |
9143 | |
9144 | llvm::FixedVectorType *VTy = |
9145 | GetNeonType(CGF: this, TypeFlags: Type, HasLegalHalfType: getTarget().hasLegalHalfType(), V1Ty: false, |
9146 | AllowBFloatArgsAndRet: getTarget().hasBFloat16Type()); |
9147 | llvm::Type *Ty = VTy; |
9148 | if (!Ty) |
9149 | return nullptr; |
9150 | |
9151 | // Many NEON builtins have identical semantics and uses in ARM and |
9152 | // AArch64. Emit these in a single function. |
9153 | auto IntrinsicMap = ArrayRef(ARMSIMDIntrinsicMap); |
9154 | const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap( |
9155 | IntrinsicMap, BuiltinID, MapProvenSorted&: NEONSIMDIntrinsicsProvenSorted); |
9156 | if (Builtin) |
9157 | return EmitCommonNeonBuiltinExpr( |
9158 | BuiltinID: Builtin->BuiltinID, LLVMIntrinsic: Builtin->LLVMIntrinsic, AltLLVMIntrinsic: Builtin->AltLLVMIntrinsic, |
9159 | NameHint: Builtin->NameHint, Modifier: Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch); |
9160 | |
9161 | unsigned Int; |
9162 | switch (BuiltinID) { |
9163 | default: return nullptr; |
9164 | case NEON::BI__builtin_neon_vld1q_lane_v: |
9165 | // Handle 64-bit integer elements as a special case. Use shuffles of |
9166 | // one-element vectors to avoid poor code for i64 in the backend. |
9167 | if (VTy->getElementType()->isIntegerTy(Bitwidth: 64)) { |
9168 | // Extract the other lane. |
9169 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
9170 | int Lane = cast<ConstantInt>(Val: Ops[2])->getZExtValue(); |
9171 | Value *SV = llvm::ConstantVector::get(V: ConstantInt::get(Ty: Int32Ty, V: 1-Lane)); |
9172 | Ops[1] = Builder.CreateShuffleVector(V1: Ops[1], V2: Ops[1], Mask: SV); |
9173 | // Load the value as a one-element vector. |
9174 | Ty = llvm::FixedVectorType::get(ElementType: VTy->getElementType(), NumElts: 1); |
9175 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
9176 | Function *F = CGM.getIntrinsic(IID: Intrinsic::arm_neon_vld1, Tys); |
9177 | Value *Align = getAlignmentValue32(PtrOp0); |
9178 | Value *Ld = Builder.CreateCall(Callee: F, Args: {Ops[0], Align}); |
9179 | // Combine them. |
9180 | int Indices[] = {1 - Lane, Lane}; |
9181 | return Builder.CreateShuffleVector(V1: Ops[1], V2: Ld, Mask: Indices, Name: "vld1q_lane" ); |
9182 | } |
9183 | [[fallthrough]]; |
9184 | case NEON::BI__builtin_neon_vld1_lane_v: { |
9185 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
9186 | PtrOp0 = PtrOp0.withElementType(ElemTy: VTy->getElementType()); |
9187 | Value *Ld = Builder.CreateLoad(Addr: PtrOp0); |
9188 | return Builder.CreateInsertElement(Vec: Ops[1], NewElt: Ld, Idx: Ops[2], Name: "vld1_lane" ); |
9189 | } |
9190 | case NEON::BI__builtin_neon_vqrshrn_n_v: |
9191 | Int = |
9192 | usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; |
9193 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vqrshrn_n" , |
9194 | shift: 1, rightshift: true); |
9195 | case NEON::BI__builtin_neon_vqrshrun_n_v: |
9196 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vqrshiftnsu, Tys: Ty), |
9197 | Ops, name: "vqrshrun_n" , shift: 1, rightshift: true); |
9198 | case NEON::BI__builtin_neon_vqshrn_n_v: |
9199 | Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns; |
9200 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vqshrn_n" , |
9201 | shift: 1, rightshift: true); |
9202 | case NEON::BI__builtin_neon_vqshrun_n_v: |
9203 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vqshiftnsu, Tys: Ty), |
9204 | Ops, name: "vqshrun_n" , shift: 1, rightshift: true); |
9205 | case NEON::BI__builtin_neon_vrecpe_v: |
9206 | case NEON::BI__builtin_neon_vrecpeq_v: |
9207 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vrecpe, Tys: Ty), |
9208 | Ops, name: "vrecpe" ); |
9209 | case NEON::BI__builtin_neon_vrshrn_n_v: |
9210 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vrshiftn, Tys: Ty), |
9211 | Ops, name: "vrshrn_n" , shift: 1, rightshift: true); |
9212 | case NEON::BI__builtin_neon_vrsra_n_v: |
9213 | case NEON::BI__builtin_neon_vrsraq_n_v: |
9214 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
9215 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
9216 | Ops[2] = EmitNeonShiftVector(V: Ops[2], Ty, neg: true); |
9217 | Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; |
9218 | Ops[1] = Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Int, Tys: Ty), Args: {Ops[1], Ops[2]}); |
9219 | return Builder.CreateAdd(LHS: Ops[0], RHS: Ops[1], Name: "vrsra_n" ); |
9220 | case NEON::BI__builtin_neon_vsri_n_v: |
9221 | case NEON::BI__builtin_neon_vsriq_n_v: |
9222 | rightShift = true; |
9223 | [[fallthrough]]; |
9224 | case NEON::BI__builtin_neon_vsli_n_v: |
9225 | case NEON::BI__builtin_neon_vsliq_n_v: |
9226 | Ops[2] = EmitNeonShiftVector(V: Ops[2], Ty, neg: rightShift); |
9227 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vshiftins, Tys: Ty), |
9228 | Ops, name: "vsli_n" ); |
9229 | case NEON::BI__builtin_neon_vsra_n_v: |
9230 | case NEON::BI__builtin_neon_vsraq_n_v: |
9231 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
9232 | Ops[1] = EmitNeonRShiftImm(Vec: Ops[1], Shift: Ops[2], Ty, usgn, name: "vsra_n" ); |
9233 | return Builder.CreateAdd(LHS: Ops[0], RHS: Ops[1]); |
9234 | case NEON::BI__builtin_neon_vst1q_lane_v: |
9235 | // Handle 64-bit integer elements as a special case. Use a shuffle to get |
9236 | // a one-element vector and avoid poor code for i64 in the backend. |
9237 | if (VTy->getElementType()->isIntegerTy(Bitwidth: 64)) { |
9238 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
9239 | Value *SV = llvm::ConstantVector::get(V: cast<llvm::Constant>(Val: Ops[2])); |
9240 | Ops[1] = Builder.CreateShuffleVector(V1: Ops[1], V2: Ops[1], Mask: SV); |
9241 | Ops[2] = getAlignmentValue32(PtrOp0); |
9242 | llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()}; |
9243 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vst1, |
9244 | Tys), Args: Ops); |
9245 | } |
9246 | [[fallthrough]]; |
9247 | case NEON::BI__builtin_neon_vst1_lane_v: { |
9248 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
9249 | Ops[1] = Builder.CreateExtractElement(Vec: Ops[1], Idx: Ops[2]); |
9250 | return Builder.CreateStore(Val: Ops[1], |
9251 | Addr: PtrOp0.withElementType(ElemTy: Ops[1]->getType())); |
9252 | } |
9253 | case NEON::BI__builtin_neon_vtbl1_v: |
9254 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vtbl1), |
9255 | Ops, name: "vtbl1" ); |
9256 | case NEON::BI__builtin_neon_vtbl2_v: |
9257 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vtbl2), |
9258 | Ops, name: "vtbl2" ); |
9259 | case NEON::BI__builtin_neon_vtbl3_v: |
9260 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vtbl3), |
9261 | Ops, name: "vtbl3" ); |
9262 | case NEON::BI__builtin_neon_vtbl4_v: |
9263 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vtbl4), |
9264 | Ops, name: "vtbl4" ); |
9265 | case NEON::BI__builtin_neon_vtbx1_v: |
9266 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vtbx1), |
9267 | Ops, name: "vtbx1" ); |
9268 | case NEON::BI__builtin_neon_vtbx2_v: |
9269 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vtbx2), |
9270 | Ops, name: "vtbx2" ); |
9271 | case NEON::BI__builtin_neon_vtbx3_v: |
9272 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vtbx3), |
9273 | Ops, name: "vtbx3" ); |
9274 | case NEON::BI__builtin_neon_vtbx4_v: |
9275 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::arm_neon_vtbx4), |
9276 | Ops, name: "vtbx4" ); |
9277 | } |
9278 | } |
9279 | |
9280 | template<typename Integer> |
9281 | static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) { |
9282 | return E->getIntegerConstantExpr(Ctx: Context)->getExtValue(); |
9283 | } |
9284 | |
9285 | static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V, |
9286 | llvm::Type *T, bool Unsigned) { |
9287 | // Helper function called by Tablegen-constructed ARM MVE builtin codegen, |
9288 | // which finds it convenient to specify signed/unsigned as a boolean flag. |
9289 | return Unsigned ? Builder.CreateZExt(V, DestTy: T) : Builder.CreateSExt(V, DestTy: T); |
9290 | } |
9291 | |
9292 | static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V, |
9293 | uint32_t Shift, bool Unsigned) { |
9294 | // MVE helper function for integer shift right. This must handle signed vs |
9295 | // unsigned, and also deal specially with the case where the shift count is |
9296 | // equal to the lane size. In LLVM IR, an LShr with that parameter would be |
9297 | // undefined behavior, but in MVE it's legal, so we must convert it to code |
9298 | // that is not undefined in IR. |
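// As an illustrative example (not enforced here): with 16-bit lanes and
// Shift == 16, an unsigned shift is folded to an all-zero vector, while a
// signed shift is emitted as an arithmetic shift right by 15.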
9299 | unsigned LaneBits = cast<llvm::VectorType>(Val: V->getType()) |
9300 | ->getElementType() |
9301 | ->getPrimitiveSizeInBits(); |
9302 | if (Shift == LaneBits) { |
9303 | // An unsigned shift of the full lane size always generates zero, so we can |
9304 | // simply emit a zero vector. A signed shift of the full lane size does the |
9305 | // same thing as shifting by one bit fewer. |
9306 | if (Unsigned) |
9307 | return llvm::Constant::getNullValue(Ty: V->getType()); |
9308 | else |
9309 | --Shift; |
9310 | } |
9311 | return Unsigned ? Builder.CreateLShr(LHS: V, RHS: Shift) : Builder.CreateAShr(LHS: V, RHS: Shift); |
9312 | } |
9313 | |
9314 | static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) { |
9315 | // MVE-specific helper function for a vector splat, which infers the element |
9316 | // count of the output vector by knowing that MVE vectors are all 128 bits |
9317 | // wide. |
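// For example (a sketch of the mapping): a 32-bit scalar is splatted to a
// <4 x i32>, and a 16-bit scalar to an <8 x i16>.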
9318 | unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits(); |
9319 | return Builder.CreateVectorSplat(NumElts: Elements, V); |
9320 | } |
9321 | |
9322 | static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder, |
9323 | CodeGenFunction *CGF, |
9324 | llvm::Value *V, |
9325 | llvm::Type *DestType) { |
9326 | // Convert one MVE vector type into another by reinterpreting its in-register |
9327 | // format. |
9328 | // |
// On little-endian targets, this is identical to a bitcast (which
// reinterprets the memory format). On big-endian targets they are not
// necessarily the same, because the register and memory formats map to each
// other differently depending on the lane size.
9333 | // |
9334 | // We generate a bitcast whenever we can (if we're little-endian, or if the |
9335 | // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic |
9336 | // that performs the different kind of reinterpretation. |
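// Illustrative example: reinterpreting <8 x i16> as <4 x i32> is a plain
// bitcast on little-endian targets, but on big-endian targets it goes
// through the arm.mve.vreinterpretq intrinsic because the lane sizes differ.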
9337 | if (CGF->getTarget().isBigEndian() && |
9338 | V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) { |
9339 | return Builder.CreateCall( |
9340 | Callee: CGF->CGM.getIntrinsic(IID: Intrinsic::arm_mve_vreinterpretq, |
9341 | Tys: {DestType, V->getType()}), |
9342 | Args: V); |
9343 | } else { |
9344 | return Builder.CreateBitCast(V, DestTy: DestType); |
9345 | } |
9346 | } |
9347 | |
9348 | static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) { |
9349 | // Make a shufflevector that extracts every other element of a vector (evens |
9350 | // or odds, as desired). |
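// For example (illustrative): with an 8-element input, the even extraction
// uses the shuffle mask {0, 2, 4, 6} and the odd extraction {1, 3, 5, 7}.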
9351 | SmallVector<int, 16> Indices; |
9352 | unsigned InputElements = |
9353 | cast<llvm::FixedVectorType>(Val: V->getType())->getNumElements(); |
9354 | for (unsigned i = 0; i < InputElements; i += 2) |
9355 | Indices.push_back(Elt: i + Odd); |
9356 | return Builder.CreateShuffleVector(V, Mask: Indices); |
9357 | } |
9358 | |
9359 | static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0, |
9360 | llvm::Value *V1) { |
9361 | // Make a shufflevector that interleaves two vectors element by element. |
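// For example (illustrative): zipping two 4-element vectors uses the shuffle
// mask {0, 4, 1, 5, 2, 6, 3, 7}.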
9362 | assert(V0->getType() == V1->getType() && "Can't zip different vector types" ); |
9363 | SmallVector<int, 16> Indices; |
9364 | unsigned InputElements = |
9365 | cast<llvm::FixedVectorType>(Val: V0->getType())->getNumElements(); |
9366 | for (unsigned i = 0; i < InputElements; i++) { |
9367 | Indices.push_back(Elt: i); |
9368 | Indices.push_back(Elt: i + InputElements); |
9369 | } |
9370 | return Builder.CreateShuffleVector(V1: V0, V2: V1, Mask: Indices); |
9371 | } |
9372 | |
9373 | template<unsigned HighBit, unsigned OtherBits> |
9374 | static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) { |
9375 | // MVE-specific helper function to make a vector splat of a constant such as |
9376 | // UINT_MAX or INT_MIN, in which all bits below the highest one are equal. |
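// For example (illustrative): with 16-bit lanes, HighBit=1/OtherBits=0
// splats 0x8000 (INT16_MIN) and HighBit=1/OtherBits=1 splats 0xFFFF
// (UINT16_MAX) into every lane.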
9377 | llvm::Type *T = cast<llvm::VectorType>(Val: VT)->getElementType(); |
9378 | unsigned LaneBits = T->getPrimitiveSizeInBits(); |
9379 | uint32_t Value = HighBit << (LaneBits - 1); |
9380 | if (OtherBits) |
9381 | Value |= (1UL << (LaneBits - 1)) - 1; |
9382 | llvm::Value *Lane = llvm::ConstantInt::get(Ty: T, V: Value); |
9383 | return ARMMVEVectorSplat(Builder, V: Lane); |
9384 | } |
9385 | |
9386 | static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder, |
9387 | llvm::Value *V, |
9388 | unsigned ReverseWidth) { |
9389 | // MVE-specific helper function which reverses the elements of a |
9390 | // vector within every (ReverseWidth)-bit collection of lanes. |
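// For example (illustrative): with 8-bit lanes and ReverseWidth == 32, Mask
// below is 3 and the lanes are reversed within each group of four, i.e. the
// shuffle mask begins {3, 2, 1, 0, 7, 6, 5, 4, ...}.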
9391 | SmallVector<int, 16> Indices; |
9392 | unsigned LaneSize = V->getType()->getScalarSizeInBits(); |
9393 | unsigned Elements = 128 / LaneSize; |
9394 | unsigned Mask = ReverseWidth / LaneSize - 1; |
9395 | for (unsigned i = 0; i < Elements; i++) |
9396 | Indices.push_back(Elt: i ^ Mask); |
9397 | return Builder.CreateShuffleVector(V, Mask: Indices); |
9398 | } |
9399 | |
9400 | Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID, |
9401 | const CallExpr *E, |
9402 | ReturnValueSlot ReturnValue, |
9403 | llvm::Triple::ArchType Arch) { |
9404 | enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType; |
9405 | Intrinsic::ID IRIntr; |
9406 | unsigned NumVectors; |
9407 | |
9408 | // Code autogenerated by Tablegen will handle all the simple builtins. |
9409 | switch (BuiltinID) { |
9410 | #include "clang/Basic/arm_mve_builtin_cg.inc" |
9411 | |
9412 | // If we didn't match an MVE builtin id at all, go back to the |
9413 | // main EmitARMBuiltinExpr. |
9414 | default: |
9415 | return nullptr; |
9416 | } |
9417 | |
9418 | // Anything that breaks from that switch is an MVE builtin that |
9419 | // needs handwritten code to generate. |
9420 | |
9421 | switch (CustomCodeGenType) { |
9422 | |
9423 | case CustomCodeGen::VLD24: { |
9424 | llvm::SmallVector<Value *, 4> Ops; |
9425 | llvm::SmallVector<llvm::Type *, 4> Tys; |
9426 | |
9427 | auto MvecCType = E->getType(); |
9428 | auto MvecLType = ConvertType(T: MvecCType); |
9429 | assert(MvecLType->isStructTy() && |
9430 | "Return type for vld[24]q should be a struct" ); |
9431 | assert(MvecLType->getStructNumElements() == 1 && |
9432 | "Return-type struct for vld[24]q should have one element" ); |
9433 | auto MvecLTypeInner = MvecLType->getStructElementType(N: 0); |
9434 | assert(MvecLTypeInner->isArrayTy() && |
9435 | "Return-type struct for vld[24]q should contain an array" ); |
9436 | assert(MvecLTypeInner->getArrayNumElements() == NumVectors && |
9437 | "Array member of return-type struct vld[24]q has wrong length" ); |
9438 | auto VecLType = MvecLTypeInner->getArrayElementType(); |
9439 | |
9440 | Tys.push_back(Elt: VecLType); |
9441 | |
9442 | auto Addr = E->getArg(Arg: 0); |
9443 | Ops.push_back(Elt: EmitScalarExpr(E: Addr)); |
9444 | Tys.push_back(Elt: ConvertType(T: Addr->getType())); |
9445 | |
9446 | Function *F = CGM.getIntrinsic(IID: IRIntr, Tys: ArrayRef(Tys)); |
9447 | Value *LoadResult = Builder.CreateCall(Callee: F, Args: Ops); |
9448 | Value *MvecOut = PoisonValue::get(T: MvecLType); |
9449 | for (unsigned i = 0; i < NumVectors; ++i) { |
9450 | Value *Vec = Builder.CreateExtractValue(Agg: LoadResult, Idxs: i); |
9451 | MvecOut = Builder.CreateInsertValue(Agg: MvecOut, Val: Vec, Idxs: {0, i}); |
9452 | } |
9453 | |
9454 | if (ReturnValue.isNull()) |
9455 | return MvecOut; |
9456 | else |
9457 | return Builder.CreateStore(Val: MvecOut, Addr: ReturnValue.getAddress()); |
9458 | } |
9459 | |
9460 | case CustomCodeGen::VST24: { |
9461 | llvm::SmallVector<Value *, 4> Ops; |
9462 | llvm::SmallVector<llvm::Type *, 4> Tys; |
9463 | |
9464 | auto Addr = E->getArg(Arg: 0); |
9465 | Ops.push_back(Elt: EmitScalarExpr(E: Addr)); |
9466 | Tys.push_back(Elt: ConvertType(T: Addr->getType())); |
9467 | |
9468 | auto MvecCType = E->getArg(Arg: 1)->getType(); |
9469 | auto MvecLType = ConvertType(T: MvecCType); |
9470 | assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct" ); |
9471 | assert(MvecLType->getStructNumElements() == 1 && |
9472 | "Data-type struct for vst2q should have one element" ); |
9473 | auto MvecLTypeInner = MvecLType->getStructElementType(N: 0); |
9474 | assert(MvecLTypeInner->isArrayTy() && |
9475 | "Data-type struct for vst2q should contain an array" ); |
9476 | assert(MvecLTypeInner->getArrayNumElements() == NumVectors && |
9477 | "Array member of return-type struct vld[24]q has wrong length" ); |
9478 | auto VecLType = MvecLTypeInner->getArrayElementType(); |
9479 | |
9480 | Tys.push_back(Elt: VecLType); |
9481 | |
9482 | AggValueSlot MvecSlot = CreateAggTemp(T: MvecCType); |
9483 | EmitAggExpr(E: E->getArg(Arg: 1), AS: MvecSlot); |
9484 | auto Mvec = Builder.CreateLoad(Addr: MvecSlot.getAddress()); |
9485 | for (unsigned i = 0; i < NumVectors; i++) |
9486 | Ops.push_back(Elt: Builder.CreateExtractValue(Agg: Mvec, Idxs: {0, i})); |
9487 | |
9488 | Function *F = CGM.getIntrinsic(IID: IRIntr, Tys: ArrayRef(Tys)); |
9489 | Value *ToReturn = nullptr; |
9490 | for (unsigned i = 0; i < NumVectors; i++) { |
9491 | Ops.push_back(Elt: llvm::ConstantInt::get(Ty: Int32Ty, V: i)); |
9492 | ToReturn = Builder.CreateCall(Callee: F, Args: Ops); |
9493 | Ops.pop_back(); |
9494 | } |
9495 | return ToReturn; |
9496 | } |
9497 | } |
9498 | llvm_unreachable("unknown custom codegen type." ); |
9499 | } |
9500 | |
9501 | Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID, |
9502 | const CallExpr *E, |
9503 | ReturnValueSlot ReturnValue, |
9504 | llvm::Triple::ArchType Arch) { |
9505 | switch (BuiltinID) { |
9506 | default: |
9507 | return nullptr; |
9508 | #include "clang/Basic/arm_cde_builtin_cg.inc" |
9509 | } |
9510 | } |
9511 | |
9512 | static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, |
9513 | const CallExpr *E, |
9514 | SmallVectorImpl<Value *> &Ops, |
9515 | llvm::Triple::ArchType Arch) { |
9516 | unsigned int Int = 0; |
9517 | const char *s = nullptr; |
9518 | |
9519 | switch (BuiltinID) { |
9520 | default: |
9521 | return nullptr; |
9522 | case NEON::BI__builtin_neon_vtbl1_v: |
9523 | case NEON::BI__builtin_neon_vqtbl1_v: |
9524 | case NEON::BI__builtin_neon_vqtbl1q_v: |
9525 | case NEON::BI__builtin_neon_vtbl2_v: |
9526 | case NEON::BI__builtin_neon_vqtbl2_v: |
9527 | case NEON::BI__builtin_neon_vqtbl2q_v: |
9528 | case NEON::BI__builtin_neon_vtbl3_v: |
9529 | case NEON::BI__builtin_neon_vqtbl3_v: |
9530 | case NEON::BI__builtin_neon_vqtbl3q_v: |
9531 | case NEON::BI__builtin_neon_vtbl4_v: |
9532 | case NEON::BI__builtin_neon_vqtbl4_v: |
9533 | case NEON::BI__builtin_neon_vqtbl4q_v: |
9534 | break; |
9535 | case NEON::BI__builtin_neon_vtbx1_v: |
9536 | case NEON::BI__builtin_neon_vqtbx1_v: |
9537 | case NEON::BI__builtin_neon_vqtbx1q_v: |
9538 | case NEON::BI__builtin_neon_vtbx2_v: |
9539 | case NEON::BI__builtin_neon_vqtbx2_v: |
9540 | case NEON::BI__builtin_neon_vqtbx2q_v: |
9541 | case NEON::BI__builtin_neon_vtbx3_v: |
9542 | case NEON::BI__builtin_neon_vqtbx3_v: |
9543 | case NEON::BI__builtin_neon_vqtbx3q_v: |
9544 | case NEON::BI__builtin_neon_vtbx4_v: |
9545 | case NEON::BI__builtin_neon_vqtbx4_v: |
9546 | case NEON::BI__builtin_neon_vqtbx4q_v: |
9547 | break; |
9548 | } |
9549 | |
9550 | assert(E->getNumArgs() >= 3); |
9551 | |
9552 | // Get the last argument, which specifies the vector type. |
9553 | const Expr *Arg = E->getArg(Arg: E->getNumArgs() - 1); |
9554 | std::optional<llvm::APSInt> Result = |
9555 | Arg->getIntegerConstantExpr(Ctx: CGF.getContext()); |
9556 | if (!Result) |
9557 | return nullptr; |
9558 | |
9559 | // Determine the type of this overloaded NEON intrinsic. |
9560 | NeonTypeFlags Type = Result->getZExtValue(); |
9561 | llvm::FixedVectorType *Ty = GetNeonType(CGF: &CGF, TypeFlags: Type); |
9562 | if (!Ty) |
9563 | return nullptr; |
9564 | |
9565 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
9566 | |
// AArch64 scalar builtins are not overloaded; they do not have an extra
// argument that specifies the vector type, so we need to handle each case.
9569 | switch (BuiltinID) { |
9570 | case NEON::BI__builtin_neon_vtbl1_v: { |
9571 | return packTBLDVectorList(CGF, Ops: ArrayRef(Ops).slice(N: 0, M: 1), ExtOp: nullptr, IndexOp: Ops[1], |
9572 | ResTy: Ty, IntID: Intrinsic::aarch64_neon_tbl1, Name: "vtbl1" ); |
9573 | } |
9574 | case NEON::BI__builtin_neon_vtbl2_v: { |
9575 | return packTBLDVectorList(CGF, Ops: ArrayRef(Ops).slice(N: 0, M: 2), ExtOp: nullptr, IndexOp: Ops[2], |
9576 | ResTy: Ty, IntID: Intrinsic::aarch64_neon_tbl1, Name: "vtbl1" ); |
9577 | } |
9578 | case NEON::BI__builtin_neon_vtbl3_v: { |
9579 | return packTBLDVectorList(CGF, Ops: ArrayRef(Ops).slice(N: 0, M: 3), ExtOp: nullptr, IndexOp: Ops[3], |
9580 | ResTy: Ty, IntID: Intrinsic::aarch64_neon_tbl2, Name: "vtbl2" ); |
9581 | } |
9582 | case NEON::BI__builtin_neon_vtbl4_v: { |
9583 | return packTBLDVectorList(CGF, Ops: ArrayRef(Ops).slice(N: 0, M: 4), ExtOp: nullptr, IndexOp: Ops[4], |
9584 | ResTy: Ty, IntID: Intrinsic::aarch64_neon_tbl2, Name: "vtbl2" ); |
9585 | } |
9586 | case NEON::BI__builtin_neon_vtbx1_v: { |
9587 | Value *TblRes = |
9588 | packTBLDVectorList(CGF, Ops: ArrayRef(Ops).slice(N: 1, M: 1), ExtOp: nullptr, IndexOp: Ops[2], ResTy: Ty, |
9589 | IntID: Intrinsic::aarch64_neon_tbl1, Name: "vtbl1" ); |
9590 | |
9591 | llvm::Constant *EightV = ConstantInt::get(Ty, V: 8); |
9592 | Value *CmpRes = Builder.CreateICmp(P: ICmpInst::ICMP_UGE, LHS: Ops[2], RHS: EightV); |
9593 | CmpRes = Builder.CreateSExt(V: CmpRes, DestTy: Ty); |
9594 | |
9595 | Value *EltsFromInput = Builder.CreateAnd(LHS: CmpRes, RHS: Ops[0]); |
9596 | Value *EltsFromTbl = Builder.CreateAnd(LHS: Builder.CreateNot(V: CmpRes), RHS: TblRes); |
9597 | return Builder.CreateOr(LHS: EltsFromInput, RHS: EltsFromTbl, Name: "vtbx" ); |
9598 | } |
9599 | case NEON::BI__builtin_neon_vtbx2_v: { |
9600 | return packTBLDVectorList(CGF, Ops: ArrayRef(Ops).slice(N: 1, M: 2), ExtOp: Ops[0], IndexOp: Ops[3], |
9601 | ResTy: Ty, IntID: Intrinsic::aarch64_neon_tbx1, Name: "vtbx1" ); |
9602 | } |
9603 | case NEON::BI__builtin_neon_vtbx3_v: { |
9604 | Value *TblRes = |
9605 | packTBLDVectorList(CGF, Ops: ArrayRef(Ops).slice(N: 1, M: 3), ExtOp: nullptr, IndexOp: Ops[4], ResTy: Ty, |
9606 | IntID: Intrinsic::aarch64_neon_tbl2, Name: "vtbl2" ); |
9607 | |
9608 | llvm::Constant *TwentyFourV = ConstantInt::get(Ty, V: 24); |
9609 | Value *CmpRes = Builder.CreateICmp(P: ICmpInst::ICMP_UGE, LHS: Ops[4], |
9610 | RHS: TwentyFourV); |
9611 | CmpRes = Builder.CreateSExt(V: CmpRes, DestTy: Ty); |
9612 | |
9613 | Value *EltsFromInput = Builder.CreateAnd(LHS: CmpRes, RHS: Ops[0]); |
9614 | Value *EltsFromTbl = Builder.CreateAnd(LHS: Builder.CreateNot(V: CmpRes), RHS: TblRes); |
9615 | return Builder.CreateOr(LHS: EltsFromInput, RHS: EltsFromTbl, Name: "vtbx" ); |
9616 | } |
9617 | case NEON::BI__builtin_neon_vtbx4_v: { |
9618 | return packTBLDVectorList(CGF, Ops: ArrayRef(Ops).slice(N: 1, M: 4), ExtOp: Ops[0], IndexOp: Ops[5], |
9619 | ResTy: Ty, IntID: Intrinsic::aarch64_neon_tbx2, Name: "vtbx2" ); |
9620 | } |
9621 | case NEON::BI__builtin_neon_vqtbl1_v: |
9622 | case NEON::BI__builtin_neon_vqtbl1q_v: |
9623 | Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1" ; break; |
9624 | case NEON::BI__builtin_neon_vqtbl2_v: |
case NEON::BI__builtin_neon_vqtbl2q_v:
9626 | Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2" ; break; |
9627 | case NEON::BI__builtin_neon_vqtbl3_v: |
9628 | case NEON::BI__builtin_neon_vqtbl3q_v: |
9629 | Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3" ; break; |
9630 | case NEON::BI__builtin_neon_vqtbl4_v: |
9631 | case NEON::BI__builtin_neon_vqtbl4q_v: |
9632 | Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4" ; break; |
9633 | case NEON::BI__builtin_neon_vqtbx1_v: |
9634 | case NEON::BI__builtin_neon_vqtbx1q_v: |
9635 | Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1" ; break; |
9636 | case NEON::BI__builtin_neon_vqtbx2_v: |
9637 | case NEON::BI__builtin_neon_vqtbx2q_v: |
9638 | Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2" ; break; |
9639 | case NEON::BI__builtin_neon_vqtbx3_v: |
9640 | case NEON::BI__builtin_neon_vqtbx3q_v: |
9641 | Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3" ; break; |
9642 | case NEON::BI__builtin_neon_vqtbx4_v: |
9643 | case NEON::BI__builtin_neon_vqtbx4q_v: |
9644 | Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4" ; break; |
9645 | } |
9646 | } |
9647 | |
9648 | if (!Int) |
9649 | return nullptr; |
9650 | |
9651 | Function *F = CGF.CGM.getIntrinsic(IID: Int, Tys: Ty); |
9652 | return CGF.EmitNeonCall(F, Ops, name: s); |
9653 | } |
9654 | |
9655 | Value *CodeGenFunction::vectorWrapScalar16(Value *Op) { |
9656 | auto *VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 4); |
9657 | Op = Builder.CreateBitCast(V: Op, DestTy: Int16Ty); |
9658 | Value *V = PoisonValue::get(T: VTy); |
9659 | llvm::Constant *CI = ConstantInt::get(Ty: SizeTy, V: 0); |
9660 | Op = Builder.CreateInsertElement(Vec: V, NewElt: Op, Idx: CI); |
9661 | return Op; |
9662 | } |
9663 | |
9664 | /// SVEBuiltinMemEltTy - Returns the memory element type for this memory |
9665 | /// access builtin. Only required if it can't be inferred from the base pointer |
9666 | /// operand. |
9667 | llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) { |
9668 | switch (TypeFlags.getMemEltType()) { |
9669 | case SVETypeFlags::MemEltTyDefault: |
9670 | return getEltType(TypeFlags); |
9671 | case SVETypeFlags::MemEltTyInt8: |
9672 | return Builder.getInt8Ty(); |
9673 | case SVETypeFlags::MemEltTyInt16: |
9674 | return Builder.getInt16Ty(); |
9675 | case SVETypeFlags::MemEltTyInt32: |
9676 | return Builder.getInt32Ty(); |
9677 | case SVETypeFlags::MemEltTyInt64: |
9678 | return Builder.getInt64Ty(); |
9679 | } |
9680 | llvm_unreachable("Unknown MemEltType" ); |
9681 | } |
9682 | |
9683 | llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) { |
9684 | switch (TypeFlags.getEltType()) { |
9685 | default: |
9686 | llvm_unreachable("Invalid SVETypeFlag!" ); |
9687 | |
9688 | case SVETypeFlags::EltTyInt8: |
9689 | return Builder.getInt8Ty(); |
9690 | case SVETypeFlags::EltTyInt16: |
9691 | return Builder.getInt16Ty(); |
9692 | case SVETypeFlags::EltTyInt32: |
9693 | return Builder.getInt32Ty(); |
9694 | case SVETypeFlags::EltTyInt64: |
9695 | return Builder.getInt64Ty(); |
9696 | case SVETypeFlags::EltTyInt128: |
9697 | return Builder.getInt128Ty(); |
9698 | |
9699 | case SVETypeFlags::EltTyFloat16: |
9700 | return Builder.getHalfTy(); |
9701 | case SVETypeFlags::EltTyFloat32: |
9702 | return Builder.getFloatTy(); |
9703 | case SVETypeFlags::EltTyFloat64: |
9704 | return Builder.getDoubleTy(); |
9705 | |
9706 | case SVETypeFlags::EltTyBFloat16: |
9707 | return Builder.getBFloatTy(); |
9708 | |
9709 | case SVETypeFlags::EltTyBool8: |
9710 | case SVETypeFlags::EltTyBool16: |
9711 | case SVETypeFlags::EltTyBool32: |
9712 | case SVETypeFlags::EltTyBool64: |
9713 | return Builder.getInt1Ty(); |
9714 | } |
9715 | } |
9716 | |
// Return the llvm predicate vector type corresponding to the element type
// specified by TypeFlags.
9719 | llvm::ScalableVectorType * |
9720 | CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) { |
9721 | switch (TypeFlags.getEltType()) { |
9722 | default: llvm_unreachable("Unhandled SVETypeFlag!" ); |
9723 | |
9724 | case SVETypeFlags::EltTyInt8: |
9725 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 16); |
9726 | case SVETypeFlags::EltTyInt16: |
9727 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 8); |
9728 | case SVETypeFlags::EltTyInt32: |
9729 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 4); |
9730 | case SVETypeFlags::EltTyInt64: |
9731 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 2); |
9732 | |
9733 | case SVETypeFlags::EltTyBFloat16: |
9734 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 8); |
9735 | case SVETypeFlags::EltTyFloat16: |
9736 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 8); |
9737 | case SVETypeFlags::EltTyFloat32: |
9738 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 4); |
9739 | case SVETypeFlags::EltTyFloat64: |
9740 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 2); |
9741 | |
9742 | case SVETypeFlags::EltTyBool8: |
9743 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 16); |
9744 | case SVETypeFlags::EltTyBool16: |
9745 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 8); |
9746 | case SVETypeFlags::EltTyBool32: |
9747 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 4); |
9748 | case SVETypeFlags::EltTyBool64: |
9749 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 2); |
9750 | } |
9751 | } |
9752 | |
// Return the llvm vector type corresponding to the element type specified by TypeFlags.
9754 | llvm::ScalableVectorType * |
9755 | CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) { |
9756 | switch (TypeFlags.getEltType()) { |
9757 | default: |
9758 | llvm_unreachable("Invalid SVETypeFlag!" ); |
9759 | |
9760 | case SVETypeFlags::EltTyInt8: |
9761 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt8Ty(), MinNumElts: 16); |
9762 | case SVETypeFlags::EltTyInt16: |
9763 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt16Ty(), MinNumElts: 8); |
9764 | case SVETypeFlags::EltTyInt32: |
9765 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt32Ty(), MinNumElts: 4); |
9766 | case SVETypeFlags::EltTyInt64: |
9767 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt64Ty(), MinNumElts: 2); |
9768 | |
9769 | case SVETypeFlags::EltTyFloat16: |
9770 | return llvm::ScalableVectorType::get(ElementType: Builder.getHalfTy(), MinNumElts: 8); |
9771 | case SVETypeFlags::EltTyBFloat16: |
9772 | return llvm::ScalableVectorType::get(ElementType: Builder.getBFloatTy(), MinNumElts: 8); |
9773 | case SVETypeFlags::EltTyFloat32: |
9774 | return llvm::ScalableVectorType::get(ElementType: Builder.getFloatTy(), MinNumElts: 4); |
9775 | case SVETypeFlags::EltTyFloat64: |
9776 | return llvm::ScalableVectorType::get(ElementType: Builder.getDoubleTy(), MinNumElts: 2); |
9777 | |
9778 | case SVETypeFlags::EltTyBool8: |
9779 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 16); |
9780 | case SVETypeFlags::EltTyBool16: |
9781 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 8); |
9782 | case SVETypeFlags::EltTyBool32: |
9783 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 4); |
9784 | case SVETypeFlags::EltTyBool64: |
9785 | return llvm::ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 2); |
9786 | } |
9787 | } |
9788 | |
9789 | llvm::Value * |
9790 | CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) { |
9791 | Function *Ptrue = |
9792 | CGM.getIntrinsic(IID: Intrinsic::aarch64_sve_ptrue, Tys: getSVEPredType(TypeFlags)); |
9793 | return Builder.CreateCall(Callee: Ptrue, Args: {Builder.getInt32(/*SV_ALL*/ C: 31)}); |
9794 | } |
9795 | |
9796 | constexpr unsigned SVEBitsPerBlock = 128; |
9797 | |
9798 | static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) { |
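// A sketch of the mapping performed here: one minimum-sized SVE register is
// 128 bits, so e.g. i32 elements yield 128 / 32 = 4 lanes, giving
// <vscale x 4 x i32>, and i8 elements give <vscale x 16 x i8>.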
9799 | unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits(); |
9800 | return llvm::ScalableVectorType::get(ElementType: EltTy, MinNumElts: NumElts); |
9801 | } |
9802 | |
9803 | // Reinterpret the input predicate so that it can be used to correctly isolate |
9804 | // the elements of the specified datatype. |
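// For example (illustrative): an svbool_t predicate arrives as
// <vscale x 16 x i1>, but when it guards 64-bit elements it is narrowed to
// <vscale x 2 x i1> via the aarch64.sve.convert.from.svbool intrinsic below.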
9805 | Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred, |
9806 | llvm::ScalableVectorType *VTy) { |
9807 | |
9808 | if (isa<TargetExtType>(Val: Pred->getType()) && |
9809 | cast<TargetExtType>(Val: Pred->getType())->getName() == "aarch64.svcount" ) |
9810 | return Pred; |
9811 | |
9812 | auto *RTy = llvm::VectorType::get(ElementType: IntegerType::get(C&: getLLVMContext(), NumBits: 1), Other: VTy); |
9813 | if (Pred->getType() == RTy) |
9814 | return Pred; |
9815 | |
9816 | unsigned IntID; |
9817 | llvm::Type *IntrinsicTy; |
9818 | switch (VTy->getMinNumElements()) { |
9819 | default: |
9820 | llvm_unreachable("unsupported element count!" ); |
9821 | case 1: |
9822 | case 2: |
9823 | case 4: |
9824 | case 8: |
9825 | IntID = Intrinsic::aarch64_sve_convert_from_svbool; |
9826 | IntrinsicTy = RTy; |
9827 | break; |
9828 | case 16: |
9829 | IntID = Intrinsic::aarch64_sve_convert_to_svbool; |
9830 | IntrinsicTy = Pred->getType(); |
9831 | break; |
9832 | } |
9833 | |
9834 | Function *F = CGM.getIntrinsic(IID: IntID, Tys: IntrinsicTy); |
9835 | Value *C = Builder.CreateCall(Callee: F, Args: Pred); |
9836 | assert(C->getType() == RTy && "Unexpected return type!" ); |
9837 | return C; |
9838 | } |
9839 | |
9840 | Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags, |
9841 | SmallVectorImpl<Value *> &Ops, |
9842 | unsigned IntID) { |
9843 | auto *ResultTy = getSVEType(TypeFlags); |
9844 | auto *OverloadedTy = |
9845 | llvm::ScalableVectorType::get(ElementType: SVEBuiltinMemEltTy(TypeFlags), SVTy: ResultTy); |
9846 | |
9847 | Function *F = nullptr; |
9848 | if (Ops[1]->getType()->isVectorTy()) |
9849 | // This is the "vector base, scalar offset" case. In order to uniquely |
9850 | // map this built-in to an LLVM IR intrinsic, we need both the return type |
9851 | // and the type of the vector base. |
9852 | F = CGM.getIntrinsic(IID: IntID, Tys: {OverloadedTy, Ops[1]->getType()}); |
9853 | else |
9854 | // This is the "scalar base, vector offset case". The type of the offset |
9855 | // is encoded in the name of the intrinsic. We only need to specify the |
9856 | // return type in order to uniquely map this built-in to an LLVM IR |
9857 | // intrinsic. |
9858 | F = CGM.getIntrinsic(IID: IntID, Tys: OverloadedTy); |
9859 | |
// At the ACLE level there's only one predicate type, svbool_t, which is
// mapped to <vscale x 16 x i1>. However, this might be incompatible with the
// actual type being loaded. For example, when loading doubles (f64) the
// predicate should be <vscale x 2 x i1> instead. At the IR level the type of
// the predicate and the data being loaded must match. Cast to the type
// expected by the intrinsic. The intrinsic itself should be defined in
// a way that enforces relations between parameter types.
9867 | Ops[0] = EmitSVEPredicateCast( |
9868 | Pred: Ops[0], VTy: cast<llvm::ScalableVectorType>(Val: F->getArg(i: 0)->getType())); |
9869 | |
9870 | // Pass 0 when the offset is missing. This can only be applied when using |
9871 | // the "vector base" addressing mode for which ACLE allows no offset. The |
9872 | // corresponding LLVM IR always requires an offset. |
9873 | if (Ops.size() == 2) { |
9874 | assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset" ); |
9875 | Ops.push_back(Elt: ConstantInt::get(Ty: Int64Ty, V: 0)); |
9876 | } |
9877 | |
9878 | // For "vector base, scalar index" scale the index so that it becomes a |
9879 | // scalar offset. |
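// For example (illustrative): with 32-bit memory elements, BytesPerElt is 4,
// so the scalar index is shifted left by 2 to form a byte offset.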
9880 | if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) { |
9881 | unsigned BytesPerElt = |
9882 | OverloadedTy->getElementType()->getScalarSizeInBits() / 8; |
9883 | Ops[2] = Builder.CreateShl(LHS: Ops[2], RHS: Log2_32(Value: BytesPerElt)); |
9884 | } |
9885 | |
9886 | Value *Call = Builder.CreateCall(Callee: F, Args: Ops); |
9887 | |
9888 | // The following sext/zext is only needed when ResultTy != OverloadedTy. In |
9889 | // other cases it's folded into a nop. |
9890 | return TypeFlags.isZExtReturn() ? Builder.CreateZExt(V: Call, DestTy: ResultTy) |
9891 | : Builder.CreateSExt(V: Call, DestTy: ResultTy); |
9892 | } |
9893 | |
9894 | Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags, |
9895 | SmallVectorImpl<Value *> &Ops, |
9896 | unsigned IntID) { |
9897 | auto *SrcDataTy = getSVEType(TypeFlags); |
9898 | auto *OverloadedTy = |
9899 | llvm::ScalableVectorType::get(ElementType: SVEBuiltinMemEltTy(TypeFlags), SVTy: SrcDataTy); |
9900 | |
9901 | // In ACLE the source data is passed in the last argument, whereas in LLVM IR |
9902 | // it's the first argument. Move it accordingly. |
9903 | Ops.insert(I: Ops.begin(), Elt: Ops.pop_back_val()); |
9904 | |
9905 | Function *F = nullptr; |
9906 | if (Ops[2]->getType()->isVectorTy()) |
9907 | // This is the "vector base, scalar offset" case. In order to uniquely |
// map this built-in to an LLVM IR intrinsic, we need both the type of the
// stored data and the type of the vector base.
9910 | F = CGM.getIntrinsic(IID: IntID, Tys: {OverloadedTy, Ops[2]->getType()}); |
9911 | else |
9912 | // This is the "scalar base, vector offset case". The type of the offset |
9913 | // is encoded in the name of the intrinsic. We only need to specify the |
9914 | // return type in order to uniquely map this built-in to an LLVM IR |
9915 | // intrinsic. |
9916 | F = CGM.getIntrinsic(IID: IntID, Tys: OverloadedTy); |
9917 | |
9918 | // Pass 0 when the offset is missing. This can only be applied when using |
9919 | // the "vector base" addressing mode for which ACLE allows no offset. The |
9920 | // corresponding LLVM IR always requires an offset. |
9921 | if (Ops.size() == 3) { |
9922 | assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset" ); |
9923 | Ops.push_back(Elt: ConstantInt::get(Ty: Int64Ty, V: 0)); |
9924 | } |
9925 | |
9926 | // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's |
9927 | // folded into a nop. |
9928 | Ops[0] = Builder.CreateTrunc(V: Ops[0], DestTy: OverloadedTy); |
9929 | |
// At the ACLE level there's only one predicate type, svbool_t, which is
// mapped to <vscale x 16 x i1>. However, this might be incompatible with the
// actual type being stored. For example, when storing doubles (f64) the
// predicate should be <vscale x 2 x i1> instead. At the IR level the type of
9934 | // the predicate and the data being stored must match. Cast to the type |
9935 | // expected by the intrinsic. The intrinsic itself should be defined in |
9936 | // a way that enforces relations between parameter types. |
9937 | Ops[1] = EmitSVEPredicateCast( |
9938 | Pred: Ops[1], VTy: cast<llvm::ScalableVectorType>(Val: F->getArg(i: 1)->getType())); |
9939 | |
9940 | // For "vector base, scalar index" scale the index so that it becomes a |
9941 | // scalar offset. |
9942 | if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) { |
9943 | unsigned BytesPerElt = |
9944 | OverloadedTy->getElementType()->getScalarSizeInBits() / 8; |
9945 | Ops[3] = Builder.CreateShl(LHS: Ops[3], RHS: Log2_32(Value: BytesPerElt)); |
9946 | } |
9947 | |
9948 | return Builder.CreateCall(Callee: F, Args: Ops); |
9949 | } |
9950 | |
9951 | Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags, |
9952 | SmallVectorImpl<Value *> &Ops, |
9953 | unsigned IntID) { |
// The gather prefetches are overloaded on the vector input - this can be
// either the vector of base addresses or the vector of offsets.
9956 | auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Val: Ops[1]->getType()); |
9957 | if (!OverloadedTy) |
9958 | OverloadedTy = cast<llvm::ScalableVectorType>(Val: Ops[2]->getType()); |
9959 | |
9960 | // Cast the predicate from svbool_t to the right number of elements. |
9961 | Ops[0] = EmitSVEPredicateCast(Pred: Ops[0], VTy: OverloadedTy); |
9962 | |
9963 | // vector + imm addressing modes |
9964 | if (Ops[1]->getType()->isVectorTy()) { |
9965 | if (Ops.size() == 3) { |
9966 | // Pass 0 for 'vector+imm' when the index is omitted. |
9967 | Ops.push_back(Elt: ConstantInt::get(Ty: Int64Ty, V: 0)); |
9968 | |
9969 | // The sv_prfop is the last operand in the builtin and IR intrinsic. |
9970 | std::swap(a&: Ops[2], b&: Ops[3]); |
9971 | } else { |
9972 | // Index needs to be passed as scaled offset. |
9973 | llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags); |
9974 | unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8; |
9975 | if (BytesPerElt > 1) |
9976 | Ops[2] = Builder.CreateShl(LHS: Ops[2], RHS: Log2_32(Value: BytesPerElt)); |
9977 | } |
9978 | } |
9979 | |
9980 | Function *F = CGM.getIntrinsic(IID: IntID, Tys: OverloadedTy); |
9981 | return Builder.CreateCall(Callee: F, Args: Ops); |
9982 | } |
9983 | |
9984 | Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags, |
9985 | SmallVectorImpl<Value*> &Ops, |
9986 | unsigned IntID) { |
9987 | llvm::ScalableVectorType *VTy = getSVEType(TypeFlags); |
9988 | |
9989 | unsigned N; |
9990 | switch (IntID) { |
9991 | case Intrinsic::aarch64_sve_ld2_sret: |
9992 | case Intrinsic::aarch64_sve_ld1_pn_x2: |
9993 | case Intrinsic::aarch64_sve_ldnt1_pn_x2: |
9994 | case Intrinsic::aarch64_sve_ld2q_sret: |
9995 | N = 2; |
9996 | break; |
9997 | case Intrinsic::aarch64_sve_ld3_sret: |
9998 | case Intrinsic::aarch64_sve_ld3q_sret: |
9999 | N = 3; |
10000 | break; |
10001 | case Intrinsic::aarch64_sve_ld4_sret: |
10002 | case Intrinsic::aarch64_sve_ld1_pn_x4: |
10003 | case Intrinsic::aarch64_sve_ldnt1_pn_x4: |
10004 | case Intrinsic::aarch64_sve_ld4q_sret: |
10005 | N = 4; |
10006 | break; |
10007 | default: |
10008 | llvm_unreachable("unknown intrinsic!" ); |
10009 | } |
10010 | auto RetTy = llvm::VectorType::get(ElementType: VTy->getElementType(), |
10011 | EC: VTy->getElementCount() * N); |
10012 | |
10013 | Value *Predicate = EmitSVEPredicateCast(Pred: Ops[0], VTy); |
10014 | Value *BasePtr = Ops[1]; |
10015 | |
10016 | // Does the load have an offset? |
10017 | if (Ops.size() > 2) |
10018 | BasePtr = Builder.CreateGEP(Ty: VTy, Ptr: BasePtr, IdxList: Ops[2]); |
10019 | |
10020 | Function *F = CGM.getIntrinsic(IID: IntID, Tys: {VTy}); |
10021 | Value *Call = Builder.CreateCall(Callee: F, Args: {Predicate, BasePtr}); |
10022 | unsigned MinElts = VTy->getMinNumElements(); |
10023 | Value *Ret = llvm::PoisonValue::get(T: RetTy); |
10024 | for (unsigned I = 0; I < N; I++) { |
10025 | Value *Idx = ConstantInt::get(Ty: CGM.Int64Ty, V: I * MinElts); |
10026 | Value *SRet = Builder.CreateExtractValue(Agg: Call, Idxs: I); |
10027 | Ret = Builder.CreateInsertVector(DstType: RetTy, SrcVec: Ret, SubVec: SRet, Idx); |
10028 | } |
10029 | return Ret; |
10030 | } |
10031 | |
10032 | Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags, |
10033 | SmallVectorImpl<Value*> &Ops, |
10034 | unsigned IntID) { |
10035 | llvm::ScalableVectorType *VTy = getSVEType(TypeFlags); |
10036 | |
10037 | unsigned N; |
10038 | switch (IntID) { |
10039 | case Intrinsic::aarch64_sve_st2: |
10040 | case Intrinsic::aarch64_sve_st1_pn_x2: |
10041 | case Intrinsic::aarch64_sve_stnt1_pn_x2: |
10042 | case Intrinsic::aarch64_sve_st2q: |
10043 | N = 2; |
10044 | break; |
10045 | case Intrinsic::aarch64_sve_st3: |
10046 | case Intrinsic::aarch64_sve_st3q: |
10047 | N = 3; |
10048 | break; |
10049 | case Intrinsic::aarch64_sve_st4: |
10050 | case Intrinsic::aarch64_sve_st1_pn_x4: |
10051 | case Intrinsic::aarch64_sve_stnt1_pn_x4: |
10052 | case Intrinsic::aarch64_sve_st4q: |
10053 | N = 4; |
10054 | break; |
10055 | default: |
10056 | llvm_unreachable("unknown intrinsic!" ); |
10057 | } |
10058 | |
10059 | Value *Predicate = EmitSVEPredicateCast(Pred: Ops[0], VTy); |
10060 | Value *BasePtr = Ops[1]; |
10061 | |
10062 | // Does the store have an offset? |
10063 | if (Ops.size() > (2 + N)) |
10064 | BasePtr = Builder.CreateGEP(Ty: VTy, Ptr: BasePtr, IdxList: Ops[2]); |
10065 | |
10066 | // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we |
10067 | // need to break up the tuple vector. |
10068 | SmallVector<llvm::Value*, 5> Operands; |
10069 | for (unsigned I = Ops.size() - N; I < Ops.size(); ++I) |
10070 | Operands.push_back(Elt: Ops[I]); |
10071 | Operands.append(IL: {Predicate, BasePtr}); |
10072 | Function *F = CGM.getIntrinsic(IID: IntID, Tys: { VTy }); |
10073 | |
10074 | return Builder.CreateCall(Callee: F, Args: Operands); |
10075 | } |
10076 | |
10077 | // SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and |
10078 | // svpmullt_pair intrinsics, with the exception that their results are bitcast |
10079 | // to a wider type. |
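// For example (a sketch): svpmullb on 32-bit inputs is emitted as the
// narrower pmullb_pair intrinsic over <vscale x 4 x i32>, and the result is
// then reinterpreted as the wider <vscale x 2 x i64> vector.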
10080 | Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags, |
10081 | SmallVectorImpl<Value *> &Ops, |
10082 | unsigned BuiltinID) { |
10083 | // Splat scalar operand to vector (intrinsics with _n infix) |
10084 | if (TypeFlags.hasSplatOperand()) { |
10085 | unsigned OpNo = TypeFlags.getSplatOperand(); |
10086 | Ops[OpNo] = EmitSVEDupX(Scalar: Ops[OpNo]); |
10087 | } |
10088 | |
10089 | // The pair-wise function has a narrower overloaded type. |
10090 | Function *F = CGM.getIntrinsic(IID: BuiltinID, Tys: Ops[0]->getType()); |
10091 | Value *Call = Builder.CreateCall(Callee: F, Args: {Ops[0], Ops[1]}); |
10092 | |
10093 | // Now bitcast to the wider result type. |
10094 | llvm::ScalableVectorType *Ty = getSVEType(TypeFlags); |
10095 | return EmitSVEReinterpret(Val: Call, Ty); |
10096 | } |
10097 | |
10098 | Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags, |
10099 | ArrayRef<Value *> Ops, unsigned BuiltinID) { |
10100 | llvm::Type *OverloadedTy = getSVEType(TypeFlags); |
10101 | Function *F = CGM.getIntrinsic(IID: BuiltinID, Tys: OverloadedTy); |
10102 | return Builder.CreateCall(Callee: F, Args: {Ops[0], Builder.getInt32(C: 0)}); |
10103 | } |
10104 | |
10105 | Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags, |
10106 | SmallVectorImpl<Value *> &Ops, |
10107 | unsigned BuiltinID) { |
10108 | auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags); |
10109 | auto *VectorTy = getSVEVectorForElementType(EltTy: MemEltTy); |
10110 | auto *MemoryTy = llvm::ScalableVectorType::get(ElementType: MemEltTy, SVTy: VectorTy); |
10111 | |
10112 | Value *Predicate = EmitSVEPredicateCast(Pred: Ops[0], VTy: MemoryTy); |
10113 | Value *BasePtr = Ops[1]; |
10114 | |
// If the index operand was not omitted, fold it into the base pointer.
10116 | if (Ops.size() > 3) |
10117 | BasePtr = Builder.CreateGEP(Ty: MemoryTy, Ptr: BasePtr, IdxList: Ops[2]); |
10118 | |
10119 | Value *PrfOp = Ops.back(); |
10120 | |
10121 | Function *F = CGM.getIntrinsic(IID: BuiltinID, Tys: Predicate->getType()); |
10122 | return Builder.CreateCall(Callee: F, Args: {Predicate, BasePtr, PrfOp}); |
10123 | } |
10124 | |
10125 | Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E, |
10126 | llvm::Type *ReturnTy, |
10127 | SmallVectorImpl<Value *> &Ops, |
10128 | unsigned IntrinsicID, |
10129 | bool IsZExtReturn) { |
10130 | QualType LangPTy = E->getArg(Arg: 1)->getType(); |
10131 | llvm::Type *MemEltTy = CGM.getTypes().ConvertType( |
10132 | T: LangPTy->castAs<PointerType>()->getPointeeType()); |
10133 | |
10134 | // The vector type that is returned may be different from the |
10135 | // eventual type loaded from memory. |
10136 | auto VectorTy = cast<llvm::ScalableVectorType>(Val: ReturnTy); |
10137 | llvm::ScalableVectorType *MemoryTy = nullptr; |
10138 | llvm::ScalableVectorType *PredTy = nullptr; |
10139 | bool IsQuadLoad = false; |
10140 | switch (IntrinsicID) { |
10141 | case Intrinsic::aarch64_sve_ld1uwq: |
10142 | case Intrinsic::aarch64_sve_ld1udq: |
10143 | MemoryTy = llvm::ScalableVectorType::get(ElementType: MemEltTy, MinNumElts: 1); |
10144 | PredTy = llvm::ScalableVectorType::get( |
10145 | ElementType: llvm::Type::getInt1Ty(C&: getLLVMContext()), MinNumElts: 1); |
10146 | IsQuadLoad = true; |
10147 | break; |
10148 | default: |
10149 | MemoryTy = llvm::ScalableVectorType::get(ElementType: MemEltTy, SVTy: VectorTy); |
10150 | PredTy = MemoryTy; |
10151 | break; |
10152 | } |
10153 | |
10154 | Value *Predicate = EmitSVEPredicateCast(Pred: Ops[0], VTy: PredTy); |
10155 | Value *BasePtr = Ops[1]; |
10156 | |
10157 | // Does the load have an offset? |
10158 | if (Ops.size() > 2) |
10159 | BasePtr = Builder.CreateGEP(Ty: MemoryTy, Ptr: BasePtr, IdxList: Ops[2]); |
10160 | |
10161 | Function *F = CGM.getIntrinsic(IID: IntrinsicID, Tys: IsQuadLoad ? VectorTy : MemoryTy); |
10162 | auto *Load = |
10163 | cast<llvm::Instruction>(Val: Builder.CreateCall(Callee: F, Args: {Predicate, BasePtr})); |
10164 | auto TBAAInfo = CGM.getTBAAAccessInfo(AccessType: LangPTy->getPointeeType()); |
10165 | CGM.DecorateInstructionWithTBAA(Inst: Load, TBAAInfo); |
10166 | |
10167 | if (IsQuadLoad) |
10168 | return Load; |
10169 | |
10170 | return IsZExtReturn ? Builder.CreateZExt(V: Load, DestTy: VectorTy) |
10171 | : Builder.CreateSExt(V: Load, DestTy: VectorTy); |
10172 | } |
10173 | |
10174 | Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E, |
10175 | SmallVectorImpl<Value *> &Ops, |
10176 | unsigned IntrinsicID) { |
10177 | QualType LangPTy = E->getArg(Arg: 1)->getType(); |
10178 | llvm::Type *MemEltTy = CGM.getTypes().ConvertType( |
10179 | T: LangPTy->castAs<PointerType>()->getPointeeType()); |
10180 | |
10181 | // The vector type that is stored may be different from the |
10182 | // eventual type stored to memory. |
10183 | auto VectorTy = cast<llvm::ScalableVectorType>(Val: Ops.back()->getType()); |
10184 | auto MemoryTy = llvm::ScalableVectorType::get(ElementType: MemEltTy, SVTy: VectorTy); |
10185 | |
10186 | auto PredTy = MemoryTy; |
10187 | auto AddrMemoryTy = MemoryTy; |
10188 | bool IsQuadStore = false; |
10189 | |
10190 | switch (IntrinsicID) { |
10191 | case Intrinsic::aarch64_sve_st1wq: |
10192 | case Intrinsic::aarch64_sve_st1dq: |
10193 | AddrMemoryTy = llvm::ScalableVectorType::get(ElementType: MemEltTy, MinNumElts: 1); |
10194 | PredTy = |
10195 | llvm::ScalableVectorType::get(ElementType: IntegerType::get(C&: getLLVMContext(), NumBits: 1), MinNumElts: 1); |
10196 | IsQuadStore = true; |
10197 | break; |
10198 | default: |
10199 | break; |
10200 | } |
10201 | Value *Predicate = EmitSVEPredicateCast(Pred: Ops[0], VTy: PredTy); |
10202 | Value *BasePtr = Ops[1]; |
10203 | |
10204 | // Does the store have an offset? |
10205 | if (Ops.size() == 4) |
10206 | BasePtr = Builder.CreateGEP(Ty: AddrMemoryTy, Ptr: BasePtr, IdxList: Ops[2]); |
10207 | |
10208 | // Last value is always the data |
10209 | Value *Val = |
10210 | IsQuadStore ? Ops.back() : Builder.CreateTrunc(V: Ops.back(), DestTy: MemoryTy); |
10211 | |
10212 | Function *F = |
10213 | CGM.getIntrinsic(IID: IntrinsicID, Tys: IsQuadStore ? VectorTy : MemoryTy); |
10214 | auto *Store = |
10215 | cast<llvm::Instruction>(Val: Builder.CreateCall(Callee: F, Args: {Val, Predicate, BasePtr})); |
10216 | auto TBAAInfo = CGM.getTBAAAccessInfo(AccessType: LangPTy->getPointeeType()); |
10217 | CGM.DecorateInstructionWithTBAA(Inst: Store, TBAAInfo); |
10218 | return Store; |
10219 | } |
10220 | |
10221 | Value *CodeGenFunction::EmitSMELd1St1(const SVETypeFlags &TypeFlags, |
10222 | SmallVectorImpl<Value *> &Ops, |
10223 | unsigned IntID) { |
10224 | Ops[2] = EmitSVEPredicateCast( |
10225 | Pred: Ops[2], VTy: getSVEVectorForElementType(EltTy: SVEBuiltinMemEltTy(TypeFlags))); |
10226 | |
10227 | SmallVector<Value *> NewOps; |
10228 | NewOps.push_back(Elt: Ops[2]); |
10229 | |
10230 | llvm::Value *BasePtr = Ops[3]; |
10231 | |
10232 | // If the intrinsic contains the vnum parameter, multiply it with the vector |
10233 | // size in bytes. |
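// Illustrative sketch of the addressing computed below: the effective base
// becomes ptr + vnum * cntsb(), where cntsb() is the streaming vector length
// in bytes.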
10234 | if (Ops.size() == 5) { |
10235 | Function *StreamingVectorLength = |
10236 | CGM.getIntrinsic(IID: Intrinsic::aarch64_sme_cntsb); |
10237 | llvm::Value *StreamingVectorLengthCall = |
10238 | Builder.CreateCall(Callee: StreamingVectorLength); |
10239 | llvm::Value *Mulvl = |
10240 | Builder.CreateMul(LHS: StreamingVectorLengthCall, RHS: Ops[4], Name: "mulvl" ); |
10241 | // The type of the ptr parameter is void *, so use Int8Ty here. |
10242 | BasePtr = Builder.CreateGEP(Ty: Int8Ty, Ptr: Ops[3], IdxList: Mulvl); |
10243 | } |
10244 | NewOps.push_back(Elt: BasePtr); |
10245 | NewOps.push_back(Elt: Ops[0]); |
10246 | NewOps.push_back(Elt: Ops[1]); |
10247 | Function *F = CGM.getIntrinsic(IID: IntID); |
10248 | return Builder.CreateCall(Callee: F, Args: NewOps); |
10249 | } |
10250 | |
10251 | Value *CodeGenFunction::EmitSMEReadWrite(const SVETypeFlags &TypeFlags, |
10252 | SmallVectorImpl<Value *> &Ops, |
10253 | unsigned IntID) { |
10254 | auto *VecTy = getSVEType(TypeFlags); |
10255 | Function *F = CGM.getIntrinsic(IID: IntID, Tys: VecTy); |
10256 | if (TypeFlags.isReadZA()) |
10257 | Ops[1] = EmitSVEPredicateCast(Pred: Ops[1], VTy: VecTy); |
10258 | else if (TypeFlags.isWriteZA()) |
10259 | Ops[2] = EmitSVEPredicateCast(Pred: Ops[2], VTy: VecTy); |
10260 | return Builder.CreateCall(Callee: F, Args: Ops); |
10261 | } |
10262 | |
10263 | Value *CodeGenFunction::EmitSMEZero(const SVETypeFlags &TypeFlags, |
10264 | SmallVectorImpl<Value *> &Ops, |
10265 | unsigned IntID) { |
  // The svzero_za() intrinsic zeros the entire ZA array and takes no
  // parameters, so pass an all-ones tile mask (255).
10267 | if (Ops.size() == 0) |
10268 | Ops.push_back(Elt: llvm::ConstantInt::get(Ty: Int32Ty, V: 255)); |
10269 | Function *F = CGM.getIntrinsic(IID: IntID, Tys: {}); |
10270 | return Builder.CreateCall(Callee: F, Args: Ops); |
10271 | } |
10272 | |
10273 | Value *CodeGenFunction::EmitSMELdrStr(const SVETypeFlags &TypeFlags, |
10274 | SmallVectorImpl<Value *> &Ops, |
10275 | unsigned IntID) { |
10276 | if (Ops.size() == 2) |
10277 | Ops.push_back(Elt: Builder.getInt32(C: 0)); |
10278 | else |
10279 | Ops[2] = Builder.CreateIntCast(V: Ops[2], DestTy: Int32Ty, isSigned: true); |
10280 | Function *F = CGM.getIntrinsic(IID: IntID, Tys: {}); |
10281 | return Builder.CreateCall(Callee: F, Args: Ops); |
10282 | } |
10283 | |
// Splat a scalar across all lanes of an SVE vector. This is currently
// implemented with IRBuilder::CreateVectorSplat rather than the sve dup.x
// intrinsic.
10286 | Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) { |
10287 | return Builder.CreateVectorSplat( |
10288 | EC: cast<llvm::VectorType>(Val: Ty)->getElementCount(), V: Scalar); |
10289 | } |
10290 | |
10291 | Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) { |
10292 | return EmitSVEDupX(Scalar, Ty: getSVEVectorForElementType(EltTy: Scalar->getType())); |
10293 | } |
10294 | |
10295 | Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) { |
10296 | // FIXME: For big endian this needs an additional REV, or needs a separate |
10297 | // intrinsic that is code-generated as a no-op, because the LLVM bitcast |
10298 | // instruction is defined as 'bitwise' equivalent from memory point of |
10299 | // view (when storing/reloading), whereas the svreinterpret builtin |
10300 | // implements bitwise equivalent cast from register point of view. |
10301 | // LLVM CodeGen for a bitcast must add an explicit REV for big-endian. |
10302 | return Builder.CreateBitCast(V: Val, DestTy: Ty); |
10303 | } |
10304 | |
10305 | static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty, |
10306 | SmallVectorImpl<Value *> &Ops) { |
10307 | auto *SplatZero = Constant::getNullValue(Ty); |
10308 | Ops.insert(I: Ops.begin(), Elt: SplatZero); |
10309 | } |
10310 | |
10311 | static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty, |
10312 | SmallVectorImpl<Value *> &Ops) { |
10313 | auto *SplatUndef = UndefValue::get(T: Ty); |
10314 | Ops.insert(I: Ops.begin(), Elt: SplatUndef); |
10315 | } |
10316 | |
10317 | SmallVector<llvm::Type *, 2> |
10318 | CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags, |
10319 | llvm::Type *ResultType, |
10320 | ArrayRef<Value *> Ops) { |
10321 | if (TypeFlags.isOverloadNone()) |
10322 | return {}; |
10323 | |
10324 | llvm::Type *DefaultType = getSVEType(TypeFlags); |
10325 | |
10326 | if (TypeFlags.isOverloadWhileOrMultiVecCvt()) |
10327 | return {DefaultType, Ops[1]->getType()}; |
10328 | |
10329 | if (TypeFlags.isOverloadWhileRW()) |
10330 | return {getSVEPredType(TypeFlags), Ops[0]->getType()}; |
10331 | |
10332 | if (TypeFlags.isOverloadCvt()) |
10333 | return {Ops[0]->getType(), Ops.back()->getType()}; |
10334 | |
10335 | if (TypeFlags.isReductionQV() && !ResultType->isScalableTy() && |
10336 | ResultType->isVectorTy()) |
10337 | return {ResultType, Ops[1]->getType()}; |
10338 | |
10339 | assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads" ); |
10340 | return {DefaultType}; |
10341 | } |
10342 | |
10343 | Value *CodeGenFunction::EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags, |
10344 | llvm::Type *Ty, |
10345 | ArrayRef<Value *> Ops) { |
10346 | assert((TypeFlags.isTupleSet() || TypeFlags.isTupleGet()) && |
10347 | "Expects TypleFlags.isTupleSet() or TypeFlags.isTupleGet()" ); |
10348 | |
10349 | unsigned I = cast<ConstantInt>(Val: Ops[1])->getSExtValue(); |
10350 | auto *SingleVecTy = dyn_cast<llvm::ScalableVectorType>( |
10351 | Val: TypeFlags.isTupleSet() ? Ops[2]->getType() : Ty); |
10352 | |
10353 | if (!SingleVecTy) |
10354 | return nullptr; |
10355 | |
10356 | Value *Idx = ConstantInt::get(Ty: CGM.Int64Ty, |
10357 | V: I * SingleVecTy->getMinNumElements()); |
10358 | |
10359 | if (TypeFlags.isTupleSet()) |
10360 | return Builder.CreateInsertVector(DstType: Ty, SrcVec: Ops[0], SubVec: Ops[2], Idx); |
10361 | return Builder.CreateExtractVector(DstType: Ty, SrcVec: Ops[0], Idx); |
10362 | } |
10363 | |
10364 | Value *CodeGenFunction::EmitSVETupleCreate(const SVETypeFlags &TypeFlags, |
10365 | llvm::Type *Ty, |
10366 | ArrayRef<Value *> Ops) { |
  assert(TypeFlags.isTupleCreate() && "Expects TypeFlags.isTupleCreate()" );
10368 | |
10369 | auto *SrcTy = dyn_cast<llvm::ScalableVectorType>(Val: Ops[0]->getType()); |
10370 | |
10371 | if (!SrcTy) |
10372 | return nullptr; |
10373 | |
10374 | unsigned MinElts = SrcTy->getMinNumElements(); |
10375 | Value *Call = llvm::PoisonValue::get(T: Ty); |
10376 | for (unsigned I = 0; I < Ops.size(); I++) { |
10377 | Value *Idx = ConstantInt::get(Ty: CGM.Int64Ty, V: I * MinElts); |
10378 | Call = Builder.CreateInsertVector(DstType: Ty, SrcVec: Call, SubVec: Ops[I], Idx); |
10379 | } |
10380 | |
10381 | return Call; |
10382 | } |
10383 | |
10384 | Value *CodeGenFunction::FormSVEBuiltinResult(Value *Call) { |
  // Multi-vector results are returned as a struct of vectors; combine them
  // into a single (wide) result vector.
10387 | auto *StructTy = dyn_cast<StructType>(Val: Call->getType()); |
10388 | if (!StructTy) |
10389 | return Call; |
10390 | |
10391 | auto *VTy = dyn_cast<ScalableVectorType>(Val: StructTy->getTypeAtIndex(N: 0U)); |
10392 | if (!VTy) |
10393 | return Call; |
10394 | unsigned N = StructTy->getNumElements(); |
10395 | |
  // We may need to emit a cast to an svbool_t; predicates are always widened
  // to the 16-lane svbool_t representation.
10397 | bool IsPredTy = VTy->getElementType()->isIntegerTy(Bitwidth: 1); |
10398 | unsigned MinElts = IsPredTy ? 16 : VTy->getMinNumElements(); |
10399 | |
10400 | ScalableVectorType *WideVTy = |
10401 | ScalableVectorType::get(ElementType: VTy->getElementType(), MinNumElts: MinElts * N); |
10402 | Value *Ret = llvm::PoisonValue::get(T: WideVTy); |
10403 | for (unsigned I = 0; I < N; ++I) { |
10404 | Value *SRet = Builder.CreateExtractValue(Agg: Call, Idxs: I); |
10405 | assert(SRet->getType() == VTy && "Unexpected type for result value" ); |
10406 | Value *Idx = ConstantInt::get(Ty: CGM.Int64Ty, V: I * MinElts); |
10407 | |
10408 | if (IsPredTy) |
10409 | SRet = EmitSVEPredicateCast( |
10410 | Pred: SRet, VTy: ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 16)); |
10411 | |
10412 | Ret = Builder.CreateInsertVector(DstType: WideVTy, SrcVec: Ret, SubVec: SRet, Idx); |
10413 | } |
10414 | Call = Ret; |
10415 | |
10416 | return Call; |
10417 | } |
10418 | |
10419 | void CodeGenFunction::GetAArch64SVEProcessedOperands( |
10420 | unsigned BuiltinID, const CallExpr *E, SmallVectorImpl<Value *> &Ops, |
10421 | SVETypeFlags TypeFlags) { |
10422 | // Find out if any arguments are required to be integer constant expressions. |
10423 | unsigned ICEArguments = 0; |
10424 | ASTContext::GetBuiltinTypeError Error; |
10425 | getContext().GetBuiltinType(ID: BuiltinID, Error, IntegerConstantArgs: &ICEArguments); |
10426 | assert(Error == ASTContext::GE_None && "Should not codegen an error" ); |
10427 | |
10428 | // Tuple set/get only requires one insert/extract vector, which is |
10429 | // created by EmitSVETupleSetOrGet. |
10430 | bool IsTupleGetOrSet = TypeFlags.isTupleSet() || TypeFlags.isTupleGet(); |
10431 | |
10432 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { |
10433 | bool IsICE = ICEArguments & (1 << i); |
10434 | Value *Arg = EmitScalarExpr(E: E->getArg(Arg: i)); |
10435 | |
10436 | if (IsICE) { |
10437 | // If this is required to be a constant, constant fold it so that we know |
10438 | // that the generated intrinsic gets a ConstantInt. |
10439 | std::optional<llvm::APSInt> Result = |
10440 | E->getArg(Arg: i)->getIntegerConstantExpr(Ctx: getContext()); |
10441 | assert(Result && "Expected argument to be a constant" ); |
10442 | |
      // Immediates for SVE LLVM intrinsics are always 32-bit. We can safely
      // truncate because the immediate has been range-checked and no valid
      // immediate requires more than a handful of bits.
10446 | *Result = Result->extOrTrunc(width: 32); |
10447 | Ops.push_back(Elt: llvm::ConstantInt::get(Context&: getLLVMContext(), V: *Result)); |
10448 | continue; |
10449 | } |
10450 | |
10451 | if (IsTupleGetOrSet || !isa<ScalableVectorType>(Val: Arg->getType())) { |
10452 | Ops.push_back(Elt: Arg); |
10453 | continue; |
10454 | } |
10455 | |
    // A scalable argument wider than a single SVE register (e.g. a tuple
    // type) arrives as one wide vector; split it into N single-register
    // pieces (128 bits of data, or 16 lanes for a predicate).
    auto *VTy = cast<ScalableVectorType>(Val: Arg->getType());
    unsigned MinElts = VTy->getMinNumElements();
    bool IsPred = VTy->getElementType()->isIntegerTy(Bitwidth: 1);
    unsigned N = (MinElts * VTy->getScalarSizeInBits()) / (IsPred ? 16 : 128);
10460 | |
10461 | if (N == 1) { |
10462 | Ops.push_back(Elt: Arg); |
10463 | continue; |
10464 | } |
10465 | |
10466 | for (unsigned I = 0; I < N; ++I) { |
10467 | Value *Idx = ConstantInt::get(Ty: CGM.Int64Ty, V: (I * MinElts) / N); |
10468 | auto *NewVTy = |
10469 | ScalableVectorType::get(ElementType: VTy->getElementType(), MinNumElts: MinElts / N); |
10470 | Ops.push_back(Elt: Builder.CreateExtractVector(DstType: NewVTy, SrcVec: Arg, Idx)); |
10471 | } |
10472 | } |
10473 | } |
10474 | |
10475 | Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, |
10476 | const CallExpr *E) { |
10477 | llvm::Type *Ty = ConvertType(T: E->getType()); |
10478 | if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 && |
10479 | BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64_x4) { |
10480 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 0)); |
10481 | return EmitSVEReinterpret(Val, Ty); |
10482 | } |
10483 | |
10484 | auto *Builtin = findARMVectorIntrinsicInMap(IntrinsicMap: AArch64SVEIntrinsicMap, BuiltinID, |
10485 | MapProvenSorted&: AArch64SVEIntrinsicsProvenSorted); |
10486 | |
10487 | llvm::SmallVector<Value *, 4> Ops; |
10488 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
10489 | GetAArch64SVEProcessedOperands(BuiltinID, E, Ops, TypeFlags); |
10490 | |
10491 | if (TypeFlags.isLoad()) |
10492 | return EmitSVEMaskedLoad(E, ReturnTy: Ty, Ops, IntrinsicID: Builtin->LLVMIntrinsic, |
10493 | IsZExtReturn: TypeFlags.isZExtReturn()); |
10494 | else if (TypeFlags.isStore()) |
10495 | return EmitSVEMaskedStore(E, Ops, IntrinsicID: Builtin->LLVMIntrinsic); |
10496 | else if (TypeFlags.isGatherLoad()) |
10497 | return EmitSVEGatherLoad(TypeFlags, Ops, IntID: Builtin->LLVMIntrinsic); |
10498 | else if (TypeFlags.isScatterStore()) |
10499 | return EmitSVEScatterStore(TypeFlags, Ops, IntID: Builtin->LLVMIntrinsic); |
10500 | else if (TypeFlags.isPrefetch()) |
10501 | return EmitSVEPrefetchLoad(TypeFlags, Ops, BuiltinID: Builtin->LLVMIntrinsic); |
10502 | else if (TypeFlags.isGatherPrefetch()) |
10503 | return EmitSVEGatherPrefetch(TypeFlags, Ops, IntID: Builtin->LLVMIntrinsic); |
10504 | else if (TypeFlags.isStructLoad()) |
10505 | return EmitSVEStructLoad(TypeFlags, Ops, IntID: Builtin->LLVMIntrinsic); |
10506 | else if (TypeFlags.isStructStore()) |
10507 | return EmitSVEStructStore(TypeFlags, Ops, IntID: Builtin->LLVMIntrinsic); |
10508 | else if (TypeFlags.isTupleSet() || TypeFlags.isTupleGet()) |
10509 | return EmitSVETupleSetOrGet(TypeFlags, Ty, Ops); |
10510 | else if (TypeFlags.isTupleCreate()) |
10511 | return EmitSVETupleCreate(TypeFlags, Ty, Ops); |
10512 | else if (TypeFlags.isUndef()) |
10513 | return UndefValue::get(T: Ty); |
10514 | else if (Builtin->LLVMIntrinsic != 0) { |
10515 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp) |
10516 | InsertExplicitZeroOperand(Builder, Ty, Ops); |
10517 | |
10518 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp) |
10519 | InsertExplicitUndefOperand(Builder, Ty, Ops); |
10520 | |
10521 | // Some ACLE builtins leave out the argument to specify the predicate |
10522 | // pattern, which is expected to be expanded to an SV_ALL pattern. |
10523 | if (TypeFlags.isAppendSVALL()) |
10524 | Ops.push_back(Elt: Builder.getInt32(/*SV_ALL*/ C: 31)); |
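    // E.g. a counting builtin such as svcntb() may lower to
    // @llvm.aarch64.sve.cntb(i32 31), where 31 selects the SV_ALL pattern
    // (illustrative sketch).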
10525 | if (TypeFlags.isInsertOp1SVALL()) |
10526 | Ops.insert(I: &Ops[1], Elt: Builder.getInt32(/*SV_ALL*/ C: 31)); |
10527 | |
10528 | // Predicates must match the main datatype. |
10529 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
10530 | if (auto PredTy = dyn_cast<llvm::VectorType>(Val: Ops[i]->getType())) |
10531 | if (PredTy->getElementType()->isIntegerTy(Bitwidth: 1)) |
10532 | Ops[i] = EmitSVEPredicateCast(Pred: Ops[i], VTy: getSVEType(TypeFlags)); |
10533 | |
10534 | // Splat scalar operand to vector (intrinsics with _n infix) |
10535 | if (TypeFlags.hasSplatOperand()) { |
10536 | unsigned OpNo = TypeFlags.getSplatOperand(); |
10537 | Ops[OpNo] = EmitSVEDupX(Scalar: Ops[OpNo]); |
10538 | } |
10539 | |
10540 | if (TypeFlags.isReverseCompare()) |
10541 | std::swap(a&: Ops[1], b&: Ops[2]); |
10542 | else if (TypeFlags.isReverseUSDOT()) |
10543 | std::swap(a&: Ops[1], b&: Ops[2]); |
10544 | else if (TypeFlags.isReverseMergeAnyBinOp() && |
10545 | TypeFlags.getMergeType() == SVETypeFlags::MergeAny) |
10546 | std::swap(a&: Ops[1], b&: Ops[2]); |
10547 | else if (TypeFlags.isReverseMergeAnyAccOp() && |
10548 | TypeFlags.getMergeType() == SVETypeFlags::MergeAny) |
10549 | std::swap(a&: Ops[1], b&: Ops[3]); |
10550 | |
10551 | // Predicated intrinsics with _z suffix need a select w/ zeroinitializer. |
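    // E.g. for svadd_s32_z(pg, a, b) the inactive lanes of 'a' are zeroed via
    // select(pg, a, 0) before the add intrinsic is called (illustrative
    // sketch).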
10552 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) { |
10553 | llvm::Type *OpndTy = Ops[1]->getType(); |
10554 | auto *SplatZero = Constant::getNullValue(Ty: OpndTy); |
10555 | Ops[1] = Builder.CreateSelect(C: Ops[0], True: Ops[1], False: SplatZero); |
10556 | } |
10557 | |
10558 | Function *F = CGM.getIntrinsic(IID: Builtin->LLVMIntrinsic, |
10559 | Tys: getSVEOverloadTypes(TypeFlags, ResultType: Ty, Ops)); |
10560 | Value *Call = Builder.CreateCall(Callee: F, Args: Ops); |
10561 | |
10562 | // Predicate results must be converted to svbool_t. |
10563 | if (auto PredTy = dyn_cast<llvm::VectorType>(Val: Call->getType())) |
10564 | if (PredTy->getScalarType()->isIntegerTy(Bitwidth: 1)) |
10565 | Call = EmitSVEPredicateCast(Pred: Call, VTy: cast<llvm::ScalableVectorType>(Val: Ty)); |
10566 | |
10567 | return FormSVEBuiltinResult(Call); |
10568 | } |
10569 | |
10570 | switch (BuiltinID) { |
10571 | default: |
10572 | return nullptr; |
10573 | |
10574 | case SVE::BI__builtin_sve_svreinterpret_b: { |
10575 | auto SVCountTy = |
10576 | llvm::TargetExtType::get(Context&: getLLVMContext(), Name: "aarch64.svcount" ); |
10577 | Function *CastFromSVCountF = |
10578 | CGM.getIntrinsic(IID: Intrinsic::aarch64_sve_convert_to_svbool, Tys: SVCountTy); |
10579 | return Builder.CreateCall(Callee: CastFromSVCountF, Args: Ops[0]); |
10580 | } |
10581 | case SVE::BI__builtin_sve_svreinterpret_c: { |
10582 | auto SVCountTy = |
10583 | llvm::TargetExtType::get(Context&: getLLVMContext(), Name: "aarch64.svcount" ); |
10584 | Function *CastToSVCountF = |
10585 | CGM.getIntrinsic(IID: Intrinsic::aarch64_sve_convert_from_svbool, Tys: SVCountTy); |
10586 | return Builder.CreateCall(Callee: CastToSVCountF, Args: Ops[0]); |
10587 | } |
10588 | |
10589 | case SVE::BI__builtin_sve_svpsel_lane_b8: |
10590 | case SVE::BI__builtin_sve_svpsel_lane_b16: |
10591 | case SVE::BI__builtin_sve_svpsel_lane_b32: |
10592 | case SVE::BI__builtin_sve_svpsel_lane_b64: |
10593 | case SVE::BI__builtin_sve_svpsel_lane_c8: |
10594 | case SVE::BI__builtin_sve_svpsel_lane_c16: |
10595 | case SVE::BI__builtin_sve_svpsel_lane_c32: |
10596 | case SVE::BI__builtin_sve_svpsel_lane_c64: { |
10597 | bool IsSVCount = isa<TargetExtType>(Val: Ops[0]->getType()); |
10598 | assert(((!IsSVCount || cast<TargetExtType>(Ops[0]->getType())->getName() == |
10599 | "aarch64.svcount" )) && |
10600 | "Unexpected TargetExtType" ); |
10601 | auto SVCountTy = |
10602 | llvm::TargetExtType::get(Context&: getLLVMContext(), Name: "aarch64.svcount" ); |
10603 | Function *CastFromSVCountF = |
10604 | CGM.getIntrinsic(IID: Intrinsic::aarch64_sve_convert_to_svbool, Tys: SVCountTy); |
10605 | Function *CastToSVCountF = |
10606 | CGM.getIntrinsic(IID: Intrinsic::aarch64_sve_convert_from_svbool, Tys: SVCountTy); |
10607 | |
10608 | auto OverloadedTy = getSVEType(TypeFlags: SVETypeFlags(Builtin->TypeModifier)); |
10609 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_sve_psel, Tys: OverloadedTy); |
10610 | llvm::Value *Ops0 = |
10611 | IsSVCount ? Builder.CreateCall(Callee: CastFromSVCountF, Args: Ops[0]) : Ops[0]; |
10612 | llvm::Value *Ops1 = EmitSVEPredicateCast(Pred: Ops[1], VTy: OverloadedTy); |
10613 | llvm::Value *PSel = Builder.CreateCall(Callee: F, Args: {Ops0, Ops1, Ops[2]}); |
10614 | return IsSVCount ? Builder.CreateCall(Callee: CastToSVCountF, Args: PSel) : PSel; |
10615 | } |
10616 | case SVE::BI__builtin_sve_svmov_b_z: { |
10617 | // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op) |
10618 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
10619 | llvm::Type* OverloadedTy = getSVEType(TypeFlags); |
10620 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_sve_and_z, Tys: OverloadedTy); |
10621 | return Builder.CreateCall(Callee: F, Args: {Ops[0], Ops[1], Ops[1]}); |
10622 | } |
10623 | |
10624 | case SVE::BI__builtin_sve_svnot_b_z: { |
10625 | // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg) |
10626 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
10627 | llvm::Type* OverloadedTy = getSVEType(TypeFlags); |
10628 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_sve_eor_z, Tys: OverloadedTy); |
10629 | return Builder.CreateCall(Callee: F, Args: {Ops[0], Ops[1], Ops[0]}); |
10630 | } |
10631 | |
10632 | case SVE::BI__builtin_sve_svmovlb_u16: |
10633 | case SVE::BI__builtin_sve_svmovlb_u32: |
10634 | case SVE::BI__builtin_sve_svmovlb_u64: |
10635 | return EmitSVEMovl(TypeFlags, Ops, BuiltinID: Intrinsic::aarch64_sve_ushllb); |
10636 | |
10637 | case SVE::BI__builtin_sve_svmovlb_s16: |
10638 | case SVE::BI__builtin_sve_svmovlb_s32: |
10639 | case SVE::BI__builtin_sve_svmovlb_s64: |
10640 | return EmitSVEMovl(TypeFlags, Ops, BuiltinID: Intrinsic::aarch64_sve_sshllb); |
10641 | |
10642 | case SVE::BI__builtin_sve_svmovlt_u16: |
10643 | case SVE::BI__builtin_sve_svmovlt_u32: |
10644 | case SVE::BI__builtin_sve_svmovlt_u64: |
10645 | return EmitSVEMovl(TypeFlags, Ops, BuiltinID: Intrinsic::aarch64_sve_ushllt); |
10646 | |
10647 | case SVE::BI__builtin_sve_svmovlt_s16: |
10648 | case SVE::BI__builtin_sve_svmovlt_s32: |
10649 | case SVE::BI__builtin_sve_svmovlt_s64: |
10650 | return EmitSVEMovl(TypeFlags, Ops, BuiltinID: Intrinsic::aarch64_sve_sshllt); |
10651 | |
10652 | case SVE::BI__builtin_sve_svpmullt_u16: |
10653 | case SVE::BI__builtin_sve_svpmullt_u64: |
10654 | case SVE::BI__builtin_sve_svpmullt_n_u16: |
10655 | case SVE::BI__builtin_sve_svpmullt_n_u64: |
10656 | return EmitSVEPMull(TypeFlags, Ops, BuiltinID: Intrinsic::aarch64_sve_pmullt_pair); |
10657 | |
10658 | case SVE::BI__builtin_sve_svpmullb_u16: |
10659 | case SVE::BI__builtin_sve_svpmullb_u64: |
10660 | case SVE::BI__builtin_sve_svpmullb_n_u16: |
10661 | case SVE::BI__builtin_sve_svpmullb_n_u64: |
10662 | return EmitSVEPMull(TypeFlags, Ops, BuiltinID: Intrinsic::aarch64_sve_pmullb_pair); |
10663 | |
10664 | case SVE::BI__builtin_sve_svdup_n_b8: |
10665 | case SVE::BI__builtin_sve_svdup_n_b16: |
10666 | case SVE::BI__builtin_sve_svdup_n_b32: |
10667 | case SVE::BI__builtin_sve_svdup_n_b64: { |
10668 | Value *CmpNE = |
10669 | Builder.CreateICmpNE(LHS: Ops[0], RHS: Constant::getNullValue(Ty: Ops[0]->getType())); |
10670 | llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags); |
10671 | Value *Dup = EmitSVEDupX(Scalar: CmpNE, Ty: OverloadedTy); |
10672 | return EmitSVEPredicateCast(Pred: Dup, VTy: cast<llvm::ScalableVectorType>(Val: Ty)); |
10673 | } |
10674 | |
10675 | case SVE::BI__builtin_sve_svdupq_n_b8: |
10676 | case SVE::BI__builtin_sve_svdupq_n_b16: |
10677 | case SVE::BI__builtin_sve_svdupq_n_b32: |
10678 | case SVE::BI__builtin_sve_svdupq_n_b64: |
10679 | case SVE::BI__builtin_sve_svdupq_n_u8: |
10680 | case SVE::BI__builtin_sve_svdupq_n_s8: |
10681 | case SVE::BI__builtin_sve_svdupq_n_u64: |
10682 | case SVE::BI__builtin_sve_svdupq_n_f64: |
10683 | case SVE::BI__builtin_sve_svdupq_n_s64: |
10684 | case SVE::BI__builtin_sve_svdupq_n_u16: |
10685 | case SVE::BI__builtin_sve_svdupq_n_f16: |
10686 | case SVE::BI__builtin_sve_svdupq_n_bf16: |
10687 | case SVE::BI__builtin_sve_svdupq_n_s16: |
10688 | case SVE::BI__builtin_sve_svdupq_n_u32: |
10689 | case SVE::BI__builtin_sve_svdupq_n_f32: |
10690 | case SVE::BI__builtin_sve_svdupq_n_s32: { |
    // These builtins are implemented by building a fixed-length vector from
    // the scalar operands, inserting it into a scalable vector, and
    // duplicating the low 128-bit segment across the vector with dupq_lane.
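    // For instance, svdupq_n_s32(a, b, c, d) is expected to build the fixed
    // vector {a, b, c, d}, insert it at index 0 of a poison
    // <vscale x 4 x i32>, and pass that to @llvm.aarch64.sve.dupq.lane with
    // lane index 0 (illustrative sketch).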
10693 | unsigned NumOpnds = Ops.size(); |
10694 | |
10695 | bool IsBoolTy = |
10696 | cast<llvm::VectorType>(Val: Ty)->getElementType()->isIntegerTy(Bitwidth: 1); |
10697 | |
    // For svdupq_n_b* the element type is an integer of width 128/NumOpnds,
    // so that the compare can use the width that is natural for the expected
    // number of predicate lanes.
10701 | llvm::Type *EltTy = Ops[0]->getType(); |
10702 | if (IsBoolTy) |
10703 | EltTy = IntegerType::get(C&: getLLVMContext(), NumBits: SVEBitsPerBlock / NumOpnds); |
10704 | |
10705 | SmallVector<llvm::Value *, 16> VecOps; |
10706 | for (unsigned I = 0; I < NumOpnds; ++I) |
10707 | VecOps.push_back(Elt: Builder.CreateZExt(V: Ops[I], DestTy: EltTy)); |
10708 | Value *Vec = BuildVector(Ops: VecOps); |
10709 | |
10710 | llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy); |
10711 | Value *InsertSubVec = Builder.CreateInsertVector( |
10712 | DstType: OverloadedTy, SrcVec: PoisonValue::get(T: OverloadedTy), SubVec: Vec, Idx: Builder.getInt64(C: 0)); |
10713 | |
10714 | Function *F = |
10715 | CGM.getIntrinsic(IID: Intrinsic::aarch64_sve_dupq_lane, Tys: OverloadedTy); |
10716 | Value *DupQLane = |
10717 | Builder.CreateCall(Callee: F, Args: {InsertSubVec, Builder.getInt64(C: 0)}); |
10718 | |
10719 | if (!IsBoolTy) |
10720 | return DupQLane; |
10721 | |
10722 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
10723 | Value *Pred = EmitSVEAllTruePred(TypeFlags); |
10724 | |
10725 | // For svdupq_n_b* we need to add an additional 'cmpne' with '0'. |
10726 | F = CGM.getIntrinsic(IID: NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne |
10727 | : Intrinsic::aarch64_sve_cmpne_wide, |
10728 | Tys: OverloadedTy); |
10729 | Value *Call = Builder.CreateCall( |
10730 | Callee: F, Args: {Pred, DupQLane, EmitSVEDupX(Scalar: Builder.getInt64(C: 0))}); |
10731 | return EmitSVEPredicateCast(Pred: Call, VTy: cast<llvm::ScalableVectorType>(Val: Ty)); |
10732 | } |
10733 | |
10734 | case SVE::BI__builtin_sve_svpfalse_b: |
10735 | return ConstantInt::getFalse(Ty); |
10736 | |
10737 | case SVE::BI__builtin_sve_svpfalse_c: { |
10738 | auto SVBoolTy = ScalableVectorType::get(ElementType: Builder.getInt1Ty(), MinNumElts: 16); |
10739 | Function *CastToSVCountF = |
10740 | CGM.getIntrinsic(IID: Intrinsic::aarch64_sve_convert_from_svbool, Tys: Ty); |
10741 | return Builder.CreateCall(Callee: CastToSVCountF, Args: ConstantInt::getFalse(Ty: SVBoolTy)); |
10742 | } |
10743 | |
10744 | case SVE::BI__builtin_sve_svlen_bf16: |
10745 | case SVE::BI__builtin_sve_svlen_f16: |
10746 | case SVE::BI__builtin_sve_svlen_f32: |
10747 | case SVE::BI__builtin_sve_svlen_f64: |
10748 | case SVE::BI__builtin_sve_svlen_s8: |
10749 | case SVE::BI__builtin_sve_svlen_s16: |
10750 | case SVE::BI__builtin_sve_svlen_s32: |
10751 | case SVE::BI__builtin_sve_svlen_s64: |
10752 | case SVE::BI__builtin_sve_svlen_u8: |
10753 | case SVE::BI__builtin_sve_svlen_u16: |
10754 | case SVE::BI__builtin_sve_svlen_u32: |
10755 | case SVE::BI__builtin_sve_svlen_u64: { |
10756 | SVETypeFlags TF(Builtin->TypeModifier); |
10757 | auto VTy = cast<llvm::VectorType>(Val: getSVEType(TypeFlags: TF)); |
10758 | auto *NumEls = |
10759 | llvm::ConstantInt::get(Ty, V: VTy->getElementCount().getKnownMinValue()); |
10760 | |
10761 | Function *F = CGM.getIntrinsic(IID: Intrinsic::vscale, Tys: Ty); |
10762 | return Builder.CreateMul(LHS: NumEls, RHS: Builder.CreateCall(Callee: F)); |
10763 | } |
10764 | |
10765 | case SVE::BI__builtin_sve_svtbl2_u8: |
10766 | case SVE::BI__builtin_sve_svtbl2_s8: |
10767 | case SVE::BI__builtin_sve_svtbl2_u16: |
10768 | case SVE::BI__builtin_sve_svtbl2_s16: |
10769 | case SVE::BI__builtin_sve_svtbl2_u32: |
10770 | case SVE::BI__builtin_sve_svtbl2_s32: |
10771 | case SVE::BI__builtin_sve_svtbl2_u64: |
10772 | case SVE::BI__builtin_sve_svtbl2_s64: |
10773 | case SVE::BI__builtin_sve_svtbl2_f16: |
10774 | case SVE::BI__builtin_sve_svtbl2_bf16: |
10775 | case SVE::BI__builtin_sve_svtbl2_f32: |
10776 | case SVE::BI__builtin_sve_svtbl2_f64: { |
10777 | SVETypeFlags TF(Builtin->TypeModifier); |
10778 | auto VTy = cast<llvm::ScalableVectorType>(Val: getSVEType(TypeFlags: TF)); |
10779 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_sve_tbl2, Tys: VTy); |
10780 | return Builder.CreateCall(Callee: F, Args: Ops); |
10781 | } |
10782 | |
10783 | case SVE::BI__builtin_sve_svset_neonq_s8: |
10784 | case SVE::BI__builtin_sve_svset_neonq_s16: |
10785 | case SVE::BI__builtin_sve_svset_neonq_s32: |
10786 | case SVE::BI__builtin_sve_svset_neonq_s64: |
10787 | case SVE::BI__builtin_sve_svset_neonq_u8: |
10788 | case SVE::BI__builtin_sve_svset_neonq_u16: |
10789 | case SVE::BI__builtin_sve_svset_neonq_u32: |
10790 | case SVE::BI__builtin_sve_svset_neonq_u64: |
10791 | case SVE::BI__builtin_sve_svset_neonq_f16: |
10792 | case SVE::BI__builtin_sve_svset_neonq_f32: |
10793 | case SVE::BI__builtin_sve_svset_neonq_f64: |
10794 | case SVE::BI__builtin_sve_svset_neonq_bf16: { |
10795 | return Builder.CreateInsertVector(DstType: Ty, SrcVec: Ops[0], SubVec: Ops[1], Idx: Builder.getInt64(C: 0)); |
10796 | } |
10797 | |
10798 | case SVE::BI__builtin_sve_svget_neonq_s8: |
10799 | case SVE::BI__builtin_sve_svget_neonq_s16: |
10800 | case SVE::BI__builtin_sve_svget_neonq_s32: |
10801 | case SVE::BI__builtin_sve_svget_neonq_s64: |
10802 | case SVE::BI__builtin_sve_svget_neonq_u8: |
10803 | case SVE::BI__builtin_sve_svget_neonq_u16: |
10804 | case SVE::BI__builtin_sve_svget_neonq_u32: |
10805 | case SVE::BI__builtin_sve_svget_neonq_u64: |
10806 | case SVE::BI__builtin_sve_svget_neonq_f16: |
10807 | case SVE::BI__builtin_sve_svget_neonq_f32: |
10808 | case SVE::BI__builtin_sve_svget_neonq_f64: |
10809 | case SVE::BI__builtin_sve_svget_neonq_bf16: { |
10810 | return Builder.CreateExtractVector(DstType: Ty, SrcVec: Ops[0], Idx: Builder.getInt64(C: 0)); |
10811 | } |
10812 | |
10813 | case SVE::BI__builtin_sve_svdup_neonq_s8: |
10814 | case SVE::BI__builtin_sve_svdup_neonq_s16: |
10815 | case SVE::BI__builtin_sve_svdup_neonq_s32: |
10816 | case SVE::BI__builtin_sve_svdup_neonq_s64: |
10817 | case SVE::BI__builtin_sve_svdup_neonq_u8: |
10818 | case SVE::BI__builtin_sve_svdup_neonq_u16: |
10819 | case SVE::BI__builtin_sve_svdup_neonq_u32: |
10820 | case SVE::BI__builtin_sve_svdup_neonq_u64: |
10821 | case SVE::BI__builtin_sve_svdup_neonq_f16: |
10822 | case SVE::BI__builtin_sve_svdup_neonq_f32: |
10823 | case SVE::BI__builtin_sve_svdup_neonq_f64: |
10824 | case SVE::BI__builtin_sve_svdup_neonq_bf16: { |
10825 | Value *Insert = Builder.CreateInsertVector(DstType: Ty, SrcVec: PoisonValue::get(T: Ty), SubVec: Ops[0], |
10826 | Idx: Builder.getInt64(C: 0)); |
10827 | return Builder.CreateIntrinsic(ID: Intrinsic::aarch64_sve_dupq_lane, Types: {Ty}, |
10828 | Args: {Insert, Builder.getInt64(C: 0)}); |
10829 | } |
10830 | } |
10831 | |
  // Should not happen.
10833 | return nullptr; |
10834 | } |
10835 | |
10836 | static void swapCommutativeSMEOperands(unsigned BuiltinID, |
10837 | SmallVectorImpl<Value *> &Ops) { |
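  // The svsudot/svsumla builtins handled below take their multiplicand
  // operands in the opposite order from the intrinsics they are lowered to,
  // so swap the (possibly multi-vector) operand groups; the ZA slice operand
  // in Ops[0] is left untouched.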
10838 | unsigned MultiVec; |
10839 | switch (BuiltinID) { |
10840 | default: |
10841 | return; |
10842 | case SME::BI__builtin_sme_svsumla_za32_s8_vg4x1: |
10843 | MultiVec = 1; |
10844 | break; |
10845 | case SME::BI__builtin_sme_svsumla_za32_s8_vg4x2: |
10846 | case SME::BI__builtin_sme_svsudot_za32_s8_vg1x2: |
10847 | MultiVec = 2; |
10848 | break; |
10849 | case SME::BI__builtin_sme_svsudot_za32_s8_vg1x4: |
10850 | case SME::BI__builtin_sme_svsumla_za32_s8_vg4x4: |
10851 | MultiVec = 4; |
10852 | break; |
10853 | } |
10854 | |
10855 | if (MultiVec > 0) |
10856 | for (unsigned I = 0; I < MultiVec; ++I) |
10857 | std::swap(a&: Ops[I + 1], b&: Ops[I + 1 + MultiVec]); |
10858 | } |
10859 | |
10860 | Value *CodeGenFunction::EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, |
10861 | const CallExpr *E) { |
10862 | auto *Builtin = findARMVectorIntrinsicInMap(IntrinsicMap: AArch64SMEIntrinsicMap, BuiltinID, |
10863 | MapProvenSorted&: AArch64SMEIntrinsicsProvenSorted); |
10864 | |
10865 | llvm::SmallVector<Value *, 4> Ops; |
10866 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
10867 | GetAArch64SVEProcessedOperands(BuiltinID, E, Ops, TypeFlags); |
10868 | |
10869 | if (TypeFlags.isLoad() || TypeFlags.isStore()) |
10870 | return EmitSMELd1St1(TypeFlags, Ops, IntID: Builtin->LLVMIntrinsic); |
10871 | else if (TypeFlags.isReadZA() || TypeFlags.isWriteZA()) |
10872 | return EmitSMEReadWrite(TypeFlags, Ops, IntID: Builtin->LLVMIntrinsic); |
10873 | else if (BuiltinID == SME::BI__builtin_sme_svzero_mask_za || |
10874 | BuiltinID == SME::BI__builtin_sme_svzero_za) |
10875 | return EmitSMEZero(TypeFlags, Ops, IntID: Builtin->LLVMIntrinsic); |
10876 | else if (BuiltinID == SME::BI__builtin_sme_svldr_vnum_za || |
10877 | BuiltinID == SME::BI__builtin_sme_svstr_vnum_za || |
10878 | BuiltinID == SME::BI__builtin_sme_svldr_za || |
10879 | BuiltinID == SME::BI__builtin_sme_svstr_za) |
10880 | return EmitSMELdrStr(TypeFlags, Ops, IntID: Builtin->LLVMIntrinsic); |
10881 | |
10882 | // Handle builtins which require their multi-vector operands to be swapped |
10883 | swapCommutativeSMEOperands(BuiltinID, Ops); |
10884 | |
10885 | // Should not happen! |
10886 | if (Builtin->LLVMIntrinsic == 0) |
10887 | return nullptr; |
10888 | |
10889 | // Predicates must match the main datatype. |
10890 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
10891 | if (auto PredTy = dyn_cast<llvm::VectorType>(Val: Ops[i]->getType())) |
10892 | if (PredTy->getElementType()->isIntegerTy(Bitwidth: 1)) |
10893 | Ops[i] = EmitSVEPredicateCast(Pred: Ops[i], VTy: getSVEType(TypeFlags)); |
10894 | |
10895 | Function *F = |
10896 | TypeFlags.isOverloadNone() |
10897 | ? CGM.getIntrinsic(IID: Builtin->LLVMIntrinsic) |
10898 | : CGM.getIntrinsic(IID: Builtin->LLVMIntrinsic, Tys: {getSVEType(TypeFlags)}); |
10899 | Value *Call = Builder.CreateCall(Callee: F, Args: Ops); |
10900 | |
10901 | return FormSVEBuiltinResult(Call); |
10902 | } |
10903 | |
10904 | Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, |
10905 | const CallExpr *E, |
10906 | llvm::Triple::ArchType Arch) { |
10907 | if (BuiltinID >= clang::AArch64::FirstSVEBuiltin && |
10908 | BuiltinID <= clang::AArch64::LastSVEBuiltin) |
10909 | return EmitAArch64SVEBuiltinExpr(BuiltinID, E); |
10910 | |
10911 | if (BuiltinID >= clang::AArch64::FirstSMEBuiltin && |
10912 | BuiltinID <= clang::AArch64::LastSMEBuiltin) |
10913 | return EmitAArch64SMEBuiltinExpr(BuiltinID, E); |
10914 | |
10915 | if (BuiltinID == Builtin::BI__builtin_cpu_supports) |
10916 | return EmitAArch64CpuSupports(E); |
10917 | |
10918 | unsigned HintID = static_cast<unsigned>(-1); |
10919 | switch (BuiltinID) { |
10920 | default: break; |
10921 | case clang::AArch64::BI__builtin_arm_nop: |
10922 | HintID = 0; |
10923 | break; |
10924 | case clang::AArch64::BI__builtin_arm_yield: |
10925 | case clang::AArch64::BI__yield: |
10926 | HintID = 1; |
10927 | break; |
10928 | case clang::AArch64::BI__builtin_arm_wfe: |
10929 | case clang::AArch64::BI__wfe: |
10930 | HintID = 2; |
10931 | break; |
10932 | case clang::AArch64::BI__builtin_arm_wfi: |
10933 | case clang::AArch64::BI__wfi: |
10934 | HintID = 3; |
10935 | break; |
10936 | case clang::AArch64::BI__builtin_arm_sev: |
10937 | case clang::AArch64::BI__sev: |
10938 | HintID = 4; |
10939 | break; |
10940 | case clang::AArch64::BI__builtin_arm_sevl: |
10941 | case clang::AArch64::BI__sevl: |
10942 | HintID = 5; |
10943 | break; |
10944 | } |
10945 | |
10946 | if (HintID != static_cast<unsigned>(-1)) { |
10947 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_hint); |
10948 | return Builder.CreateCall(Callee: F, Args: llvm::ConstantInt::get(Ty: Int32Ty, V: HintID)); |
10949 | } |
10950 | |
10951 | if (BuiltinID == clang::AArch64::BI__builtin_arm_trap) { |
10952 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_break); |
10953 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
10954 | return Builder.CreateCall(Callee: F, Args: Builder.CreateZExt(V: Arg, DestTy: CGM.Int32Ty)); |
10955 | } |
10956 | |
10957 | if (BuiltinID == clang::AArch64::BI__builtin_arm_get_sme_state) { |
10958 | // Create call to __arm_sme_state and store the results to the two pointers. |
10959 | CallInst *CI = EmitRuntimeCall(callee: CGM.CreateRuntimeFunction( |
10960 | Ty: llvm::FunctionType::get(Result: StructType::get(elt1: CGM.Int64Ty, elts: CGM.Int64Ty), Params: {}, |
10961 | isVarArg: false), |
10962 | Name: "__arm_sme_state" )); |
10963 | auto Attrs = AttributeList().addFnAttribute(C&: getLLVMContext(), |
10964 | Kind: "aarch64_pstate_sm_compatible" ); |
10965 | CI->setAttributes(Attrs); |
10966 | CI->setCallingConv( |
10967 | llvm::CallingConv:: |
10968 | AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2); |
10969 | Builder.CreateStore(Val: Builder.CreateExtractValue(Agg: CI, Idxs: 0), |
10970 | Addr: EmitPointerWithAlignment(Addr: E->getArg(Arg: 0))); |
10971 | return Builder.CreateStore(Val: Builder.CreateExtractValue(Agg: CI, Idxs: 1), |
10972 | Addr: EmitPointerWithAlignment(Addr: E->getArg(Arg: 1))); |
10973 | } |
10974 | |
10975 | if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit) { |
10976 | assert((getContext().getTypeSize(E->getType()) == 32) && |
10977 | "rbit of unusual size!" ); |
10978 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
10979 | return Builder.CreateCall( |
10980 | Callee: CGM.getIntrinsic(IID: Intrinsic::bitreverse, Tys: Arg->getType()), Args: Arg, Name: "rbit" ); |
10981 | } |
10982 | if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit64) { |
10983 | assert((getContext().getTypeSize(E->getType()) == 64) && |
10984 | "rbit of unusual size!" ); |
10985 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
10986 | return Builder.CreateCall( |
10987 | Callee: CGM.getIntrinsic(IID: Intrinsic::bitreverse, Tys: Arg->getType()), Args: Arg, Name: "rbit" ); |
10988 | } |
10989 | |
10990 | if (BuiltinID == clang::AArch64::BI__builtin_arm_clz || |
10991 | BuiltinID == clang::AArch64::BI__builtin_arm_clz64) { |
10992 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
10993 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: Arg->getType()); |
10994 | Value *Res = Builder.CreateCall(Callee: F, Args: {Arg, Builder.getInt1(V: false)}); |
10995 | if (BuiltinID == clang::AArch64::BI__builtin_arm_clz64) |
10996 | Res = Builder.CreateTrunc(V: Res, DestTy: Builder.getInt32Ty()); |
10997 | return Res; |
10998 | } |
10999 | |
11000 | if (BuiltinID == clang::AArch64::BI__builtin_arm_cls) { |
11001 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11002 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::aarch64_cls), Args: Arg, |
11003 | Name: "cls" ); |
11004 | } |
11005 | if (BuiltinID == clang::AArch64::BI__builtin_arm_cls64) { |
11006 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11007 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::aarch64_cls64), Args: Arg, |
11008 | Name: "cls" ); |
11009 | } |
11010 | |
11011 | if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32zf || |
11012 | BuiltinID == clang::AArch64::BI__builtin_arm_rint32z) { |
11013 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11014 | llvm::Type *Ty = Arg->getType(); |
11015 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::aarch64_frint32z, Tys: Ty), |
11016 | Args: Arg, Name: "frint32z" ); |
11017 | } |
11018 | |
11019 | if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64zf || |
11020 | BuiltinID == clang::AArch64::BI__builtin_arm_rint64z) { |
11021 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11022 | llvm::Type *Ty = Arg->getType(); |
11023 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::aarch64_frint64z, Tys: Ty), |
11024 | Args: Arg, Name: "frint64z" ); |
11025 | } |
11026 | |
11027 | if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32xf || |
11028 | BuiltinID == clang::AArch64::BI__builtin_arm_rint32x) { |
11029 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11030 | llvm::Type *Ty = Arg->getType(); |
11031 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::aarch64_frint32x, Tys: Ty), |
11032 | Args: Arg, Name: "frint32x" ); |
11033 | } |
11034 | |
11035 | if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64xf || |
11036 | BuiltinID == clang::AArch64::BI__builtin_arm_rint64x) { |
11037 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11038 | llvm::Type *Ty = Arg->getType(); |
11039 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::aarch64_frint64x, Tys: Ty), |
11040 | Args: Arg, Name: "frint64x" ); |
11041 | } |
11042 | |
11043 | if (BuiltinID == clang::AArch64::BI__builtin_arm_jcvt) { |
11044 | assert((getContext().getTypeSize(E->getType()) == 32) && |
11045 | "__jcvt of unusual size!" ); |
11046 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11047 | return Builder.CreateCall( |
11048 | Callee: CGM.getIntrinsic(IID: Intrinsic::aarch64_fjcvtzs), Args: Arg); |
11049 | } |
11050 | |
11051 | if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b || |
11052 | BuiltinID == clang::AArch64::BI__builtin_arm_st64b || |
11053 | BuiltinID == clang::AArch64::BI__builtin_arm_st64bv || |
11054 | BuiltinID == clang::AArch64::BI__builtin_arm_st64bv0) { |
11055 | llvm::Value *MemAddr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11056 | llvm::Value *ValPtr = EmitScalarExpr(E: E->getArg(Arg: 1)); |
11057 | |
11058 | if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b) { |
11059 | // Load from the address via an LLVM intrinsic, receiving a |
11060 | // tuple of 8 i64 words, and store each one to ValPtr. |
11061 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_ld64b); |
11062 | llvm::Value *Val = Builder.CreateCall(Callee: F, Args: MemAddr); |
11063 | llvm::Value *ToRet; |
11064 | for (size_t i = 0; i < 8; i++) { |
11065 | llvm::Value *ValOffsetPtr = |
11066 | Builder.CreateGEP(Ty: Int64Ty, Ptr: ValPtr, IdxList: Builder.getInt32(C: i)); |
11067 | Address Addr = |
11068 | Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(Quantity: 8)); |
11069 | ToRet = Builder.CreateStore(Val: Builder.CreateExtractValue(Agg: Val, Idxs: i), Addr); |
11070 | } |
11071 | return ToRet; |
11072 | } else { |
11073 | // Load 8 i64 words from ValPtr, and store them to the address |
11074 | // via an LLVM intrinsic. |
11075 | SmallVector<llvm::Value *, 9> Args; |
11076 | Args.push_back(Elt: MemAddr); |
11077 | for (size_t i = 0; i < 8; i++) { |
11078 | llvm::Value *ValOffsetPtr = |
11079 | Builder.CreateGEP(Ty: Int64Ty, Ptr: ValPtr, IdxList: Builder.getInt32(C: i)); |
11080 | Address Addr = |
11081 | Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(Quantity: 8)); |
11082 | Args.push_back(Elt: Builder.CreateLoad(Addr)); |
11083 | } |
11084 | |
11085 | auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_st64b |
11086 | ? Intrinsic::aarch64_st64b |
11087 | : BuiltinID == clang::AArch64::BI__builtin_arm_st64bv |
11088 | ? Intrinsic::aarch64_st64bv |
11089 | : Intrinsic::aarch64_st64bv0); |
11090 | Function *F = CGM.getIntrinsic(IID: Intr); |
11091 | return Builder.CreateCall(Callee: F, Args); |
11092 | } |
11093 | } |
11094 | |
11095 | if (BuiltinID == clang::AArch64::BI__builtin_arm_rndr || |
11096 | BuiltinID == clang::AArch64::BI__builtin_arm_rndrrs) { |
11097 | |
11098 | auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_rndr |
11099 | ? Intrinsic::aarch64_rndr |
11100 | : Intrinsic::aarch64_rndrrs); |
11101 | Function *F = CGM.getIntrinsic(IID: Intr); |
11102 | llvm::Value *Val = Builder.CreateCall(Callee: F); |
11103 | Value *RandomValue = Builder.CreateExtractValue(Agg: Val, Idxs: 0); |
11104 | Value *Status = Builder.CreateExtractValue(Agg: Val, Idxs: 1); |
11105 | |
11106 | Address MemAddress = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
11107 | Builder.CreateStore(Val: RandomValue, Addr: MemAddress); |
11108 | Status = Builder.CreateZExt(V: Status, DestTy: Int32Ty); |
11109 | return Status; |
11110 | } |
11111 | |
11112 | if (BuiltinID == clang::AArch64::BI__clear_cache) { |
11113 | assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments" ); |
11114 | const FunctionDecl *FD = E->getDirectCallee(); |
11115 | Value *Ops[2]; |
11116 | for (unsigned i = 0; i < 2; i++) |
11117 | Ops[i] = EmitScalarExpr(E: E->getArg(Arg: i)); |
11118 | llvm::Type *Ty = CGM.getTypes().ConvertType(T: FD->getType()); |
11119 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Val: Ty); |
11120 | StringRef Name = FD->getName(); |
11121 | return EmitNounwindRuntimeCall(callee: CGM.CreateRuntimeFunction(Ty: FTy, Name), args: Ops); |
11122 | } |
11123 | |
11124 | if ((BuiltinID == clang::AArch64::BI__builtin_arm_ldrex || |
11125 | BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) && |
11126 | getContext().getTypeSize(T: E->getType()) == 128) { |
11127 | Function *F = |
11128 | CGM.getIntrinsic(IID: BuiltinID == clang::AArch64::BI__builtin_arm_ldaex |
11129 | ? Intrinsic::aarch64_ldaxp |
11130 | : Intrinsic::aarch64_ldxp); |
11131 | |
11132 | Value *LdPtr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11133 | Value *Val = Builder.CreateCall(Callee: F, Args: LdPtr, Name: "ldxp" ); |
11134 | |
11135 | Value *Val0 = Builder.CreateExtractValue(Agg: Val, Idxs: 1); |
11136 | Value *Val1 = Builder.CreateExtractValue(Agg: Val, Idxs: 0); |
11137 | llvm::Type *Int128Ty = llvm::IntegerType::get(C&: getLLVMContext(), NumBits: 128); |
11138 | Val0 = Builder.CreateZExt(V: Val0, DestTy: Int128Ty); |
11139 | Val1 = Builder.CreateZExt(V: Val1, DestTy: Int128Ty); |
11140 | |
11141 | Value *ShiftCst = llvm::ConstantInt::get(Ty: Int128Ty, V: 64); |
11142 | Val = Builder.CreateShl(LHS: Val0, RHS: ShiftCst, Name: "shl" , HasNUW: true /* nuw */); |
11143 | Val = Builder.CreateOr(LHS: Val, RHS: Val1); |
11144 | return Builder.CreateBitCast(V: Val, DestTy: ConvertType(T: E->getType())); |
11145 | } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex || |
11146 | BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) { |
11147 | Value *LoadAddr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11148 | |
11149 | QualType Ty = E->getType(); |
11150 | llvm::Type *RealResTy = ConvertType(T: Ty); |
11151 | llvm::Type *IntTy = |
11152 | llvm::IntegerType::get(C&: getLLVMContext(), NumBits: getContext().getTypeSize(T: Ty)); |
11153 | |
11154 | Function *F = |
11155 | CGM.getIntrinsic(IID: BuiltinID == clang::AArch64::BI__builtin_arm_ldaex |
11156 | ? Intrinsic::aarch64_ldaxr |
11157 | : Intrinsic::aarch64_ldxr, |
11158 | Tys: UnqualPtrTy); |
11159 | CallInst *Val = Builder.CreateCall(Callee: F, Args: LoadAddr, Name: "ldxr" ); |
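    // The width of the exclusive load is conveyed via the 'elementtype'
    // parameter attribute below, since it is not recoverable from the
    // (opaque) pointer argument.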
11160 | Val->addParamAttr( |
11161 | ArgNo: 0, Attr: Attribute::get(Context&: getLLVMContext(), Kind: Attribute::ElementType, Ty: IntTy)); |
11162 | |
11163 | if (RealResTy->isPointerTy()) |
11164 | return Builder.CreateIntToPtr(V: Val, DestTy: RealResTy); |
11165 | |
11166 | llvm::Type *IntResTy = llvm::IntegerType::get( |
11167 | C&: getLLVMContext(), NumBits: CGM.getDataLayout().getTypeSizeInBits(Ty: RealResTy)); |
11168 | return Builder.CreateBitCast(V: Builder.CreateTruncOrBitCast(V: Val, DestTy: IntResTy), |
11169 | DestTy: RealResTy); |
11170 | } |
11171 | |
11172 | if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex || |
11173 | BuiltinID == clang::AArch64::BI__builtin_arm_stlex) && |
11174 | getContext().getTypeSize(T: E->getArg(Arg: 0)->getType()) == 128) { |
11175 | Function *F = |
11176 | CGM.getIntrinsic(IID: BuiltinID == clang::AArch64::BI__builtin_arm_stlex |
11177 | ? Intrinsic::aarch64_stlxp |
11178 | : Intrinsic::aarch64_stxp); |
11179 | llvm::Type *STy = llvm::StructType::get(elt1: Int64Ty, elts: Int64Ty); |
11180 | |
11181 | Address Tmp = CreateMemTemp(T: E->getArg(Arg: 0)->getType()); |
11182 | EmitAnyExprToMem(E: E->getArg(Arg: 0), Location: Tmp, Quals: Qualifiers(), /*init*/ IsInitializer: true); |
11183 | |
11184 | Tmp = Tmp.withElementType(ElemTy: STy); |
11185 | llvm::Value *Val = Builder.CreateLoad(Addr: Tmp); |
11186 | |
11187 | Value *Arg0 = Builder.CreateExtractValue(Agg: Val, Idxs: 0); |
11188 | Value *Arg1 = Builder.CreateExtractValue(Agg: Val, Idxs: 1); |
11189 | Value *StPtr = EmitScalarExpr(E: E->getArg(Arg: 1)); |
11190 | return Builder.CreateCall(Callee: F, Args: {Arg0, Arg1, StPtr}, Name: "stxp" ); |
11191 | } |
11192 | |
11193 | if (BuiltinID == clang::AArch64::BI__builtin_arm_strex || |
11194 | BuiltinID == clang::AArch64::BI__builtin_arm_stlex) { |
11195 | Value *StoreVal = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11196 | Value *StoreAddr = EmitScalarExpr(E: E->getArg(Arg: 1)); |
11197 | |
11198 | QualType Ty = E->getArg(Arg: 0)->getType(); |
11199 | llvm::Type *StoreTy = |
11200 | llvm::IntegerType::get(C&: getLLVMContext(), NumBits: getContext().getTypeSize(T: Ty)); |
11201 | |
11202 | if (StoreVal->getType()->isPointerTy()) |
11203 | StoreVal = Builder.CreatePtrToInt(V: StoreVal, DestTy: Int64Ty); |
11204 | else { |
11205 | llvm::Type *IntTy = llvm::IntegerType::get( |
11206 | C&: getLLVMContext(), |
11207 | NumBits: CGM.getDataLayout().getTypeSizeInBits(Ty: StoreVal->getType())); |
11208 | StoreVal = Builder.CreateBitCast(V: StoreVal, DestTy: IntTy); |
11209 | StoreVal = Builder.CreateZExtOrBitCast(V: StoreVal, DestTy: Int64Ty); |
11210 | } |
11211 | |
11212 | Function *F = |
11213 | CGM.getIntrinsic(IID: BuiltinID == clang::AArch64::BI__builtin_arm_stlex |
11214 | ? Intrinsic::aarch64_stlxr |
11215 | : Intrinsic::aarch64_stxr, |
11216 | Tys: StoreAddr->getType()); |
11217 | CallInst *CI = Builder.CreateCall(Callee: F, Args: {StoreVal, StoreAddr}, Name: "stxr" ); |
11218 | CI->addParamAttr( |
11219 | ArgNo: 1, Attr: Attribute::get(Context&: getLLVMContext(), Kind: Attribute::ElementType, Ty: StoreTy)); |
11220 | return CI; |
11221 | } |
11222 | |
11223 | if (BuiltinID == clang::AArch64::BI__getReg) { |
11224 | Expr::EvalResult Result; |
11225 | if (!E->getArg(Arg: 0)->EvaluateAsInt(Result, Ctx: CGM.getContext())) |
11226 | llvm_unreachable("Sema will ensure that the parameter is constant" ); |
11227 | |
11228 | llvm::APSInt Value = Result.Val.getInt(); |
11229 | LLVMContext &Context = CGM.getLLVMContext(); |
11230 | std::string Reg = Value == 31 ? "sp" : "x" + toString(I: Value, Radix: 10); |
11231 | |
11232 | llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Str: Reg)}; |
11233 | llvm::MDNode *RegName = llvm::MDNode::get(Context, MDs: Ops); |
11234 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, MD: RegName); |
11235 | |
11236 | llvm::Function *F = |
11237 | CGM.getIntrinsic(IID: llvm::Intrinsic::read_register, Tys: {Int64Ty}); |
11238 | return Builder.CreateCall(Callee: F, Args: Metadata); |
11239 | } |
11240 | |
11241 | if (BuiltinID == clang::AArch64::BI__break) { |
11242 | Expr::EvalResult Result; |
11243 | if (!E->getArg(Arg: 0)->EvaluateAsInt(Result, Ctx: CGM.getContext())) |
11244 | llvm_unreachable("Sema will ensure that the parameter is constant" ); |
11245 | |
11246 | llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::aarch64_break); |
11247 | return Builder.CreateCall(Callee: F, Args: {EmitScalarExpr(E: E->getArg(Arg: 0))}); |
11248 | } |
11249 | |
11250 | if (BuiltinID == clang::AArch64::BI__builtin_arm_clrex) { |
11251 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_clrex); |
11252 | return Builder.CreateCall(Callee: F); |
11253 | } |
11254 | |
11255 | if (BuiltinID == clang::AArch64::BI_ReadWriteBarrier) |
11256 | return Builder.CreateFence(Ordering: llvm::AtomicOrdering::SequentiallyConsistent, |
11257 | SSID: llvm::SyncScope::SingleThread); |
11258 | |
11259 | // CRC32 |
11260 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; |
11261 | switch (BuiltinID) { |
11262 | case clang::AArch64::BI__builtin_arm_crc32b: |
11263 | CRCIntrinsicID = Intrinsic::aarch64_crc32b; break; |
11264 | case clang::AArch64::BI__builtin_arm_crc32cb: |
11265 | CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break; |
11266 | case clang::AArch64::BI__builtin_arm_crc32h: |
11267 | CRCIntrinsicID = Intrinsic::aarch64_crc32h; break; |
11268 | case clang::AArch64::BI__builtin_arm_crc32ch: |
11269 | CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break; |
11270 | case clang::AArch64::BI__builtin_arm_crc32w: |
11271 | CRCIntrinsicID = Intrinsic::aarch64_crc32w; break; |
11272 | case clang::AArch64::BI__builtin_arm_crc32cw: |
11273 | CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break; |
11274 | case clang::AArch64::BI__builtin_arm_crc32d: |
11275 | CRCIntrinsicID = Intrinsic::aarch64_crc32x; break; |
11276 | case clang::AArch64::BI__builtin_arm_crc32cd: |
11277 | CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break; |
11278 | } |
11279 | |
11280 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { |
11281 | Value *Arg0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11282 | Value *Arg1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
11283 | Function *F = CGM.getIntrinsic(IID: CRCIntrinsicID); |
11284 | |
11285 | llvm::Type *DataTy = F->getFunctionType()->getParamType(i: 1); |
11286 | Arg1 = Builder.CreateZExtOrBitCast(V: Arg1, DestTy: DataTy); |
11287 | |
11288 | return Builder.CreateCall(Callee: F, Args: {Arg0, Arg1}); |
11289 | } |
11290 | |
11291 | // Memory Operations (MOPS) |
11292 | if (BuiltinID == AArch64::BI__builtin_arm_mops_memset_tag) { |
11293 | Value *Dst = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11294 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 1)); |
11295 | Value *Size = EmitScalarExpr(E: E->getArg(Arg: 2)); |
11296 | Dst = Builder.CreatePointerCast(V: Dst, DestTy: Int8PtrTy); |
11297 | Val = Builder.CreateTrunc(V: Val, DestTy: Int8Ty); |
11298 | Size = Builder.CreateIntCast(V: Size, DestTy: Int64Ty, isSigned: false); |
11299 | return Builder.CreateCall( |
11300 | Callee: CGM.getIntrinsic(IID: Intrinsic::aarch64_mops_memset_tag), Args: {Dst, Val, Size}); |
11301 | } |
11302 | |
11303 | // Memory Tagging Extensions (MTE) Intrinsics |
11304 | Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic; |
11305 | switch (BuiltinID) { |
11306 | case clang::AArch64::BI__builtin_arm_irg: |
11307 | MTEIntrinsicID = Intrinsic::aarch64_irg; break; |
11308 | case clang::AArch64::BI__builtin_arm_addg: |
11309 | MTEIntrinsicID = Intrinsic::aarch64_addg; break; |
11310 | case clang::AArch64::BI__builtin_arm_gmi: |
11311 | MTEIntrinsicID = Intrinsic::aarch64_gmi; break; |
11312 | case clang::AArch64::BI__builtin_arm_ldg: |
11313 | MTEIntrinsicID = Intrinsic::aarch64_ldg; break; |
11314 | case clang::AArch64::BI__builtin_arm_stg: |
11315 | MTEIntrinsicID = Intrinsic::aarch64_stg; break; |
11316 | case clang::AArch64::BI__builtin_arm_subp: |
11317 | MTEIntrinsicID = Intrinsic::aarch64_subp; break; |
11318 | } |
11319 | |
11320 | if (MTEIntrinsicID != Intrinsic::not_intrinsic) { |
11321 | llvm::Type *T = ConvertType(T: E->getType()); |
11322 | |
11323 | if (MTEIntrinsicID == Intrinsic::aarch64_irg) { |
11324 | Value *Pointer = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11325 | Value *Mask = EmitScalarExpr(E: E->getArg(Arg: 1)); |
11326 | |
11327 | Pointer = Builder.CreatePointerCast(V: Pointer, DestTy: Int8PtrTy); |
11328 | Mask = Builder.CreateZExt(V: Mask, DestTy: Int64Ty); |
11329 | Value *RV = Builder.CreateCall( |
11330 | Callee: CGM.getIntrinsic(IID: MTEIntrinsicID), Args: {Pointer, Mask}); |
11331 | return Builder.CreatePointerCast(V: RV, DestTy: T); |
11332 | } |
11333 | if (MTEIntrinsicID == Intrinsic::aarch64_addg) { |
11334 | Value *Pointer = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11335 | Value *TagOffset = EmitScalarExpr(E: E->getArg(Arg: 1)); |
11336 | |
11337 | Pointer = Builder.CreatePointerCast(V: Pointer, DestTy: Int8PtrTy); |
11338 | TagOffset = Builder.CreateZExt(V: TagOffset, DestTy: Int64Ty); |
11339 | Value *RV = Builder.CreateCall( |
11340 | Callee: CGM.getIntrinsic(IID: MTEIntrinsicID), Args: {Pointer, TagOffset}); |
11341 | return Builder.CreatePointerCast(V: RV, DestTy: T); |
11342 | } |
11343 | if (MTEIntrinsicID == Intrinsic::aarch64_gmi) { |
11344 | Value *Pointer = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11345 | Value *ExcludedMask = EmitScalarExpr(E: E->getArg(Arg: 1)); |
11346 | |
11347 | ExcludedMask = Builder.CreateZExt(V: ExcludedMask, DestTy: Int64Ty); |
11348 | Pointer = Builder.CreatePointerCast(V: Pointer, DestTy: Int8PtrTy); |
11349 | return Builder.CreateCall( |
11350 | Callee: CGM.getIntrinsic(IID: MTEIntrinsicID), Args: {Pointer, ExcludedMask}); |
11351 | } |
    // Although it is possible to supply a different return address (first
    // arg) to this intrinsic, for now we set the return address to be the
    // same as the input address.
11355 | if (MTEIntrinsicID == Intrinsic::aarch64_ldg) { |
11356 | Value *TagAddress = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11357 | TagAddress = Builder.CreatePointerCast(V: TagAddress, DestTy: Int8PtrTy); |
11358 | Value *RV = Builder.CreateCall( |
11359 | Callee: CGM.getIntrinsic(IID: MTEIntrinsicID), Args: {TagAddress, TagAddress}); |
11360 | return Builder.CreatePointerCast(V: RV, DestTy: T); |
11361 | } |
    // Although it is possible to supply a different tag (to set) to this
    // intrinsic (as the first arg), for now we supply the tag that is already
    // in the input address argument (the common use case).
11365 | if (MTEIntrinsicID == Intrinsic::aarch64_stg) { |
11366 | Value *TagAddress = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11367 | TagAddress = Builder.CreatePointerCast(V: TagAddress, DestTy: Int8PtrTy); |
11368 | return Builder.CreateCall( |
11369 | Callee: CGM.getIntrinsic(IID: MTEIntrinsicID), Args: {TagAddress, TagAddress}); |
11370 | } |
11371 | if (MTEIntrinsicID == Intrinsic::aarch64_subp) { |
11372 | Value *PointerA = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11373 | Value *PointerB = EmitScalarExpr(E: E->getArg(Arg: 1)); |
11374 | PointerA = Builder.CreatePointerCast(V: PointerA, DestTy: Int8PtrTy); |
11375 | PointerB = Builder.CreatePointerCast(V: PointerB, DestTy: Int8PtrTy); |
11376 | return Builder.CreateCall( |
11377 | Callee: CGM.getIntrinsic(IID: MTEIntrinsicID), Args: {PointerA, PointerB}); |
11378 | } |
11379 | } |
11380 | |
11381 | if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr || |
11382 | BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 || |
11383 | BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 || |
11384 | BuiltinID == clang::AArch64::BI__builtin_arm_rsrp || |
11385 | BuiltinID == clang::AArch64::BI__builtin_arm_wsr || |
11386 | BuiltinID == clang::AArch64::BI__builtin_arm_wsr64 || |
11387 | BuiltinID == clang::AArch64::BI__builtin_arm_wsr128 || |
11388 | BuiltinID == clang::AArch64::BI__builtin_arm_wsrp) { |
11389 | |
11390 | SpecialRegisterAccessKind AccessKind = Write; |
11391 | if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr || |
11392 | BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 || |
11393 | BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 || |
11394 | BuiltinID == clang::AArch64::BI__builtin_arm_rsrp) |
11395 | AccessKind = VolatileRead; |
11396 | |
11397 | bool IsPointerBuiltin = BuiltinID == clang::AArch64::BI__builtin_arm_rsrp || |
11398 | BuiltinID == clang::AArch64::BI__builtin_arm_wsrp; |
11399 | |
11400 | bool Is32Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr || |
11401 | BuiltinID == clang::AArch64::BI__builtin_arm_wsr; |
11402 | |
11403 | bool Is128Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 || |
11404 | BuiltinID == clang::AArch64::BI__builtin_arm_wsr128; |
11405 | |
11406 | llvm::Type *ValueType; |
11407 | llvm::Type *RegisterType = Int64Ty; |
11408 | if (Is32Bit) { |
11409 | ValueType = Int32Ty; |
11410 | } else if (Is128Bit) { |
11411 | llvm::Type *Int128Ty = |
11412 | llvm::IntegerType::getInt128Ty(C&: CGM.getLLVMContext()); |
11413 | ValueType = Int128Ty; |
11414 | RegisterType = Int128Ty; |
11415 | } else if (IsPointerBuiltin) { |
11416 | ValueType = VoidPtrTy; |
11417 | } else { |
11418 | ValueType = Int64Ty; |
11419 | }; |
11420 | |
11421 | return EmitSpecialRegisterBuiltin(CGF&: *this, E, RegisterType, ValueType, |
11422 | AccessKind); |
11423 | } |
11424 | |
11425 | if (BuiltinID == clang::AArch64::BI_ReadStatusReg || |
11426 | BuiltinID == clang::AArch64::BI_WriteStatusReg) { |
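    // The register is given as an immediate in the MSVC ARM64_SYSREG encoding:
    // the low bit of op0 is in bit 14 (op0's high bit is implicitly set), op1
    // is in bits [13:11], CRn in [10:7], CRm in [6:3] and op2 in [2:0]. The
    // fields are decoded below into the "o0:op1:CRn:CRm:op2" string form that
    // llvm.read_register / llvm.write_register expect.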
11427 | LLVMContext &Context = CGM.getLLVMContext(); |
11428 | |
11429 | unsigned SysReg = |
11430 | E->getArg(Arg: 0)->EvaluateKnownConstInt(Ctx: getContext()).getZExtValue(); |
11431 | |
11432 | std::string SysRegStr; |
11433 | llvm::raw_string_ostream(SysRegStr) << |
11434 | ((1 << 1) | ((SysReg >> 14) & 1)) << ":" << |
11435 | ((SysReg >> 11) & 7) << ":" << |
11436 | ((SysReg >> 7) & 15) << ":" << |
11437 | ((SysReg >> 3) & 15) << ":" << |
11438 | ( SysReg & 7); |
11439 | |
11440 | llvm::Metadata *Ops[] = { llvm::MDString::get(Context, Str: SysRegStr) }; |
11441 | llvm::MDNode *RegName = llvm::MDNode::get(Context, MDs: Ops); |
11442 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, MD: RegName); |
11443 | |
11444 | llvm::Type *RegisterType = Int64Ty; |
11445 | llvm::Type *Types[] = { RegisterType }; |
11446 | |
11447 | if (BuiltinID == clang::AArch64::BI_ReadStatusReg) { |
11448 | llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::read_register, Tys: Types); |
11449 | |
11450 | return Builder.CreateCall(Callee: F, Args: Metadata); |
11451 | } |
11452 | |
11453 | llvm::Function *F = CGM.getIntrinsic(IID: llvm::Intrinsic::write_register, Tys: Types); |
11454 | llvm::Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 1)); |
11455 | |
11456 | return Builder.CreateCall(Callee: F, Args: { Metadata, ArgValue }); |
11457 | } |
11458 | |
11459 | if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) { |
11460 | llvm::Function *F = |
11461 | CGM.getIntrinsic(IID: Intrinsic::addressofreturnaddress, Tys: AllocaInt8PtrTy); |
11462 | return Builder.CreateCall(Callee: F); |
11463 | } |
11464 | |
11465 | if (BuiltinID == clang::AArch64::BI__builtin_sponentry) { |
11466 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::sponentry, Tys: AllocaInt8PtrTy); |
11467 | return Builder.CreateCall(Callee: F); |
11468 | } |
11469 | |
11470 | if (BuiltinID == clang::AArch64::BI__mulh || |
11471 | BuiltinID == clang::AArch64::BI__umulh) { |
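    // __mulh and __umulh return the high 64 bits of the full 128-bit product
    // of the two 64-bit operands, so widen, multiply and shift right by 64.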
11472 | llvm::Type *ResType = ConvertType(T: E->getType()); |
11473 | llvm::Type *Int128Ty = llvm::IntegerType::get(C&: getLLVMContext(), NumBits: 128); |
11474 | |
11475 | bool IsSigned = BuiltinID == clang::AArch64::BI__mulh; |
11476 | Value *LHS = |
11477 | Builder.CreateIntCast(V: EmitScalarExpr(E: E->getArg(Arg: 0)), DestTy: Int128Ty, isSigned: IsSigned); |
11478 | Value *RHS = |
11479 | Builder.CreateIntCast(V: EmitScalarExpr(E: E->getArg(Arg: 1)), DestTy: Int128Ty, isSigned: IsSigned); |
11480 | |
11481 | Value *MulResult, *HigherBits; |
11482 | if (IsSigned) { |
11483 | MulResult = Builder.CreateNSWMul(LHS, RHS); |
11484 | HigherBits = Builder.CreateAShr(LHS: MulResult, RHS: 64); |
11485 | } else { |
11486 | MulResult = Builder.CreateNUWMul(LHS, RHS); |
11487 | HigherBits = Builder.CreateLShr(LHS: MulResult, RHS: 64); |
11488 | } |
11489 | HigherBits = Builder.CreateIntCast(V: HigherBits, DestTy: ResType, isSigned: IsSigned); |
11490 | |
11491 | return HigherBits; |
11492 | } |
11493 | |
11494 | if (BuiltinID == AArch64::BI__writex18byte || |
11495 | BuiltinID == AArch64::BI__writex18word || |
11496 | BuiltinID == AArch64::BI__writex18dword || |
11497 | BuiltinID == AArch64::BI__writex18qword) { |
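    // The __writex18* intrinsics store their value at the given byte offset
    // from the x18 platform register (which Windows reserves for the TEB).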
11498 | // Read x18 as i8* |
11499 | LLVMContext &Context = CGM.getLLVMContext(); |
11500 | llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Str: "x18" )}; |
11501 | llvm::MDNode *RegName = llvm::MDNode::get(Context, MDs: Ops); |
11502 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, MD: RegName); |
11503 | llvm::Function *F = |
11504 | CGM.getIntrinsic(IID: llvm::Intrinsic::read_register, Tys: {Int64Ty}); |
11505 | llvm::Value *X18 = Builder.CreateCall(Callee: F, Args: Metadata); |
11506 | X18 = Builder.CreateIntToPtr(V: X18, DestTy: Int8PtrTy); |
11507 | |
11508 | // Store val at x18 + offset |
11509 | Value *Offset = Builder.CreateZExt(V: EmitScalarExpr(E: E->getArg(Arg: 0)), DestTy: Int64Ty); |
11510 | Value *Ptr = Builder.CreateGEP(Ty: Int8Ty, Ptr: X18, IdxList: Offset); |
11511 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 1)); |
11512 | StoreInst *Store = Builder.CreateAlignedStore(Val, Addr: Ptr, Align: CharUnits::One()); |
11513 | return Store; |
11514 | } |
11515 | |
11516 | if (BuiltinID == AArch64::BI__readx18byte || |
11517 | BuiltinID == AArch64::BI__readx18word || |
11518 | BuiltinID == AArch64::BI__readx18dword || |
11519 | BuiltinID == AArch64::BI__readx18qword) { |
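    // The __readx18* intrinsics are the corresponding loads from a byte
    // offset off the x18 platform register.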
11520 | llvm::Type *IntTy = ConvertType(T: E->getType()); |
11521 | |
11522 | // Read x18 as i8* |
11523 | LLVMContext &Context = CGM.getLLVMContext(); |
11524 | llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Str: "x18" )}; |
11525 | llvm::MDNode *RegName = llvm::MDNode::get(Context, MDs: Ops); |
11526 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, MD: RegName); |
11527 | llvm::Function *F = |
11528 | CGM.getIntrinsic(IID: llvm::Intrinsic::read_register, Tys: {Int64Ty}); |
11529 | llvm::Value *X18 = Builder.CreateCall(Callee: F, Args: Metadata); |
11530 | X18 = Builder.CreateIntToPtr(V: X18, DestTy: Int8PtrTy); |
11531 | |
11532 | // Load x18 + offset |
11533 | Value *Offset = Builder.CreateZExt(V: EmitScalarExpr(E: E->getArg(Arg: 0)), DestTy: Int64Ty); |
11534 | Value *Ptr = Builder.CreateGEP(Ty: Int8Ty, Ptr: X18, IdxList: Offset); |
11535 | LoadInst *Load = Builder.CreateAlignedLoad(Ty: IntTy, Addr: Ptr, Align: CharUnits::One()); |
11536 | return Load; |
11537 | } |
11538 | |
11539 | if (BuiltinID == AArch64::BI_CopyDoubleFromInt64 || |
11540 | BuiltinID == AArch64::BI_CopyFloatFromInt32 || |
11541 | BuiltinID == AArch64::BI_CopyInt32FromFloat || |
11542 | BuiltinID == AArch64::BI_CopyInt64FromDouble) { |
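    // These MSVC intrinsics just reinterpret the bits of the argument as the
    // result type, so a plain bitcast suffices.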
11543 | Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11544 | llvm::Type *RetTy = ConvertType(T: E->getType()); |
11545 | return Builder.CreateBitCast(V: Arg, DestTy: RetTy); |
11546 | } |
11547 | |
11548 | if (BuiltinID == AArch64::BI_CountLeadingOnes || |
11549 | BuiltinID == AArch64::BI_CountLeadingOnes64 || |
11550 | BuiltinID == AArch64::BI_CountLeadingZeros || |
11551 | BuiltinID == AArch64::BI_CountLeadingZeros64) { |
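    // _CountLeadingOnes* invert the argument and then count leading zeros via
    // llvm.ctlz; the i1 false operand makes a zero input well-defined. The
    // 64-bit variants still return a 32-bit result, hence the truncation.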
11552 | Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11553 | llvm::Type *ArgType = Arg->getType(); |
11554 | |
11555 | if (BuiltinID == AArch64::BI_CountLeadingOnes || |
11556 | BuiltinID == AArch64::BI_CountLeadingOnes64) |
11557 | Arg = Builder.CreateXor(LHS: Arg, RHS: Constant::getAllOnesValue(Ty: ArgType)); |
11558 | |
11559 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: ArgType); |
11560 | Value *Result = Builder.CreateCall(Callee: F, Args: {Arg, Builder.getInt1(V: false)}); |
11561 | |
11562 | if (BuiltinID == AArch64::BI_CountLeadingOnes64 || |
11563 | BuiltinID == AArch64::BI_CountLeadingZeros64) |
11564 | Result = Builder.CreateTrunc(V: Result, DestTy: Builder.getInt32Ty()); |
11565 | return Result; |
11566 | } |
11567 | |
11568 | if (BuiltinID == AArch64::BI_CountLeadingSigns || |
11569 | BuiltinID == AArch64::BI_CountLeadingSigns64) { |
11570 | Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11571 | |
11572 | Function *F = (BuiltinID == AArch64::BI_CountLeadingSigns) |
11573 | ? CGM.getIntrinsic(IID: Intrinsic::aarch64_cls) |
11574 | : CGM.getIntrinsic(IID: Intrinsic::aarch64_cls64); |
11575 | |
11576 | Value *Result = Builder.CreateCall(Callee: F, Args: Arg, Name: "cls" ); |
11577 | if (BuiltinID == AArch64::BI_CountLeadingSigns64) |
11578 | Result = Builder.CreateTrunc(V: Result, DestTy: Builder.getInt32Ty()); |
11579 | return Result; |
11580 | } |
11581 | |
11582 | if (BuiltinID == AArch64::BI_CountOneBits || |
11583 | BuiltinID == AArch64::BI_CountOneBits64) { |
11584 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11585 | llvm::Type *ArgType = ArgValue->getType(); |
11586 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctpop, Tys: ArgType); |
11587 | |
11588 | Value *Result = Builder.CreateCall(Callee: F, Args: ArgValue); |
11589 | if (BuiltinID == AArch64::BI_CountOneBits64) |
11590 | Result = Builder.CreateTrunc(V: Result, DestTy: Builder.getInt32Ty()); |
11591 | return Result; |
11592 | } |
11593 | |
11594 | if (BuiltinID == AArch64::BI__prefetch) { |
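    // __prefetch is a data read prefetch with maximal temporal locality
    // (rw = 0, locality = 3, data = 1).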
11595 | Value *Address = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11596 | Value *RW = llvm::ConstantInt::get(Ty: Int32Ty, V: 0); |
11597 | Value *Locality = ConstantInt::get(Ty: Int32Ty, V: 3); |
11598 | Value *Data = llvm::ConstantInt::get(Ty: Int32Ty, V: 1); |
11599 | Function *F = CGM.getIntrinsic(IID: Intrinsic::prefetch, Tys: Address->getType()); |
11600 | return Builder.CreateCall(Callee: F, Args: {Address, RW, Locality, Data}); |
11601 | } |
11602 | |
11603 | if (BuiltinID == AArch64::BI__hlt) { |
11604 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_hlt); |
11605 | Builder.CreateCall(Callee: F, Args: {EmitScalarExpr(E: E->getArg(Arg: 0))}); |
11606 | |
11607 | // Return 0 for convenience, even though MSVC returns some other undefined |
11608 | // value. |
11609 | return ConstantInt::get(Ty: Builder.getInt32Ty(), V: 0); |
11610 | } |
11611 | |
11612 | // Handle MSVC intrinsics before argument evaluation to prevent double |
11613 | // evaluation. |
11614 | if (std::optional<MSVCIntrin> MsvcIntId = |
11615 | translateAarch64ToMsvcIntrin(BuiltinID)) |
11616 | return EmitMSVCBuiltinExpr(BuiltinID: *MsvcIntId, E); |
11617 | |
  // Some intrinsics are equivalent; if so, use the base intrinsic ID.
11619 | auto It = llvm::find_if(Range: NEONEquivalentIntrinsicMap, P: [BuiltinID](auto &P) { |
11620 | return P.first == BuiltinID; |
11621 | }); |
11622 | if (It != end(arr: NEONEquivalentIntrinsicMap)) |
11623 | BuiltinID = It->second; |
11624 | |
11625 | // Find out if any arguments are required to be integer constant |
11626 | // expressions. |
11627 | unsigned ICEArguments = 0; |
11628 | ASTContext::GetBuiltinTypeError Error; |
11629 | getContext().GetBuiltinType(ID: BuiltinID, Error, IntegerConstantArgs: &ICEArguments); |
11630 | assert(Error == ASTContext::GE_None && "Should not codegen an error" ); |
11631 | |
11632 | llvm::SmallVector<Value*, 4> Ops; |
11633 | Address PtrOp0 = Address::invalid(); |
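  // Emit all arguments except the last one here; depending on the builtin,
  // the last argument is either a normal operand (emitted below for SISD
  // intrinsics) or the immediate that encodes the NeonTypeFlags.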
11634 | for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { |
11635 | if (i == 0) { |
11636 | switch (BuiltinID) { |
11637 | case NEON::BI__builtin_neon_vld1_v: |
11638 | case NEON::BI__builtin_neon_vld1q_v: |
11639 | case NEON::BI__builtin_neon_vld1_dup_v: |
11640 | case NEON::BI__builtin_neon_vld1q_dup_v: |
11641 | case NEON::BI__builtin_neon_vld1_lane_v: |
11642 | case NEON::BI__builtin_neon_vld1q_lane_v: |
11643 | case NEON::BI__builtin_neon_vst1_v: |
11644 | case NEON::BI__builtin_neon_vst1q_v: |
11645 | case NEON::BI__builtin_neon_vst1_lane_v: |
11646 | case NEON::BI__builtin_neon_vst1q_lane_v: |
11647 | case NEON::BI__builtin_neon_vldap1_lane_s64: |
11648 | case NEON::BI__builtin_neon_vldap1q_lane_s64: |
11649 | case NEON::BI__builtin_neon_vstl1_lane_s64: |
11650 | case NEON::BI__builtin_neon_vstl1q_lane_s64: |
11651 | // Get the alignment for the argument in addition to the value; |
11652 | // we'll use it later. |
11653 | PtrOp0 = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
11654 | Ops.push_back(Elt: PtrOp0.emitRawPointer(CGF&: *this)); |
11655 | continue; |
11656 | } |
11657 | } |
11658 | Ops.push_back(Elt: EmitScalarOrConstFoldImmArg(ICEArguments, Idx: i, E)); |
11659 | } |
11660 | |
11661 | auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap); |
11662 | const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap( |
11663 | IntrinsicMap: SISDMap, BuiltinID, MapProvenSorted&: AArch64SISDIntrinsicsProvenSorted); |
11664 | |
11665 | if (Builtin) { |
11666 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: E->getNumArgs() - 1))); |
11667 | Value *Result = EmitCommonNeonSISDBuiltinExpr(CGF&: *this, SISDInfo: *Builtin, Ops, E); |
11668 | assert(Result && "SISD intrinsic should have been handled" ); |
11669 | return Result; |
11670 | } |
11671 | |
11672 | const Expr *Arg = E->getArg(Arg: E->getNumArgs()-1); |
11673 | NeonTypeFlags Type(0); |
11674 | if (std::optional<llvm::APSInt> Result = |
11675 | Arg->getIntegerConstantExpr(Ctx: getContext())) |
11676 | // Determine the type of this overloaded NEON intrinsic. |
11677 | Type = NeonTypeFlags(Result->getZExtValue()); |
11678 | |
11679 | bool usgn = Type.isUnsigned(); |
11680 | bool quad = Type.isQuad(); |
11681 | |
11682 | // Handle non-overloaded intrinsics first. |
11683 | switch (BuiltinID) { |
11684 | default: break; |
11685 | case NEON::BI__builtin_neon_vabsh_f16: |
11686 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
11687 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::fabs, Tys: HalfTy), Ops, name: "vabs" ); |
11688 | case NEON::BI__builtin_neon_vaddq_p128: { |
11689 | llvm::Type *Ty = GetNeonType(CGF: this, TypeFlags: NeonTypeFlags::Poly128); |
11690 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
11691 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
11692 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
11693 | Ops[0] = Builder.CreateXor(LHS: Ops[0], RHS: Ops[1]); |
11694 | llvm::Type *Int128Ty = llvm::Type::getIntNTy(C&: getLLVMContext(), N: 128); |
11695 | return Builder.CreateBitCast(V: Ops[0], DestTy: Int128Ty); |
11696 | } |
11697 | case NEON::BI__builtin_neon_vldrq_p128: { |
11698 | llvm::Type *Int128Ty = llvm::Type::getIntNTy(C&: getLLVMContext(), N: 128); |
11699 | Value *Ptr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11700 | return Builder.CreateAlignedLoad(Ty: Int128Ty, Addr: Ptr, |
11701 | Align: CharUnits::fromQuantity(Quantity: 16)); |
11702 | } |
11703 | case NEON::BI__builtin_neon_vstrq_p128: { |
11704 | Value *Ptr = Ops[0]; |
11705 | return Builder.CreateDefaultAlignedStore(Val: EmitScalarExpr(E: E->getArg(Arg: 1)), Addr: Ptr); |
11706 | } |
11707 | case NEON::BI__builtin_neon_vcvts_f32_u32: |
11708 | case NEON::BI__builtin_neon_vcvtd_f64_u64: |
11709 | usgn = true; |
11710 | [[fallthrough]]; |
11711 | case NEON::BI__builtin_neon_vcvts_f32_s32: |
11712 | case NEON::BI__builtin_neon_vcvtd_f64_s64: { |
11713 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
11714 | bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64; |
11715 | llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty; |
11716 | llvm::Type *FTy = Is64 ? DoubleTy : FloatTy; |
11717 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: InTy); |
11718 | if (usgn) |
11719 | return Builder.CreateUIToFP(V: Ops[0], DestTy: FTy); |
11720 | return Builder.CreateSIToFP(V: Ops[0], DestTy: FTy); |
11721 | } |
11722 | case NEON::BI__builtin_neon_vcvth_f16_u16: |
11723 | case NEON::BI__builtin_neon_vcvth_f16_u32: |
11724 | case NEON::BI__builtin_neon_vcvth_f16_u64: |
11725 | usgn = true; |
11726 | [[fallthrough]]; |
11727 | case NEON::BI__builtin_neon_vcvth_f16_s16: |
11728 | case NEON::BI__builtin_neon_vcvth_f16_s32: |
11729 | case NEON::BI__builtin_neon_vcvth_f16_s64: { |
11730 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
11731 | llvm::Type *FTy = HalfTy; |
11732 | llvm::Type *InTy; |
11733 | if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64) |
11734 | InTy = Int64Ty; |
11735 | else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32) |
11736 | InTy = Int32Ty; |
11737 | else |
11738 | InTy = Int16Ty; |
11739 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: InTy); |
11740 | if (usgn) |
11741 | return Builder.CreateUIToFP(V: Ops[0], DestTy: FTy); |
11742 | return Builder.CreateSIToFP(V: Ops[0], DestTy: FTy); |
11743 | } |
11744 | case NEON::BI__builtin_neon_vcvtah_u16_f16: |
11745 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: |
11746 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: |
11747 | case NEON::BI__builtin_neon_vcvtph_u16_f16: |
11748 | case NEON::BI__builtin_neon_vcvth_u16_f16: |
11749 | case NEON::BI__builtin_neon_vcvtah_s16_f16: |
11750 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: |
11751 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: |
11752 | case NEON::BI__builtin_neon_vcvtph_s16_f16: |
11753 | case NEON::BI__builtin_neon_vcvth_s16_f16: { |
11754 | unsigned Int; |
11755 | llvm::Type* InTy = Int32Ty; |
11756 | llvm::Type* FTy = HalfTy; |
11757 | llvm::Type *Tys[2] = {InTy, FTy}; |
11758 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
11759 | switch (BuiltinID) { |
11760 | default: llvm_unreachable("missing builtin ID in switch!" ); |
11761 | case NEON::BI__builtin_neon_vcvtah_u16_f16: |
11762 | Int = Intrinsic::aarch64_neon_fcvtau; break; |
11763 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: |
11764 | Int = Intrinsic::aarch64_neon_fcvtmu; break; |
11765 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: |
11766 | Int = Intrinsic::aarch64_neon_fcvtnu; break; |
11767 | case NEON::BI__builtin_neon_vcvtph_u16_f16: |
11768 | Int = Intrinsic::aarch64_neon_fcvtpu; break; |
11769 | case NEON::BI__builtin_neon_vcvth_u16_f16: |
11770 | Int = Intrinsic::aarch64_neon_fcvtzu; break; |
11771 | case NEON::BI__builtin_neon_vcvtah_s16_f16: |
11772 | Int = Intrinsic::aarch64_neon_fcvtas; break; |
11773 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: |
11774 | Int = Intrinsic::aarch64_neon_fcvtms; break; |
11775 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: |
11776 | Int = Intrinsic::aarch64_neon_fcvtns; break; |
11777 | case NEON::BI__builtin_neon_vcvtph_s16_f16: |
11778 | Int = Intrinsic::aarch64_neon_fcvtps; break; |
11779 | case NEON::BI__builtin_neon_vcvth_s16_f16: |
11780 | Int = Intrinsic::aarch64_neon_fcvtzs; break; |
11781 | } |
11782 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "fcvt" ); |
11783 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
11784 | } |
11785 | case NEON::BI__builtin_neon_vcaleh_f16: |
11786 | case NEON::BI__builtin_neon_vcalth_f16: |
11787 | case NEON::BI__builtin_neon_vcageh_f16: |
11788 | case NEON::BI__builtin_neon_vcagth_f16: { |
11789 | unsigned Int; |
11790 | llvm::Type* InTy = Int32Ty; |
11791 | llvm::Type* FTy = HalfTy; |
11792 | llvm::Type *Tys[2] = {InTy, FTy}; |
11793 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
11794 | switch (BuiltinID) { |
11795 | default: llvm_unreachable("missing builtin ID in switch!" ); |
11796 | case NEON::BI__builtin_neon_vcageh_f16: |
11797 | Int = Intrinsic::aarch64_neon_facge; break; |
11798 | case NEON::BI__builtin_neon_vcagth_f16: |
11799 | Int = Intrinsic::aarch64_neon_facgt; break; |
11800 | case NEON::BI__builtin_neon_vcaleh_f16: |
11801 | Int = Intrinsic::aarch64_neon_facge; std::swap(a&: Ops[0], b&: Ops[1]); break; |
11802 | case NEON::BI__builtin_neon_vcalth_f16: |
11803 | Int = Intrinsic::aarch64_neon_facgt; std::swap(a&: Ops[0], b&: Ops[1]); break; |
11804 | } |
11805 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "facg" ); |
11806 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
11807 | } |
11808 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: |
11809 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: { |
11810 | unsigned Int; |
11811 | llvm::Type* InTy = Int32Ty; |
11812 | llvm::Type* FTy = HalfTy; |
11813 | llvm::Type *Tys[2] = {InTy, FTy}; |
11814 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
11815 | switch (BuiltinID) { |
11816 | default: llvm_unreachable("missing builtin ID in switch!" ); |
11817 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: |
11818 | Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break; |
11819 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: |
11820 | Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break; |
11821 | } |
11822 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "fcvth_n" ); |
11823 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
11824 | } |
11825 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: |
11826 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: { |
11827 | unsigned Int; |
11828 | llvm::Type* FTy = HalfTy; |
11829 | llvm::Type* InTy = Int32Ty; |
11830 | llvm::Type *Tys[2] = {FTy, InTy}; |
11831 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
11832 | switch (BuiltinID) { |
11833 | default: llvm_unreachable("missing builtin ID in switch!" ); |
11834 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: |
11835 | Int = Intrinsic::aarch64_neon_vcvtfxs2fp; |
11836 | Ops[0] = Builder.CreateSExt(V: Ops[0], DestTy: InTy, Name: "sext" ); |
11837 | break; |
11838 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: |
11839 | Int = Intrinsic::aarch64_neon_vcvtfxu2fp; |
11840 | Ops[0] = Builder.CreateZExt(V: Ops[0], DestTy: InTy); |
11841 | break; |
11842 | } |
11843 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "fcvth_n" ); |
11844 | } |
11845 | case NEON::BI__builtin_neon_vpaddd_s64: { |
11846 | auto *Ty = llvm::FixedVectorType::get(ElementType: Int64Ty, NumElts: 2); |
11847 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
    // The vector is v2i64, so make sure it's bitcast to that.
11849 | Vec = Builder.CreateBitCast(V: Vec, DestTy: Ty, Name: "v2i64" ); |
11850 | llvm::Value *Idx0 = llvm::ConstantInt::get(Ty: SizeTy, V: 0); |
11851 | llvm::Value *Idx1 = llvm::ConstantInt::get(Ty: SizeTy, V: 1); |
11852 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx: Idx0, Name: "lane0" ); |
11853 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx: Idx1, Name: "lane1" ); |
    // Pairwise addition of a v2i64 into a scalar i64.
11855 | return Builder.CreateAdd(LHS: Op0, RHS: Op1, Name: "vpaddd" ); |
11856 | } |
11857 | case NEON::BI__builtin_neon_vpaddd_f64: { |
11858 | auto *Ty = llvm::FixedVectorType::get(ElementType: DoubleTy, NumElts: 2); |
11859 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11860 | // The vector is v2f64, so make sure it's bitcast to that. |
11861 | Vec = Builder.CreateBitCast(V: Vec, DestTy: Ty, Name: "v2f64" ); |
11862 | llvm::Value *Idx0 = llvm::ConstantInt::get(Ty: SizeTy, V: 0); |
11863 | llvm::Value *Idx1 = llvm::ConstantInt::get(Ty: SizeTy, V: 1); |
11864 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx: Idx0, Name: "lane0" ); |
11865 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx: Idx1, Name: "lane1" ); |
11866 | // Pairwise addition of a v2f64 into a scalar f64. |
11867 | return Builder.CreateFAdd(L: Op0, R: Op1, Name: "vpaddd" ); |
11868 | } |
11869 | case NEON::BI__builtin_neon_vpadds_f32: { |
11870 | auto *Ty = llvm::FixedVectorType::get(ElementType: FloatTy, NumElts: 2); |
11871 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
11872 | // The vector is v2f32, so make sure it's bitcast to that. |
11873 | Vec = Builder.CreateBitCast(V: Vec, DestTy: Ty, Name: "v2f32" ); |
11874 | llvm::Value *Idx0 = llvm::ConstantInt::get(Ty: SizeTy, V: 0); |
11875 | llvm::Value *Idx1 = llvm::ConstantInt::get(Ty: SizeTy, V: 1); |
11876 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx: Idx0, Name: "lane0" ); |
11877 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx: Idx1, Name: "lane1" ); |
11878 | // Pairwise addition of a v2f32 into a scalar f32. |
11879 | return Builder.CreateFAdd(L: Op0, R: Op1, Name: "vpaddd" ); |
11880 | } |
11881 | case NEON::BI__builtin_neon_vceqzd_s64: |
11882 | case NEON::BI__builtin_neon_vceqzd_f64: |
11883 | case NEON::BI__builtin_neon_vceqzs_f32: |
11884 | case NEON::BI__builtin_neon_vceqzh_f16: |
11885 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
11886 | return EmitAArch64CompareBuiltinExpr( |
11887 | Op: Ops[0], Ty: ConvertType(T: E->getCallReturnType(Ctx: getContext())), |
11888 | Fp: ICmpInst::FCMP_OEQ, Ip: ICmpInst::ICMP_EQ, Name: "vceqz" ); |
11889 | case NEON::BI__builtin_neon_vcgezd_s64: |
11890 | case NEON::BI__builtin_neon_vcgezd_f64: |
11891 | case NEON::BI__builtin_neon_vcgezs_f32: |
11892 | case NEON::BI__builtin_neon_vcgezh_f16: |
11893 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
11894 | return EmitAArch64CompareBuiltinExpr( |
11895 | Op: Ops[0], Ty: ConvertType(T: E->getCallReturnType(Ctx: getContext())), |
11896 | Fp: ICmpInst::FCMP_OGE, Ip: ICmpInst::ICMP_SGE, Name: "vcgez" ); |
11897 | case NEON::BI__builtin_neon_vclezd_s64: |
11898 | case NEON::BI__builtin_neon_vclezd_f64: |
11899 | case NEON::BI__builtin_neon_vclezs_f32: |
11900 | case NEON::BI__builtin_neon_vclezh_f16: |
11901 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
11902 | return EmitAArch64CompareBuiltinExpr( |
11903 | Op: Ops[0], Ty: ConvertType(T: E->getCallReturnType(Ctx: getContext())), |
11904 | Fp: ICmpInst::FCMP_OLE, Ip: ICmpInst::ICMP_SLE, Name: "vclez" ); |
11905 | case NEON::BI__builtin_neon_vcgtzd_s64: |
11906 | case NEON::BI__builtin_neon_vcgtzd_f64: |
11907 | case NEON::BI__builtin_neon_vcgtzs_f32: |
11908 | case NEON::BI__builtin_neon_vcgtzh_f16: |
11909 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
11910 | return EmitAArch64CompareBuiltinExpr( |
11911 | Op: Ops[0], Ty: ConvertType(T: E->getCallReturnType(Ctx: getContext())), |
11912 | Fp: ICmpInst::FCMP_OGT, Ip: ICmpInst::ICMP_SGT, Name: "vcgtz" ); |
11913 | case NEON::BI__builtin_neon_vcltzd_s64: |
11914 | case NEON::BI__builtin_neon_vcltzd_f64: |
11915 | case NEON::BI__builtin_neon_vcltzs_f32: |
11916 | case NEON::BI__builtin_neon_vcltzh_f16: |
11917 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
11918 | return EmitAArch64CompareBuiltinExpr( |
11919 | Op: Ops[0], Ty: ConvertType(T: E->getCallReturnType(Ctx: getContext())), |
11920 | Fp: ICmpInst::FCMP_OLT, Ip: ICmpInst::ICMP_SLT, Name: "vcltz" ); |
11921 | |
11922 | case NEON::BI__builtin_neon_vceqzd_u64: { |
11923 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
11924 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Int64Ty); |
11925 | Ops[0] = |
11926 | Builder.CreateICmpEQ(LHS: Ops[0], RHS: llvm::Constant::getNullValue(Ty: Int64Ty)); |
11927 | return Builder.CreateSExt(V: Ops[0], DestTy: Int64Ty, Name: "vceqzd" ); |
11928 | } |
11929 | case NEON::BI__builtin_neon_vceqd_f64: |
11930 | case NEON::BI__builtin_neon_vcled_f64: |
11931 | case NEON::BI__builtin_neon_vcltd_f64: |
11932 | case NEON::BI__builtin_neon_vcged_f64: |
11933 | case NEON::BI__builtin_neon_vcgtd_f64: { |
11934 | llvm::CmpInst::Predicate P; |
11935 | switch (BuiltinID) { |
11936 | default: llvm_unreachable("missing builtin ID in switch!" ); |
11937 | case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break; |
11938 | case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break; |
11939 | case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break; |
11940 | case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break; |
11941 | case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break; |
11942 | } |
11943 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
11944 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: DoubleTy); |
11945 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: DoubleTy); |
11946 | if (P == llvm::FCmpInst::FCMP_OEQ) |
11947 | Ops[0] = Builder.CreateFCmp(P, LHS: Ops[0], RHS: Ops[1]); |
11948 | else |
11949 | Ops[0] = Builder.CreateFCmpS(P, LHS: Ops[0], RHS: Ops[1]); |
11950 | return Builder.CreateSExt(V: Ops[0], DestTy: Int64Ty, Name: "vcmpd" ); |
11951 | } |
11952 | case NEON::BI__builtin_neon_vceqs_f32: |
11953 | case NEON::BI__builtin_neon_vcles_f32: |
11954 | case NEON::BI__builtin_neon_vclts_f32: |
11955 | case NEON::BI__builtin_neon_vcges_f32: |
11956 | case NEON::BI__builtin_neon_vcgts_f32: { |
11957 | llvm::CmpInst::Predicate P; |
11958 | switch (BuiltinID) { |
11959 | default: llvm_unreachable("missing builtin ID in switch!" ); |
11960 | case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break; |
11961 | case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break; |
11962 | case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break; |
11963 | case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break; |
11964 | case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break; |
11965 | } |
11966 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
11967 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: FloatTy); |
11968 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: FloatTy); |
11969 | if (P == llvm::FCmpInst::FCMP_OEQ) |
11970 | Ops[0] = Builder.CreateFCmp(P, LHS: Ops[0], RHS: Ops[1]); |
11971 | else |
11972 | Ops[0] = Builder.CreateFCmpS(P, LHS: Ops[0], RHS: Ops[1]); |
11973 | return Builder.CreateSExt(V: Ops[0], DestTy: Int32Ty, Name: "vcmpd" ); |
11974 | } |
11975 | case NEON::BI__builtin_neon_vceqh_f16: |
11976 | case NEON::BI__builtin_neon_vcleh_f16: |
11977 | case NEON::BI__builtin_neon_vclth_f16: |
11978 | case NEON::BI__builtin_neon_vcgeh_f16: |
11979 | case NEON::BI__builtin_neon_vcgth_f16: { |
11980 | llvm::CmpInst::Predicate P; |
11981 | switch (BuiltinID) { |
11982 | default: llvm_unreachable("missing builtin ID in switch!" ); |
11983 | case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break; |
11984 | case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break; |
11985 | case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break; |
11986 | case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break; |
11987 | case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break; |
11988 | } |
11989 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
11990 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: HalfTy); |
11991 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: HalfTy); |
11992 | if (P == llvm::FCmpInst::FCMP_OEQ) |
11993 | Ops[0] = Builder.CreateFCmp(P, LHS: Ops[0], RHS: Ops[1]); |
11994 | else |
11995 | Ops[0] = Builder.CreateFCmpS(P, LHS: Ops[0], RHS: Ops[1]); |
11996 | return Builder.CreateSExt(V: Ops[0], DestTy: Int16Ty, Name: "vcmpd" ); |
11997 | } |
11998 | case NEON::BI__builtin_neon_vceqd_s64: |
11999 | case NEON::BI__builtin_neon_vceqd_u64: |
12000 | case NEON::BI__builtin_neon_vcgtd_s64: |
12001 | case NEON::BI__builtin_neon_vcgtd_u64: |
12002 | case NEON::BI__builtin_neon_vcltd_s64: |
12003 | case NEON::BI__builtin_neon_vcltd_u64: |
12004 | case NEON::BI__builtin_neon_vcged_u64: |
12005 | case NEON::BI__builtin_neon_vcged_s64: |
12006 | case NEON::BI__builtin_neon_vcled_u64: |
12007 | case NEON::BI__builtin_neon_vcled_s64: { |
12008 | llvm::CmpInst::Predicate P; |
12009 | switch (BuiltinID) { |
12010 | default: llvm_unreachable("missing builtin ID in switch!" ); |
12011 | case NEON::BI__builtin_neon_vceqd_s64: |
12012 | case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break; |
12013 | case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break; |
12014 | case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break; |
12015 | case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break; |
12016 | case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break; |
12017 | case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break; |
12018 | case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break; |
12019 | case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break; |
12020 | case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break; |
12021 | } |
12022 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12023 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Int64Ty); |
12024 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Int64Ty); |
12025 | Ops[0] = Builder.CreateICmp(P, LHS: Ops[0], RHS: Ops[1]); |
12026 | return Builder.CreateSExt(V: Ops[0], DestTy: Int64Ty, Name: "vceqd" ); |
12027 | } |
12028 | case NEON::BI__builtin_neon_vtstd_s64: |
12029 | case NEON::BI__builtin_neon_vtstd_u64: { |
12030 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12031 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Int64Ty); |
12032 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Int64Ty); |
12033 | Ops[0] = Builder.CreateAnd(LHS: Ops[0], RHS: Ops[1]); |
12034 | Ops[0] = Builder.CreateICmp(P: ICmpInst::ICMP_NE, LHS: Ops[0], |
12035 | RHS: llvm::Constant::getNullValue(Ty: Int64Ty)); |
12036 | return Builder.CreateSExt(V: Ops[0], DestTy: Int64Ty, Name: "vtstd" ); |
12037 | } |
12038 | case NEON::BI__builtin_neon_vset_lane_i8: |
12039 | case NEON::BI__builtin_neon_vset_lane_i16: |
12040 | case NEON::BI__builtin_neon_vset_lane_i32: |
12041 | case NEON::BI__builtin_neon_vset_lane_i64: |
12042 | case NEON::BI__builtin_neon_vset_lane_bf16: |
12043 | case NEON::BI__builtin_neon_vset_lane_f32: |
12044 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
12045 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
12046 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
12047 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
12048 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
12049 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
12050 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 2))); |
12051 | return Builder.CreateInsertElement(Vec: Ops[1], NewElt: Ops[0], Idx: Ops[2], Name: "vset_lane" ); |
12052 | case NEON::BI__builtin_neon_vset_lane_f64: |
12053 | // The vector type needs a cast for the v1f64 variant. |
12054 | Ops[1] = |
12055 | Builder.CreateBitCast(V: Ops[1], DestTy: llvm::FixedVectorType::get(ElementType: DoubleTy, NumElts: 1)); |
12056 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 2))); |
12057 | return Builder.CreateInsertElement(Vec: Ops[1], NewElt: Ops[0], Idx: Ops[2], Name: "vset_lane" ); |
12058 | case NEON::BI__builtin_neon_vsetq_lane_f64: |
12059 | // The vector type needs a cast for the v2f64 variant. |
12060 | Ops[1] = |
12061 | Builder.CreateBitCast(V: Ops[1], DestTy: llvm::FixedVectorType::get(ElementType: DoubleTy, NumElts: 2)); |
12062 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 2))); |
12063 | return Builder.CreateInsertElement(Vec: Ops[1], NewElt: Ops[0], Idx: Ops[2], Name: "vset_lane" ); |
12064 | |
12065 | case NEON::BI__builtin_neon_vget_lane_i8: |
12066 | case NEON::BI__builtin_neon_vdupb_lane_i8: |
12067 | Ops[0] = |
12068 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 8)); |
12069 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12070 | Name: "vget_lane" ); |
12071 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
12072 | case NEON::BI__builtin_neon_vdupb_laneq_i8: |
12073 | Ops[0] = |
12074 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 16)); |
12075 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12076 | Name: "vgetq_lane" ); |
12077 | case NEON::BI__builtin_neon_vget_lane_i16: |
12078 | case NEON::BI__builtin_neon_vduph_lane_i16: |
12079 | Ops[0] = |
12080 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 4)); |
12081 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12082 | Name: "vget_lane" ); |
12083 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
12084 | case NEON::BI__builtin_neon_vduph_laneq_i16: |
12085 | Ops[0] = |
12086 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 8)); |
12087 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12088 | Name: "vgetq_lane" ); |
12089 | case NEON::BI__builtin_neon_vget_lane_i32: |
12090 | case NEON::BI__builtin_neon_vdups_lane_i32: |
12091 | Ops[0] = |
12092 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: Int32Ty, NumElts: 2)); |
12093 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12094 | Name: "vget_lane" ); |
12095 | case NEON::BI__builtin_neon_vdups_lane_f32: |
12096 | Ops[0] = |
12097 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: FloatTy, NumElts: 2)); |
12098 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12099 | Name: "vdups_lane" ); |
12100 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
12101 | case NEON::BI__builtin_neon_vdups_laneq_i32: |
12102 | Ops[0] = |
12103 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: Int32Ty, NumElts: 4)); |
12104 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12105 | Name: "vgetq_lane" ); |
12106 | case NEON::BI__builtin_neon_vget_lane_i64: |
12107 | case NEON::BI__builtin_neon_vdupd_lane_i64: |
12108 | Ops[0] = |
12109 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: Int64Ty, NumElts: 1)); |
12110 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12111 | Name: "vget_lane" ); |
12112 | case NEON::BI__builtin_neon_vdupd_lane_f64: |
12113 | Ops[0] = |
12114 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: DoubleTy, NumElts: 1)); |
12115 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12116 | Name: "vdupd_lane" ); |
12117 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
12118 | case NEON::BI__builtin_neon_vdupd_laneq_i64: |
12119 | Ops[0] = |
12120 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: Int64Ty, NumElts: 2)); |
12121 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12122 | Name: "vgetq_lane" ); |
12123 | case NEON::BI__builtin_neon_vget_lane_f32: |
12124 | Ops[0] = |
12125 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: FloatTy, NumElts: 2)); |
12126 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12127 | Name: "vget_lane" ); |
12128 | case NEON::BI__builtin_neon_vget_lane_f64: |
12129 | Ops[0] = |
12130 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: DoubleTy, NumElts: 1)); |
12131 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12132 | Name: "vget_lane" ); |
12133 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
12134 | case NEON::BI__builtin_neon_vdups_laneq_f32: |
12135 | Ops[0] = |
12136 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: FloatTy, NumElts: 4)); |
12137 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12138 | Name: "vgetq_lane" ); |
12139 | case NEON::BI__builtin_neon_vgetq_lane_f64: |
12140 | case NEON::BI__builtin_neon_vdupd_laneq_f64: |
12141 | Ops[0] = |
12142 | Builder.CreateBitCast(V: Ops[0], DestTy: llvm::FixedVectorType::get(ElementType: DoubleTy, NumElts: 2)); |
12143 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12144 | Name: "vgetq_lane" ); |
12145 | case NEON::BI__builtin_neon_vaddh_f16: |
12146 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12147 | return Builder.CreateFAdd(L: Ops[0], R: Ops[1], Name: "vaddh" ); |
12148 | case NEON::BI__builtin_neon_vsubh_f16: |
12149 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12150 | return Builder.CreateFSub(L: Ops[0], R: Ops[1], Name: "vsubh" ); |
12151 | case NEON::BI__builtin_neon_vmulh_f16: |
12152 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12153 | return Builder.CreateFMul(L: Ops[0], R: Ops[1], Name: "vmulh" ); |
12154 | case NEON::BI__builtin_neon_vdivh_f16: |
12155 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12156 | return Builder.CreateFDiv(L: Ops[0], R: Ops[1], Name: "vdivh" ); |
12157 | case NEON::BI__builtin_neon_vfmah_f16: |
12158 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. |
12159 | return emitCallMaybeConstrainedFPBuiltin( |
12160 | CGF&: *this, IntrinsicID: Intrinsic::fma, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_fma, Ty: HalfTy, |
12161 | Args: {EmitScalarExpr(E: E->getArg(Arg: 1)), EmitScalarExpr(E: E->getArg(Arg: 2)), Ops[0]}); |
12162 | case NEON::BI__builtin_neon_vfmsh_f16: { |
12163 | Value* Neg = Builder.CreateFNeg(V: EmitScalarExpr(E: E->getArg(Arg: 1)), Name: "vsubh" ); |
12164 | |
12165 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. |
12166 | return emitCallMaybeConstrainedFPBuiltin( |
12167 | CGF&: *this, IntrinsicID: Intrinsic::fma, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_fma, Ty: HalfTy, |
12168 | Args: {Neg, EmitScalarExpr(E: E->getArg(Arg: 2)), Ops[0]}); |
12169 | } |
12170 | case NEON::BI__builtin_neon_vaddd_s64: |
12171 | case NEON::BI__builtin_neon_vaddd_u64: |
12172 | return Builder.CreateAdd(LHS: Ops[0], RHS: EmitScalarExpr(E: E->getArg(Arg: 1)), Name: "vaddd" ); |
12173 | case NEON::BI__builtin_neon_vsubd_s64: |
12174 | case NEON::BI__builtin_neon_vsubd_u64: |
12175 | return Builder.CreateSub(LHS: Ops[0], RHS: EmitScalarExpr(E: E->getArg(Arg: 1)), Name: "vsubd" ); |
12176 | case NEON::BI__builtin_neon_vqdmlalh_s16: |
12177 | case NEON::BI__builtin_neon_vqdmlslh_s16: { |
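    // Wrap the scalar i16 operands into <4 x i16> vectors, form the saturating
    // doubling multiply-long, extract lane 0, then do the saturating i32
    // add/sub accumulation.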
12178 | SmallVector<Value *, 2> ProductOps; |
12179 | ProductOps.push_back(Elt: vectorWrapScalar16(Op: Ops[1])); |
12180 | ProductOps.push_back(Elt: vectorWrapScalar16(Op: EmitScalarExpr(E: E->getArg(Arg: 2)))); |
12181 | auto *VTy = llvm::FixedVectorType::get(ElementType: Int32Ty, NumElts: 4); |
12182 | Ops[1] = EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_sqdmull, Tys: VTy), |
12183 | Ops&: ProductOps, name: "vqdmlXl" ); |
12184 | Constant *CI = ConstantInt::get(Ty: SizeTy, V: 0); |
12185 | Ops[1] = Builder.CreateExtractElement(Vec: Ops[1], Idx: CI, Name: "lane0" ); |
12186 | |
12187 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16 |
12188 | ? Intrinsic::aarch64_neon_sqadd |
12189 | : Intrinsic::aarch64_neon_sqsub; |
12190 | return EmitNeonCall(F: CGM.getIntrinsic(IID: AccumInt, Tys: Int32Ty), Ops, name: "vqdmlXl" ); |
12191 | } |
12192 | case NEON::BI__builtin_neon_vqshlud_n_s64: { |
12193 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12194 | Ops[1] = Builder.CreateZExt(V: Ops[1], DestTy: Int64Ty); |
12195 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_sqshlu, Tys: Int64Ty), |
12196 | Ops, name: "vqshlu_n" ); |
12197 | } |
12198 | case NEON::BI__builtin_neon_vqshld_n_u64: |
12199 | case NEON::BI__builtin_neon_vqshld_n_s64: { |
12200 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64 |
12201 | ? Intrinsic::aarch64_neon_uqshl |
12202 | : Intrinsic::aarch64_neon_sqshl; |
12203 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12204 | Ops[1] = Builder.CreateZExt(V: Ops[1], DestTy: Int64Ty); |
12205 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Int64Ty), Ops, name: "vqshl_n" ); |
12206 | } |
12207 | case NEON::BI__builtin_neon_vrshrd_n_u64: |
12208 | case NEON::BI__builtin_neon_vrshrd_n_s64: { |
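    // There is no scalar rounding right shift, so emit a rounding left shift
    // by the negated amount instead.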
12209 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64 |
12210 | ? Intrinsic::aarch64_neon_urshl |
12211 | : Intrinsic::aarch64_neon_srshl; |
12212 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12213 | int SV = cast<ConstantInt>(Val: Ops[1])->getSExtValue(); |
12214 | Ops[1] = ConstantInt::get(Ty: Int64Ty, V: -SV); |
12215 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Int64Ty), Ops, name: "vrshr_n" ); |
12216 | } |
12217 | case NEON::BI__builtin_neon_vrsrad_n_u64: |
12218 | case NEON::BI__builtin_neon_vrsrad_n_s64: { |
12219 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64 |
12220 | ? Intrinsic::aarch64_neon_urshl |
12221 | : Intrinsic::aarch64_neon_srshl; |
12222 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Int64Ty); |
12223 | Ops.push_back(Elt: Builder.CreateNeg(V: EmitScalarExpr(E: E->getArg(Arg: 2)))); |
12224 | Ops[1] = Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Int, Tys: Int64Ty), |
12225 | Args: {Ops[1], Builder.CreateSExt(V: Ops[2], DestTy: Int64Ty)}); |
12226 | return Builder.CreateAdd(LHS: Ops[0], RHS: Builder.CreateBitCast(V: Ops[1], DestTy: Int64Ty)); |
12227 | } |
12228 | case NEON::BI__builtin_neon_vshld_n_s64: |
12229 | case NEON::BI__builtin_neon_vshld_n_u64: { |
12230 | llvm::ConstantInt *Amt = cast<ConstantInt>(Val: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12231 | return Builder.CreateShl( |
12232 | LHS: Ops[0], RHS: ConstantInt::get(Ty: Int64Ty, V: Amt->getZExtValue()), Name: "shld_n" ); |
12233 | } |
12234 | case NEON::BI__builtin_neon_vshrd_n_s64: { |
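    // The shift amount may be 64, but an i64 ashr by 64 is poison; clamp it to
    // 63, which yields the same all-sign-bits result.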
12235 | llvm::ConstantInt *Amt = cast<ConstantInt>(Val: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12236 | return Builder.CreateAShr( |
12237 | LHS: Ops[0], RHS: ConstantInt::get(Ty: Int64Ty, V: std::min(a: static_cast<uint64_t>(63), |
12238 | b: Amt->getZExtValue())), |
12239 | Name: "shrd_n" ); |
12240 | } |
12241 | case NEON::BI__builtin_neon_vshrd_n_u64: { |
12242 | llvm::ConstantInt *Amt = cast<ConstantInt>(Val: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12243 | uint64_t ShiftAmt = Amt->getZExtValue(); |
12244 | // Right-shifting an unsigned value by its size yields 0. |
12245 | if (ShiftAmt == 64) |
12246 | return ConstantInt::get(Ty: Int64Ty, V: 0); |
12247 | return Builder.CreateLShr(LHS: Ops[0], RHS: ConstantInt::get(Ty: Int64Ty, V: ShiftAmt), |
12248 | Name: "shrd_n" ); |
12249 | } |
12250 | case NEON::BI__builtin_neon_vsrad_n_s64: { |
12251 | llvm::ConstantInt *Amt = cast<ConstantInt>(Val: EmitScalarExpr(E: E->getArg(Arg: 2))); |
12252 | Ops[1] = Builder.CreateAShr( |
12253 | LHS: Ops[1], RHS: ConstantInt::get(Ty: Int64Ty, V: std::min(a: static_cast<uint64_t>(63), |
12254 | b: Amt->getZExtValue())), |
12255 | Name: "shrd_n" ); |
12256 | return Builder.CreateAdd(LHS: Ops[0], RHS: Ops[1]); |
12257 | } |
12258 | case NEON::BI__builtin_neon_vsrad_n_u64: { |
12259 | llvm::ConstantInt *Amt = cast<ConstantInt>(Val: EmitScalarExpr(E: E->getArg(Arg: 2))); |
12260 | uint64_t ShiftAmt = Amt->getZExtValue(); |
12261 | // Right-shifting an unsigned value by its size yields 0. |
12262 | // As Op + 0 = Op, return Ops[0] directly. |
12263 | if (ShiftAmt == 64) |
12264 | return Ops[0]; |
12265 | Ops[1] = Builder.CreateLShr(LHS: Ops[1], RHS: ConstantInt::get(Ty: Int64Ty, V: ShiftAmt), |
12266 | Name: "shrd_n" ); |
12267 | return Builder.CreateAdd(LHS: Ops[0], RHS: Ops[1]); |
12268 | } |
12269 | case NEON::BI__builtin_neon_vqdmlalh_lane_s16: |
12270 | case NEON::BI__builtin_neon_vqdmlalh_laneq_s16: |
12271 | case NEON::BI__builtin_neon_vqdmlslh_lane_s16: |
12272 | case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: { |
12273 | Ops[2] = Builder.CreateExtractElement(Vec: Ops[2], Idx: EmitScalarExpr(E: E->getArg(Arg: 3)), |
12274 | Name: "lane" ); |
12275 | SmallVector<Value *, 2> ProductOps; |
12276 | ProductOps.push_back(Elt: vectorWrapScalar16(Op: Ops[1])); |
12277 | ProductOps.push_back(Elt: vectorWrapScalar16(Op: Ops[2])); |
12278 | auto *VTy = llvm::FixedVectorType::get(ElementType: Int32Ty, NumElts: 4); |
12279 | Ops[1] = EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_sqdmull, Tys: VTy), |
12280 | Ops&: ProductOps, name: "vqdmlXl" ); |
12281 | Constant *CI = ConstantInt::get(Ty: SizeTy, V: 0); |
12282 | Ops[1] = Builder.CreateExtractElement(Vec: Ops[1], Idx: CI, Name: "lane0" ); |
12283 | Ops.pop_back(); |
12284 | |
12285 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 || |
12286 | BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16) |
12287 | ? Intrinsic::aarch64_neon_sqadd |
12288 | : Intrinsic::aarch64_neon_sqsub; |
12289 | return EmitNeonCall(F: CGM.getIntrinsic(IID: AccInt, Tys: Int32Ty), Ops, name: "vqdmlXl" ); |
12290 | } |
12291 | case NEON::BI__builtin_neon_vqdmlals_s32: |
12292 | case NEON::BI__builtin_neon_vqdmlsls_s32: { |
12293 | SmallVector<Value *, 2> ProductOps; |
12294 | ProductOps.push_back(Elt: Ops[1]); |
12295 | ProductOps.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 2))); |
12296 | Ops[1] = |
12297 | EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_sqdmulls_scalar), |
12298 | Ops&: ProductOps, name: "vqdmlXl" ); |
12299 | |
12300 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32 |
12301 | ? Intrinsic::aarch64_neon_sqadd |
12302 | : Intrinsic::aarch64_neon_sqsub; |
12303 | return EmitNeonCall(F: CGM.getIntrinsic(IID: AccumInt, Tys: Int64Ty), Ops, name: "vqdmlXl" ); |
12304 | } |
12305 | case NEON::BI__builtin_neon_vqdmlals_lane_s32: |
12306 | case NEON::BI__builtin_neon_vqdmlals_laneq_s32: |
12307 | case NEON::BI__builtin_neon_vqdmlsls_lane_s32: |
12308 | case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: { |
12309 | Ops[2] = Builder.CreateExtractElement(Vec: Ops[2], Idx: EmitScalarExpr(E: E->getArg(Arg: 3)), |
12310 | Name: "lane" ); |
12311 | SmallVector<Value *, 2> ProductOps; |
12312 | ProductOps.push_back(Elt: Ops[1]); |
12313 | ProductOps.push_back(Elt: Ops[2]); |
12314 | Ops[1] = |
12315 | EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_sqdmulls_scalar), |
12316 | Ops&: ProductOps, name: "vqdmlXl" ); |
12317 | Ops.pop_back(); |
12318 | |
12319 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 || |
12320 | BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32) |
12321 | ? Intrinsic::aarch64_neon_sqadd |
12322 | : Intrinsic::aarch64_neon_sqsub; |
12323 | return EmitNeonCall(F: CGM.getIntrinsic(IID: AccInt, Tys: Int64Ty), Ops, name: "vqdmlXl" ); |
12324 | } |
12325 | case NEON::BI__builtin_neon_vget_lane_bf16: |
12326 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
12327 | case NEON::BI__builtin_neon_vduph_lane_f16: { |
12328 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12329 | Name: "vget_lane" ); |
12330 | } |
12331 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
12332 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
12333 | case NEON::BI__builtin_neon_vduph_laneq_f16: { |
12334 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: EmitScalarExpr(E: E->getArg(Arg: 1)), |
12335 | Name: "vgetq_lane" ); |
12336 | } |
12337 | |
12338 | case clang::AArch64::BI_InterlockedAdd: |
12339 | case clang::AArch64::BI_InterlockedAdd64: { |
12340 | Address DestAddr = CheckAtomicAlignment(CGF&: *this, E); |
12341 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 1)); |
12342 | AtomicRMWInst *RMWI = |
12343 | Builder.CreateAtomicRMW(Op: AtomicRMWInst::Add, Addr: DestAddr, Val, |
12344 | Ordering: llvm::AtomicOrdering::SequentiallyConsistent); |
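    // The atomicrmw returns the old value; _InterlockedAdd returns the new
    // value, so add the operand back in.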
12345 | return Builder.CreateAdd(LHS: RMWI, RHS: Val); |
12346 | } |
12347 | } |
12348 | |
12349 | llvm::FixedVectorType *VTy = GetNeonType(CGF: this, TypeFlags: Type); |
12350 | llvm::Type *Ty = VTy; |
12351 | if (!Ty) |
12352 | return nullptr; |
12353 | |
  // Not all intrinsics handled by the common case work for AArch64 yet, so
  // only defer to the common code if the builtin has been added to our
  // special map.
12356 | Builtin = findARMVectorIntrinsicInMap(IntrinsicMap: AArch64SIMDIntrinsicMap, BuiltinID, |
12357 | MapProvenSorted&: AArch64SIMDIntrinsicsProvenSorted); |
12358 | |
12359 | if (Builtin) |
12360 | return EmitCommonNeonBuiltinExpr( |
12361 | BuiltinID: Builtin->BuiltinID, LLVMIntrinsic: Builtin->LLVMIntrinsic, AltLLVMIntrinsic: Builtin->AltLLVMIntrinsic, |
12362 | NameHint: Builtin->NameHint, Modifier: Builtin->TypeModifier, E, Ops, |
12363 | /*never use addresses*/ PtrOp0: Address::invalid(), PtrOp1: Address::invalid(), Arch); |
12364 | |
12365 | if (Value *V = EmitAArch64TblBuiltinExpr(CGF&: *this, BuiltinID, E, Ops, Arch)) |
12366 | return V; |
12367 | |
12368 | unsigned Int; |
12369 | switch (BuiltinID) { |
12370 | default: return nullptr; |
12371 | case NEON::BI__builtin_neon_vbsl_v: |
12372 | case NEON::BI__builtin_neon_vbslq_v: { |
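    // Bitwise select: (Ops[0] & Ops[1]) | (~Ops[0] & Ops[2]), performed on the
    // equivalent integer vector type.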
12373 | llvm::Type *BitTy = llvm::VectorType::getInteger(VTy); |
12374 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: BitTy, Name: "vbsl" ); |
12375 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: BitTy, Name: "vbsl" ); |
12376 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: BitTy, Name: "vbsl" ); |
12377 | |
12378 | Ops[1] = Builder.CreateAnd(LHS: Ops[0], RHS: Ops[1], Name: "vbsl" ); |
12379 | Ops[2] = Builder.CreateAnd(LHS: Builder.CreateNot(V: Ops[0]), RHS: Ops[2], Name: "vbsl" ); |
12380 | Ops[0] = Builder.CreateOr(LHS: Ops[1], RHS: Ops[2], Name: "vbsl" ); |
12381 | return Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
12382 | } |
12383 | case NEON::BI__builtin_neon_vfma_lane_v: |
12384 | case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types |
12385 | // The ARM builtins (and instructions) have the addend as the first |
12386 | // operand, but the 'fma' intrinsics have it last. Swap it around here. |
12387 | Value *Addend = Ops[0]; |
12388 | Value *Multiplicand = Ops[1]; |
12389 | Value *LaneSource = Ops[2]; |
12390 | Ops[0] = Multiplicand; |
12391 | Ops[1] = LaneSource; |
12392 | Ops[2] = Addend; |
12393 | |
12394 | // Now adjust things to handle the lane access. |
12395 | auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v |
12396 | ? llvm::FixedVectorType::get(ElementType: VTy->getElementType(), |
12397 | NumElts: VTy->getNumElements() / 2) |
12398 | : VTy; |
12399 | llvm::Constant *cst = cast<Constant>(Val: Ops[3]); |
12400 | Value *SV = llvm::ConstantVector::getSplat(EC: VTy->getElementCount(), Elt: cst); |
12401 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: SourceTy); |
12402 | Ops[1] = Builder.CreateShuffleVector(V1: Ops[1], V2: Ops[1], Mask: SV, Name: "lane" ); |
12403 | |
12404 | Ops.pop_back(); |
12405 | Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma |
12406 | : Intrinsic::fma; |
12407 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "fmla" ); |
12408 | } |
12409 | case NEON::BI__builtin_neon_vfma_laneq_v: { |
12410 | auto *VTy = cast<llvm::FixedVectorType>(Val: Ty); |
12411 | // v1f64 fma should be mapped to Neon scalar f64 fma |
12412 | if (VTy && VTy->getElementType() == DoubleTy) { |
12413 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: DoubleTy); |
12414 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: DoubleTy); |
12415 | llvm::FixedVectorType *VTy = |
12416 | GetNeonType(CGF: this, TypeFlags: NeonTypeFlags(NeonTypeFlags::Float64, false, true)); |
12417 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: VTy); |
12418 | Ops[2] = Builder.CreateExtractElement(Vec: Ops[2], Idx: Ops[3], Name: "extract" ); |
12419 | Value *Result; |
12420 | Result = emitCallMaybeConstrainedFPBuiltin( |
12421 | CGF&: *this, IntrinsicID: Intrinsic::fma, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_fma, |
12422 | Ty: DoubleTy, Args: {Ops[1], Ops[2], Ops[0]}); |
12423 | return Builder.CreateBitCast(V: Result, DestTy: Ty); |
12424 | } |
12425 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
12426 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
12427 | |
12428 | auto *STy = llvm::FixedVectorType::get(ElementType: VTy->getElementType(), |
12429 | NumElts: VTy->getNumElements() * 2); |
12430 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: STy); |
12431 | Value *SV = llvm::ConstantVector::getSplat(EC: VTy->getElementCount(), |
12432 | Elt: cast<ConstantInt>(Val: Ops[3])); |
12433 | Ops[2] = Builder.CreateShuffleVector(V1: Ops[2], V2: Ops[2], Mask: SV, Name: "lane" ); |
12434 | |
12435 | return emitCallMaybeConstrainedFPBuiltin( |
12436 | CGF&: *this, IntrinsicID: Intrinsic::fma, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_fma, Ty, |
12437 | Args: {Ops[2], Ops[1], Ops[0]}); |
12438 | } |
12439 | case NEON::BI__builtin_neon_vfmaq_laneq_v: { |
12440 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
12441 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
12442 | |
12443 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: Ty); |
12444 | Ops[2] = EmitNeonSplat(V: Ops[2], C: cast<ConstantInt>(Val: Ops[3])); |
12445 | return emitCallMaybeConstrainedFPBuiltin( |
12446 | CGF&: *this, IntrinsicID: Intrinsic::fma, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_fma, Ty, |
12447 | Args: {Ops[2], Ops[1], Ops[0]}); |
12448 | } |
12449 | case NEON::BI__builtin_neon_vfmah_lane_f16: |
12450 | case NEON::BI__builtin_neon_vfmas_lane_f32: |
12451 | case NEON::BI__builtin_neon_vfmah_laneq_f16: |
12452 | case NEON::BI__builtin_neon_vfmas_laneq_f32: |
12453 | case NEON::BI__builtin_neon_vfmad_lane_f64: |
12454 | case NEON::BI__builtin_neon_vfmad_laneq_f64: { |
12455 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 3))); |
12456 | llvm::Type *Ty = ConvertType(T: E->getCallReturnType(Ctx: getContext())); |
12457 | Ops[2] = Builder.CreateExtractElement(Vec: Ops[2], Idx: Ops[3], Name: "extract" ); |
12458 | return emitCallMaybeConstrainedFPBuiltin( |
12459 | CGF&: *this, IntrinsicID: Intrinsic::fma, ConstrainedIntrinsicID: Intrinsic::experimental_constrained_fma, Ty, |
12460 | Args: {Ops[1], Ops[2], Ops[0]}); |
12461 | } |
12462 | case NEON::BI__builtin_neon_vmull_v: |
12463 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
12464 | Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull; |
12465 | if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull; |
12466 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vmull" ); |
12467 | case NEON::BI__builtin_neon_vmax_v: |
12468 | case NEON::BI__builtin_neon_vmaxq_v: |
12469 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
12470 | Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax; |
12471 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax; |
12472 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vmax" ); |
12473 | case NEON::BI__builtin_neon_vmaxh_f16: { |
12474 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12475 | Int = Intrinsic::aarch64_neon_fmax; |
12476 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vmax" ); |
12477 | } |
12478 | case NEON::BI__builtin_neon_vmin_v: |
12479 | case NEON::BI__builtin_neon_vminq_v: |
12480 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
12481 | Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin; |
12482 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin; |
12483 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vmin" ); |
12484 | case NEON::BI__builtin_neon_vminh_f16: { |
12485 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12486 | Int = Intrinsic::aarch64_neon_fmin; |
12487 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vmin" ); |
12488 | } |
12489 | case NEON::BI__builtin_neon_vabd_v: |
12490 | case NEON::BI__builtin_neon_vabdq_v: |
12491 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
12492 | Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd; |
12493 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd; |
12494 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vabd" ); |
12495 | case NEON::BI__builtin_neon_vpadal_v: |
12496 | case NEON::BI__builtin_neon_vpadalq_v: { |
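    // vpadal (pairwise add and accumulate long): emit a pairwise widening add
    // (uaddlp/saddlp) of Ops[1], then add the accumulator Ops[0] to that result.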
12497 | unsigned ArgElts = VTy->getNumElements(); |
12498 | llvm::IntegerType *EltTy = cast<IntegerType>(Val: VTy->getElementType()); |
12499 | unsigned BitWidth = EltTy->getBitWidth(); |
12500 | auto *ArgTy = llvm::FixedVectorType::get( |
12501 | ElementType: llvm::IntegerType::get(C&: getLLVMContext(), NumBits: BitWidth / 2), NumElts: 2 * ArgElts); |
12502 | llvm::Type* Tys[2] = { VTy, ArgTy }; |
12503 | Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp; |
12504 | SmallVector<llvm::Value*, 1> TmpOps; |
12505 | TmpOps.push_back(Elt: Ops[1]); |
12506 | Function *F = CGM.getIntrinsic(IID: Int, Tys); |
12507 | llvm::Value *tmp = EmitNeonCall(F, Ops&: TmpOps, name: "vpadal" ); |
12508 | llvm::Value *addend = Builder.CreateBitCast(V: Ops[0], DestTy: tmp->getType()); |
12509 | return Builder.CreateAdd(LHS: tmp, RHS: addend); |
12510 | } |
12511 | case NEON::BI__builtin_neon_vpmin_v: |
12512 | case NEON::BI__builtin_neon_vpminq_v: |
12513 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
12514 | Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp; |
12515 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp; |
12516 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vpmin" ); |
12517 | case NEON::BI__builtin_neon_vpmax_v: |
12518 | case NEON::BI__builtin_neon_vpmaxq_v: |
12519 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
12520 | Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp; |
12521 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp; |
12522 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vpmax" ); |
12523 | case NEON::BI__builtin_neon_vminnm_v: |
12524 | case NEON::BI__builtin_neon_vminnmq_v: |
12525 | Int = Intrinsic::aarch64_neon_fminnm; |
12526 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vminnm" ); |
12527 | case NEON::BI__builtin_neon_vminnmh_f16: |
12528 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12529 | Int = Intrinsic::aarch64_neon_fminnm; |
12530 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vminnm" ); |
12531 | case NEON::BI__builtin_neon_vmaxnm_v: |
12532 | case NEON::BI__builtin_neon_vmaxnmq_v: |
12533 | Int = Intrinsic::aarch64_neon_fmaxnm; |
12534 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vmaxnm" ); |
12535 | case NEON::BI__builtin_neon_vmaxnmh_f16: |
12536 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12537 | Int = Intrinsic::aarch64_neon_fmaxnm; |
12538 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vmaxnm" ); |
12539 | case NEON::BI__builtin_neon_vrecpss_f32: { |
12540 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12541 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_frecps, Tys: FloatTy), |
12542 | Ops, name: "vrecps" ); |
12543 | } |
12544 | case NEON::BI__builtin_neon_vrecpsd_f64: |
12545 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12546 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_frecps, Tys: DoubleTy), |
12547 | Ops, name: "vrecps" ); |
12548 | case NEON::BI__builtin_neon_vrecpsh_f16: |
12549 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
12550 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_frecps, Tys: HalfTy), |
12551 | Ops, name: "vrecps" ); |
12552 | case NEON::BI__builtin_neon_vqshrun_n_v: |
12553 | Int = Intrinsic::aarch64_neon_sqshrun; |
12554 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vqshrun_n" ); |
12555 | case NEON::BI__builtin_neon_vqrshrun_n_v: |
12556 | Int = Intrinsic::aarch64_neon_sqrshrun; |
12557 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vqrshrun_n" ); |
12558 | case NEON::BI__builtin_neon_vqshrn_n_v: |
12559 | Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn; |
12560 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vqshrn_n" ); |
12561 | case NEON::BI__builtin_neon_vrshrn_n_v: |
12562 | Int = Intrinsic::aarch64_neon_rshrn; |
12563 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrshrn_n" ); |
12564 | case NEON::BI__builtin_neon_vqrshrn_n_v: |
12565 | Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn; |
12566 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vqrshrn_n" ); |
12567 | case NEON::BI__builtin_neon_vrndah_f16: { |
12568 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12569 | Int = Builder.getIsFPConstrained() |
12570 | ? Intrinsic::experimental_constrained_round |
12571 | : Intrinsic::round; |
12572 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vrnda" ); |
12573 | } |
12574 | case NEON::BI__builtin_neon_vrnda_v: |
12575 | case NEON::BI__builtin_neon_vrndaq_v: { |
12576 | Int = Builder.getIsFPConstrained() |
12577 | ? Intrinsic::experimental_constrained_round |
12578 | : Intrinsic::round; |
12579 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrnda" ); |
12580 | } |
12581 | case NEON::BI__builtin_neon_vrndih_f16: { |
12582 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12583 | Int = Builder.getIsFPConstrained() |
12584 | ? Intrinsic::experimental_constrained_nearbyint |
12585 | : Intrinsic::nearbyint; |
12586 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vrndi" ); |
12587 | } |
12588 | case NEON::BI__builtin_neon_vrndmh_f16: { |
12589 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12590 | Int = Builder.getIsFPConstrained() |
12591 | ? Intrinsic::experimental_constrained_floor |
12592 | : Intrinsic::floor; |
12593 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vrndm" ); |
12594 | } |
12595 | case NEON::BI__builtin_neon_vrndm_v: |
12596 | case NEON::BI__builtin_neon_vrndmq_v: { |
12597 | Int = Builder.getIsFPConstrained() |
12598 | ? Intrinsic::experimental_constrained_floor |
12599 | : Intrinsic::floor; |
12600 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrndm" ); |
12601 | } |
12602 | case NEON::BI__builtin_neon_vrndnh_f16: { |
12603 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12604 | Int = Builder.getIsFPConstrained() |
12605 | ? Intrinsic::experimental_constrained_roundeven |
12606 | : Intrinsic::roundeven; |
12607 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vrndn" ); |
12608 | } |
12609 | case NEON::BI__builtin_neon_vrndn_v: |
12610 | case NEON::BI__builtin_neon_vrndnq_v: { |
12611 | Int = Builder.getIsFPConstrained() |
12612 | ? Intrinsic::experimental_constrained_roundeven |
12613 | : Intrinsic::roundeven; |
12614 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrndn" ); |
12615 | } |
12616 | case NEON::BI__builtin_neon_vrndns_f32: { |
12617 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12618 | Int = Builder.getIsFPConstrained() |
12619 | ? Intrinsic::experimental_constrained_roundeven |
12620 | : Intrinsic::roundeven; |
12621 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: FloatTy), Ops, name: "vrndn" ); |
12622 | } |
12623 | case NEON::BI__builtin_neon_vrndph_f16: { |
12624 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12625 | Int = Builder.getIsFPConstrained() |
12626 | ? Intrinsic::experimental_constrained_ceil |
12627 | : Intrinsic::ceil; |
12628 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vrndp" ); |
12629 | } |
12630 | case NEON::BI__builtin_neon_vrndp_v: |
12631 | case NEON::BI__builtin_neon_vrndpq_v: { |
12632 | Int = Builder.getIsFPConstrained() |
12633 | ? Intrinsic::experimental_constrained_ceil |
12634 | : Intrinsic::ceil; |
12635 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrndp" ); |
12636 | } |
12637 | case NEON::BI__builtin_neon_vrndxh_f16: { |
12638 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12639 | Int = Builder.getIsFPConstrained() |
12640 | ? Intrinsic::experimental_constrained_rint |
12641 | : Intrinsic::rint; |
12642 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vrndx" ); |
12643 | } |
12644 | case NEON::BI__builtin_neon_vrndx_v: |
12645 | case NEON::BI__builtin_neon_vrndxq_v: { |
12646 | Int = Builder.getIsFPConstrained() |
12647 | ? Intrinsic::experimental_constrained_rint |
12648 | : Intrinsic::rint; |
12649 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrndx" ); |
12650 | } |
12651 | case NEON::BI__builtin_neon_vrndh_f16: { |
12652 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12653 | Int = Builder.getIsFPConstrained() |
12654 | ? Intrinsic::experimental_constrained_trunc |
12655 | : Intrinsic::trunc; |
12656 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vrndz" ); |
12657 | } |
12658 | case NEON::BI__builtin_neon_vrnd32x_f32: |
12659 | case NEON::BI__builtin_neon_vrnd32xq_f32: |
12660 | case NEON::BI__builtin_neon_vrnd32x_f64: |
12661 | case NEON::BI__builtin_neon_vrnd32xq_f64: { |
12662 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12663 | Int = Intrinsic::aarch64_neon_frint32x; |
12664 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrnd32x" ); |
12665 | } |
12666 | case NEON::BI__builtin_neon_vrnd32z_f32: |
12667 | case NEON::BI__builtin_neon_vrnd32zq_f32: |
12668 | case NEON::BI__builtin_neon_vrnd32z_f64: |
12669 | case NEON::BI__builtin_neon_vrnd32zq_f64: { |
12670 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12671 | Int = Intrinsic::aarch64_neon_frint32z; |
12672 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrnd32z" ); |
12673 | } |
12674 | case NEON::BI__builtin_neon_vrnd64x_f32: |
12675 | case NEON::BI__builtin_neon_vrnd64xq_f32: |
12676 | case NEON::BI__builtin_neon_vrnd64x_f64: |
12677 | case NEON::BI__builtin_neon_vrnd64xq_f64: { |
12678 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12679 | Int = Intrinsic::aarch64_neon_frint64x; |
12680 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrnd64x" ); |
12681 | } |
12682 | case NEON::BI__builtin_neon_vrnd64z_f32: |
12683 | case NEON::BI__builtin_neon_vrnd64zq_f32: |
12684 | case NEON::BI__builtin_neon_vrnd64z_f64: |
12685 | case NEON::BI__builtin_neon_vrnd64zq_f64: { |
12686 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12687 | Int = Intrinsic::aarch64_neon_frint64z; |
12688 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrnd64z" ); |
12689 | } |
12690 | case NEON::BI__builtin_neon_vrnd_v: |
12691 | case NEON::BI__builtin_neon_vrndq_v: { |
12692 | Int = Builder.getIsFPConstrained() |
12693 | ? Intrinsic::experimental_constrained_trunc |
12694 | : Intrinsic::trunc; |
12695 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrndz" ); |
12696 | } |
12697 | case NEON::BI__builtin_neon_vcvt_f64_v: |
12698 | case NEON::BI__builtin_neon_vcvtq_f64_v: |
12699 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
12700 | Ty = GetNeonType(CGF: this, TypeFlags: NeonTypeFlags(NeonTypeFlags::Float64, false, quad)); |
12701 | return usgn ? Builder.CreateUIToFP(V: Ops[0], DestTy: Ty, Name: "vcvt" ) |
12702 | : Builder.CreateSIToFP(V: Ops[0], DestTy: Ty, Name: "vcvt" ); |
12703 | case NEON::BI__builtin_neon_vcvt_f64_f32: { |
12704 | assert(Type.getEltType() == NeonTypeFlags::Float64 && quad && |
12705 | "unexpected vcvt_f64_f32 builtin" ); |
12706 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false); |
12707 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: GetNeonType(CGF: this, TypeFlags: SrcFlag)); |
12708 | |
12709 | return Builder.CreateFPExt(V: Ops[0], DestTy: Ty, Name: "vcvt" ); |
12710 | } |
12711 | case NEON::BI__builtin_neon_vcvt_f32_f64: { |
12712 | assert(Type.getEltType() == NeonTypeFlags::Float32 && |
12713 | "unexpected vcvt_f32_f64 builtin" ); |
12714 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true); |
12715 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: GetNeonType(CGF: this, TypeFlags: SrcFlag)); |
12716 | |
12717 | return Builder.CreateFPTrunc(V: Ops[0], DestTy: Ty, Name: "vcvt" ); |
12718 | } |
12719 | case NEON::BI__builtin_neon_vcvt_s32_v: |
12720 | case NEON::BI__builtin_neon_vcvt_u32_v: |
12721 | case NEON::BI__builtin_neon_vcvt_s64_v: |
12722 | case NEON::BI__builtin_neon_vcvt_u64_v: |
12723 | case NEON::BI__builtin_neon_vcvt_s16_f16: |
12724 | case NEON::BI__builtin_neon_vcvt_u16_f16: |
12725 | case NEON::BI__builtin_neon_vcvtq_s32_v: |
12726 | case NEON::BI__builtin_neon_vcvtq_u32_v: |
12727 | case NEON::BI__builtin_neon_vcvtq_s64_v: |
12728 | case NEON::BI__builtin_neon_vcvtq_u64_v: |
12729 | case NEON::BI__builtin_neon_vcvtq_s16_f16: |
12730 | case NEON::BI__builtin_neon_vcvtq_u16_f16: { |
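    // Round-toward-zero FP->int conversions map onto fcvtzu/fcvtzs; the
    // intrinsic is overloaded on both the integer result vector type and the
    // source floating-point vector type.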
12731 | Int = |
12732 | usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs; |
12733 | llvm::Type *Tys[2] = {Ty, GetFloatNeonType(CGF: this, IntTypeFlags: Type)}; |
12734 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vcvtz" ); |
12735 | } |
12736 | case NEON::BI__builtin_neon_vcvta_s16_f16: |
12737 | case NEON::BI__builtin_neon_vcvta_u16_f16: |
12738 | case NEON::BI__builtin_neon_vcvta_s32_v: |
12739 | case NEON::BI__builtin_neon_vcvtaq_s16_f16: |
12740 | case NEON::BI__builtin_neon_vcvtaq_s32_v: |
12741 | case NEON::BI__builtin_neon_vcvta_u32_v: |
12742 | case NEON::BI__builtin_neon_vcvtaq_u16_f16: |
12743 | case NEON::BI__builtin_neon_vcvtaq_u32_v: |
12744 | case NEON::BI__builtin_neon_vcvta_s64_v: |
12745 | case NEON::BI__builtin_neon_vcvtaq_s64_v: |
12746 | case NEON::BI__builtin_neon_vcvta_u64_v: |
12747 | case NEON::BI__builtin_neon_vcvtaq_u64_v: { |
12748 | Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas; |
12749 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(CGF: this, IntTypeFlags: Type) }; |
12750 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vcvta" ); |
12751 | } |
12752 | case NEON::BI__builtin_neon_vcvtm_s16_f16: |
12753 | case NEON::BI__builtin_neon_vcvtm_s32_v: |
12754 | case NEON::BI__builtin_neon_vcvtmq_s16_f16: |
12755 | case NEON::BI__builtin_neon_vcvtmq_s32_v: |
12756 | case NEON::BI__builtin_neon_vcvtm_u16_f16: |
12757 | case NEON::BI__builtin_neon_vcvtm_u32_v: |
12758 | case NEON::BI__builtin_neon_vcvtmq_u16_f16: |
12759 | case NEON::BI__builtin_neon_vcvtmq_u32_v: |
12760 | case NEON::BI__builtin_neon_vcvtm_s64_v: |
12761 | case NEON::BI__builtin_neon_vcvtmq_s64_v: |
12762 | case NEON::BI__builtin_neon_vcvtm_u64_v: |
12763 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { |
12764 | Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms; |
12765 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(CGF: this, IntTypeFlags: Type) }; |
12766 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vcvtm" ); |
12767 | } |
12768 | case NEON::BI__builtin_neon_vcvtn_s16_f16: |
12769 | case NEON::BI__builtin_neon_vcvtn_s32_v: |
12770 | case NEON::BI__builtin_neon_vcvtnq_s16_f16: |
12771 | case NEON::BI__builtin_neon_vcvtnq_s32_v: |
12772 | case NEON::BI__builtin_neon_vcvtn_u16_f16: |
12773 | case NEON::BI__builtin_neon_vcvtn_u32_v: |
12774 | case NEON::BI__builtin_neon_vcvtnq_u16_f16: |
12775 | case NEON::BI__builtin_neon_vcvtnq_u32_v: |
12776 | case NEON::BI__builtin_neon_vcvtn_s64_v: |
12777 | case NEON::BI__builtin_neon_vcvtnq_s64_v: |
12778 | case NEON::BI__builtin_neon_vcvtn_u64_v: |
12779 | case NEON::BI__builtin_neon_vcvtnq_u64_v: { |
12780 | Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns; |
12781 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(CGF: this, IntTypeFlags: Type) }; |
12782 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vcvtn" ); |
12783 | } |
12784 | case NEON::BI__builtin_neon_vcvtp_s16_f16: |
12785 | case NEON::BI__builtin_neon_vcvtp_s32_v: |
12786 | case NEON::BI__builtin_neon_vcvtpq_s16_f16: |
12787 | case NEON::BI__builtin_neon_vcvtpq_s32_v: |
12788 | case NEON::BI__builtin_neon_vcvtp_u16_f16: |
12789 | case NEON::BI__builtin_neon_vcvtp_u32_v: |
12790 | case NEON::BI__builtin_neon_vcvtpq_u16_f16: |
12791 | case NEON::BI__builtin_neon_vcvtpq_u32_v: |
12792 | case NEON::BI__builtin_neon_vcvtp_s64_v: |
12793 | case NEON::BI__builtin_neon_vcvtpq_s64_v: |
12794 | case NEON::BI__builtin_neon_vcvtp_u64_v: |
12795 | case NEON::BI__builtin_neon_vcvtpq_u64_v: { |
12796 | Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps; |
12797 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(CGF: this, IntTypeFlags: Type) }; |
12798 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vcvtp" ); |
12799 | } |
12800 | case NEON::BI__builtin_neon_vmulx_v: |
12801 | case NEON::BI__builtin_neon_vmulxq_v: { |
12802 | Int = Intrinsic::aarch64_neon_fmulx; |
12803 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vmulx" ); |
12804 | } |
12805 | case NEON::BI__builtin_neon_vmulxh_lane_f16: |
12806 | case NEON::BI__builtin_neon_vmulxh_laneq_f16: { |
    // vmulx_lane should be mapped to the Neon scalar mulx after extracting
    // the scalar element.
12809 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 2))); |
12810 | Ops[1] = Builder.CreateExtractElement(Vec: Ops[1], Idx: Ops[2], Name: "extract" ); |
12811 | Ops.pop_back(); |
12812 | Int = Intrinsic::aarch64_neon_fmulx; |
12813 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vmulx" ); |
12814 | } |
12815 | case NEON::BI__builtin_neon_vmul_lane_v: |
12816 | case NEON::BI__builtin_neon_vmul_laneq_v: { |
12817 | // v1f64 vmul_lane should be mapped to Neon scalar mul lane |
12818 | bool Quad = false; |
12819 | if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v) |
12820 | Quad = true; |
12821 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: DoubleTy); |
12822 | llvm::FixedVectorType *VTy = |
12823 | GetNeonType(CGF: this, TypeFlags: NeonTypeFlags(NeonTypeFlags::Float64, false, Quad)); |
12824 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: VTy); |
12825 | Ops[1] = Builder.CreateExtractElement(Vec: Ops[1], Idx: Ops[2], Name: "extract" ); |
12826 | Value *Result = Builder.CreateFMul(L: Ops[0], R: Ops[1]); |
12827 | return Builder.CreateBitCast(V: Result, DestTy: Ty); |
12828 | } |
12829 | case NEON::BI__builtin_neon_vnegd_s64: |
12830 | return Builder.CreateNeg(V: EmitScalarExpr(E: E->getArg(Arg: 0)), Name: "vnegd" ); |
12831 | case NEON::BI__builtin_neon_vnegh_f16: |
12832 | return Builder.CreateFNeg(V: EmitScalarExpr(E: E->getArg(Arg: 0)), Name: "vnegh" ); |
12833 | case NEON::BI__builtin_neon_vpmaxnm_v: |
12834 | case NEON::BI__builtin_neon_vpmaxnmq_v: { |
12835 | Int = Intrinsic::aarch64_neon_fmaxnmp; |
12836 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vpmaxnm" ); |
12837 | } |
12838 | case NEON::BI__builtin_neon_vpminnm_v: |
12839 | case NEON::BI__builtin_neon_vpminnmq_v: { |
12840 | Int = Intrinsic::aarch64_neon_fminnmp; |
12841 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vpminnm" ); |
12842 | } |
12843 | case NEON::BI__builtin_neon_vsqrth_f16: { |
12844 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12845 | Int = Builder.getIsFPConstrained() |
12846 | ? Intrinsic::experimental_constrained_sqrt |
12847 | : Intrinsic::sqrt; |
12848 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: HalfTy), Ops, name: "vsqrt" ); |
12849 | } |
12850 | case NEON::BI__builtin_neon_vsqrt_v: |
12851 | case NEON::BI__builtin_neon_vsqrtq_v: { |
12852 | Int = Builder.getIsFPConstrained() |
12853 | ? Intrinsic::experimental_constrained_sqrt |
12854 | : Intrinsic::sqrt; |
12855 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
12856 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vsqrt" ); |
12857 | } |
12858 | case NEON::BI__builtin_neon_vrbit_v: |
12859 | case NEON::BI__builtin_neon_vrbitq_v: { |
12860 | Int = Intrinsic::bitreverse; |
12861 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vrbit" ); |
12862 | } |
12863 | case NEON::BI__builtin_neon_vaddv_u8: |
12864 | // FIXME: These are handled by the AArch64 scalar code. |
12865 | usgn = true; |
12866 | [[fallthrough]]; |
12867 | case NEON::BI__builtin_neon_vaddv_s8: { |
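    // The across-vector add intrinsic is emitted with an i32 result, which is
    // then truncated back to the element width; the integer vaddv/vmaxv/vminv
    // cases below follow the same pattern.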
12868 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
12869 | Ty = Int32Ty; |
12870 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 8); |
12871 | llvm::Type *Tys[2] = { Ty, VTy }; |
12872 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12873 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddv" ); |
12874 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty); |
12875 | } |
12876 | case NEON::BI__builtin_neon_vaddv_u16: |
12877 | usgn = true; |
12878 | [[fallthrough]]; |
12879 | case NEON::BI__builtin_neon_vaddv_s16: { |
12880 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
12881 | Ty = Int32Ty; |
12882 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 4); |
12883 | llvm::Type *Tys[2] = { Ty, VTy }; |
12884 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12885 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddv" ); |
12886 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
12887 | } |
12888 | case NEON::BI__builtin_neon_vaddvq_u8: |
12889 | usgn = true; |
12890 | [[fallthrough]]; |
12891 | case NEON::BI__builtin_neon_vaddvq_s8: { |
12892 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
12893 | Ty = Int32Ty; |
12894 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 16); |
12895 | llvm::Type *Tys[2] = { Ty, VTy }; |
12896 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12897 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddv" ); |
12898 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty); |
12899 | } |
12900 | case NEON::BI__builtin_neon_vaddvq_u16: |
12901 | usgn = true; |
12902 | [[fallthrough]]; |
12903 | case NEON::BI__builtin_neon_vaddvq_s16: { |
12904 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
12905 | Ty = Int32Ty; |
12906 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 8); |
12907 | llvm::Type *Tys[2] = { Ty, VTy }; |
12908 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12909 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddv" ); |
12910 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
12911 | } |
12912 | case NEON::BI__builtin_neon_vmaxv_u8: { |
12913 | Int = Intrinsic::aarch64_neon_umaxv; |
12914 | Ty = Int32Ty; |
12915 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 8); |
12916 | llvm::Type *Tys[2] = { Ty, VTy }; |
12917 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12918 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxv" ); |
12919 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty); |
12920 | } |
12921 | case NEON::BI__builtin_neon_vmaxv_u16: { |
12922 | Int = Intrinsic::aarch64_neon_umaxv; |
12923 | Ty = Int32Ty; |
12924 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 4); |
12925 | llvm::Type *Tys[2] = { Ty, VTy }; |
12926 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12927 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxv" ); |
12928 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
12929 | } |
12930 | case NEON::BI__builtin_neon_vmaxvq_u8: { |
12931 | Int = Intrinsic::aarch64_neon_umaxv; |
12932 | Ty = Int32Ty; |
12933 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 16); |
12934 | llvm::Type *Tys[2] = { Ty, VTy }; |
12935 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12936 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxv" ); |
12937 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty); |
12938 | } |
12939 | case NEON::BI__builtin_neon_vmaxvq_u16: { |
12940 | Int = Intrinsic::aarch64_neon_umaxv; |
12941 | Ty = Int32Ty; |
12942 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 8); |
12943 | llvm::Type *Tys[2] = { Ty, VTy }; |
12944 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12945 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxv" ); |
12946 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
12947 | } |
12948 | case NEON::BI__builtin_neon_vmaxv_s8: { |
12949 | Int = Intrinsic::aarch64_neon_smaxv; |
12950 | Ty = Int32Ty; |
12951 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 8); |
12952 | llvm::Type *Tys[2] = { Ty, VTy }; |
12953 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12954 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxv" ); |
12955 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty); |
12956 | } |
12957 | case NEON::BI__builtin_neon_vmaxv_s16: { |
12958 | Int = Intrinsic::aarch64_neon_smaxv; |
12959 | Ty = Int32Ty; |
12960 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 4); |
12961 | llvm::Type *Tys[2] = { Ty, VTy }; |
12962 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12963 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxv" ); |
12964 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
12965 | } |
12966 | case NEON::BI__builtin_neon_vmaxvq_s8: { |
12967 | Int = Intrinsic::aarch64_neon_smaxv; |
12968 | Ty = Int32Ty; |
12969 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 16); |
12970 | llvm::Type *Tys[2] = { Ty, VTy }; |
12971 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12972 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxv" ); |
12973 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty); |
12974 | } |
12975 | case NEON::BI__builtin_neon_vmaxvq_s16: { |
12976 | Int = Intrinsic::aarch64_neon_smaxv; |
12977 | Ty = Int32Ty; |
12978 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 8); |
12979 | llvm::Type *Tys[2] = { Ty, VTy }; |
12980 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12981 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxv" ); |
12982 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
12983 | } |
12984 | case NEON::BI__builtin_neon_vmaxv_f16: { |
12985 | Int = Intrinsic::aarch64_neon_fmaxv; |
12986 | Ty = HalfTy; |
12987 | VTy = llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: 4); |
12988 | llvm::Type *Tys[2] = { Ty, VTy }; |
12989 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12990 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxv" ); |
12991 | return Builder.CreateTrunc(V: Ops[0], DestTy: HalfTy); |
12992 | } |
12993 | case NEON::BI__builtin_neon_vmaxvq_f16: { |
12994 | Int = Intrinsic::aarch64_neon_fmaxv; |
12995 | Ty = HalfTy; |
12996 | VTy = llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: 8); |
12997 | llvm::Type *Tys[2] = { Ty, VTy }; |
12998 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
12999 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxv" ); |
13000 | return Builder.CreateTrunc(V: Ops[0], DestTy: HalfTy); |
13001 | } |
13002 | case NEON::BI__builtin_neon_vminv_u8: { |
13003 | Int = Intrinsic::aarch64_neon_uminv; |
13004 | Ty = Int32Ty; |
13005 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 8); |
13006 | llvm::Type *Tys[2] = { Ty, VTy }; |
13007 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13008 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminv" ); |
13009 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty); |
13010 | } |
13011 | case NEON::BI__builtin_neon_vminv_u16: { |
13012 | Int = Intrinsic::aarch64_neon_uminv; |
13013 | Ty = Int32Ty; |
13014 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 4); |
13015 | llvm::Type *Tys[2] = { Ty, VTy }; |
13016 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13017 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminv" ); |
13018 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
13019 | } |
13020 | case NEON::BI__builtin_neon_vminvq_u8: { |
13021 | Int = Intrinsic::aarch64_neon_uminv; |
13022 | Ty = Int32Ty; |
13023 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 16); |
13024 | llvm::Type *Tys[2] = { Ty, VTy }; |
13025 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13026 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminv" ); |
13027 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty); |
13028 | } |
13029 | case NEON::BI__builtin_neon_vminvq_u16: { |
13030 | Int = Intrinsic::aarch64_neon_uminv; |
13031 | Ty = Int32Ty; |
13032 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 8); |
13033 | llvm::Type *Tys[2] = { Ty, VTy }; |
13034 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13035 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminv" ); |
13036 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
13037 | } |
13038 | case NEON::BI__builtin_neon_vminv_s8: { |
13039 | Int = Intrinsic::aarch64_neon_sminv; |
13040 | Ty = Int32Ty; |
13041 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 8); |
13042 | llvm::Type *Tys[2] = { Ty, VTy }; |
13043 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13044 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminv" ); |
13045 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty); |
13046 | } |
13047 | case NEON::BI__builtin_neon_vminv_s16: { |
13048 | Int = Intrinsic::aarch64_neon_sminv; |
13049 | Ty = Int32Ty; |
13050 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 4); |
13051 | llvm::Type *Tys[2] = { Ty, VTy }; |
13052 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13053 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminv" ); |
13054 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
13055 | } |
13056 | case NEON::BI__builtin_neon_vminvq_s8: { |
13057 | Int = Intrinsic::aarch64_neon_sminv; |
13058 | Ty = Int32Ty; |
13059 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 16); |
13060 | llvm::Type *Tys[2] = { Ty, VTy }; |
13061 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13062 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminv" ); |
13063 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty); |
13064 | } |
13065 | case NEON::BI__builtin_neon_vminvq_s16: { |
13066 | Int = Intrinsic::aarch64_neon_sminv; |
13067 | Ty = Int32Ty; |
13068 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 8); |
13069 | llvm::Type *Tys[2] = { Ty, VTy }; |
13070 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13071 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminv" ); |
13072 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
13073 | } |
13074 | case NEON::BI__builtin_neon_vminv_f16: { |
13075 | Int = Intrinsic::aarch64_neon_fminv; |
13076 | Ty = HalfTy; |
13077 | VTy = llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: 4); |
13078 | llvm::Type *Tys[2] = { Ty, VTy }; |
13079 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13080 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminv" ); |
13081 | return Builder.CreateTrunc(V: Ops[0], DestTy: HalfTy); |
13082 | } |
13083 | case NEON::BI__builtin_neon_vminvq_f16: { |
13084 | Int = Intrinsic::aarch64_neon_fminv; |
13085 | Ty = HalfTy; |
13086 | VTy = llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: 8); |
13087 | llvm::Type *Tys[2] = { Ty, VTy }; |
13088 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13089 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminv" ); |
13090 | return Builder.CreateTrunc(V: Ops[0], DestTy: HalfTy); |
13091 | } |
13092 | case NEON::BI__builtin_neon_vmaxnmv_f16: { |
13093 | Int = Intrinsic::aarch64_neon_fmaxnmv; |
13094 | Ty = HalfTy; |
13095 | VTy = llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: 4); |
13096 | llvm::Type *Tys[2] = { Ty, VTy }; |
13097 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13098 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxnmv" ); |
13099 | return Builder.CreateTrunc(V: Ops[0], DestTy: HalfTy); |
13100 | } |
13101 | case NEON::BI__builtin_neon_vmaxnmvq_f16: { |
13102 | Int = Intrinsic::aarch64_neon_fmaxnmv; |
13103 | Ty = HalfTy; |
13104 | VTy = llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: 8); |
13105 | llvm::Type *Tys[2] = { Ty, VTy }; |
13106 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13107 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vmaxnmv" ); |
13108 | return Builder.CreateTrunc(V: Ops[0], DestTy: HalfTy); |
13109 | } |
13110 | case NEON::BI__builtin_neon_vminnmv_f16: { |
13111 | Int = Intrinsic::aarch64_neon_fminnmv; |
13112 | Ty = HalfTy; |
13113 | VTy = llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: 4); |
13114 | llvm::Type *Tys[2] = { Ty, VTy }; |
13115 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13116 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminnmv" ); |
13117 | return Builder.CreateTrunc(V: Ops[0], DestTy: HalfTy); |
13118 | } |
13119 | case NEON::BI__builtin_neon_vminnmvq_f16: { |
13120 | Int = Intrinsic::aarch64_neon_fminnmv; |
13121 | Ty = HalfTy; |
13122 | VTy = llvm::FixedVectorType::get(ElementType: HalfTy, NumElts: 8); |
13123 | llvm::Type *Tys[2] = { Ty, VTy }; |
13124 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13125 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vminnmv" ); |
13126 | return Builder.CreateTrunc(V: Ops[0], DestTy: HalfTy); |
13127 | } |
13128 | case NEON::BI__builtin_neon_vmul_n_f64: { |
13129 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: DoubleTy); |
13130 | Value *RHS = Builder.CreateBitCast(V: EmitScalarExpr(E: E->getArg(Arg: 1)), DestTy: DoubleTy); |
13131 | return Builder.CreateFMul(L: Ops[0], R: RHS); |
13132 | } |
13133 | case NEON::BI__builtin_neon_vaddlv_u8: { |
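    // vaddlv is a widening across-vector add: the intrinsic returns an i32, and
    // the 8-bit-element forms truncate that down to the builtin's i16 result.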
13134 | Int = Intrinsic::aarch64_neon_uaddlv; |
13135 | Ty = Int32Ty; |
13136 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 8); |
13137 | llvm::Type *Tys[2] = { Ty, VTy }; |
13138 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13139 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddlv" ); |
13140 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
13141 | } |
13142 | case NEON::BI__builtin_neon_vaddlv_u16: { |
13143 | Int = Intrinsic::aarch64_neon_uaddlv; |
13144 | Ty = Int32Ty; |
13145 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 4); |
13146 | llvm::Type *Tys[2] = { Ty, VTy }; |
13147 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13148 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddlv" ); |
13149 | } |
13150 | case NEON::BI__builtin_neon_vaddlvq_u8: { |
13151 | Int = Intrinsic::aarch64_neon_uaddlv; |
13152 | Ty = Int32Ty; |
13153 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 16); |
13154 | llvm::Type *Tys[2] = { Ty, VTy }; |
13155 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13156 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddlv" ); |
13157 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
13158 | } |
13159 | case NEON::BI__builtin_neon_vaddlvq_u16: { |
13160 | Int = Intrinsic::aarch64_neon_uaddlv; |
13161 | Ty = Int32Ty; |
13162 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 8); |
13163 | llvm::Type *Tys[2] = { Ty, VTy }; |
13164 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13165 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddlv" ); |
13166 | } |
13167 | case NEON::BI__builtin_neon_vaddlv_s8: { |
13168 | Int = Intrinsic::aarch64_neon_saddlv; |
13169 | Ty = Int32Ty; |
13170 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 8); |
13171 | llvm::Type *Tys[2] = { Ty, VTy }; |
13172 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13173 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddlv" ); |
13174 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
13175 | } |
13176 | case NEON::BI__builtin_neon_vaddlv_s16: { |
13177 | Int = Intrinsic::aarch64_neon_saddlv; |
13178 | Ty = Int32Ty; |
13179 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 4); |
13180 | llvm::Type *Tys[2] = { Ty, VTy }; |
13181 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13182 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddlv" ); |
13183 | } |
13184 | case NEON::BI__builtin_neon_vaddlvq_s8: { |
13185 | Int = Intrinsic::aarch64_neon_saddlv; |
13186 | Ty = Int32Ty; |
13187 | VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 16); |
13188 | llvm::Type *Tys[2] = { Ty, VTy }; |
13189 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13190 | Ops[0] = EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddlv" ); |
13191 | return Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty); |
13192 | } |
13193 | case NEON::BI__builtin_neon_vaddlvq_s16: { |
13194 | Int = Intrinsic::aarch64_neon_saddlv; |
13195 | Ty = Int32Ty; |
13196 | VTy = llvm::FixedVectorType::get(ElementType: Int16Ty, NumElts: 8); |
13197 | llvm::Type *Tys[2] = { Ty, VTy }; |
13198 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
13199 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys), Ops, name: "vaddlv" ); |
13200 | } |
13201 | case NEON::BI__builtin_neon_vsri_n_v: |
13202 | case NEON::BI__builtin_neon_vsriq_n_v: { |
13203 | Int = Intrinsic::aarch64_neon_vsri; |
13204 | llvm::Function *Intrin = CGM.getIntrinsic(IID: Int, Tys: Ty); |
13205 | return EmitNeonCall(F: Intrin, Ops, name: "vsri_n" ); |
13206 | } |
13207 | case NEON::BI__builtin_neon_vsli_n_v: |
13208 | case NEON::BI__builtin_neon_vsliq_n_v: { |
13209 | Int = Intrinsic::aarch64_neon_vsli; |
13210 | llvm::Function *Intrin = CGM.getIntrinsic(IID: Int, Tys: Ty); |
13211 | return EmitNeonCall(F: Intrin, Ops, name: "vsli_n" ); |
13212 | } |
13213 | case NEON::BI__builtin_neon_vsra_n_v: |
13214 | case NEON::BI__builtin_neon_vsraq_n_v: |
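    // vsra_n: shift each element right by the immediate, then accumulate into
    // the first operand.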
13215 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
13216 | Ops[1] = EmitNeonRShiftImm(Vec: Ops[1], Shift: Ops[2], Ty, usgn, name: "vsra_n" ); |
13217 | return Builder.CreateAdd(LHS: Ops[0], RHS: Ops[1]); |
13218 | case NEON::BI__builtin_neon_vrsra_n_v: |
13219 | case NEON::BI__builtin_neon_vrsraq_n_v: { |
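    // vrsra_n: rounding shift right and accumulate.  The rounding right shift is
    // emitted via the urshl/srshl intrinsic with a negated shift amount (the
    // rightshift path in EmitNeonCall), and the result is added to Ops[0].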
13220 | Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl; |
13221 | SmallVector<llvm::Value*,2> TmpOps; |
13222 | TmpOps.push_back(Elt: Ops[1]); |
13223 | TmpOps.push_back(Elt: Ops[2]); |
13224 | Function* F = CGM.getIntrinsic(IID: Int, Tys: Ty); |
13225 | llvm::Value *tmp = EmitNeonCall(F, Ops&: TmpOps, name: "vrshr_n" , shift: 1, rightshift: true); |
13226 | Ops[0] = Builder.CreateBitCast(V: Ops[0], DestTy: VTy); |
13227 | return Builder.CreateAdd(LHS: Ops[0], RHS: tmp); |
13228 | } |
13229 | case NEON::BI__builtin_neon_vld1_v: |
13230 | case NEON::BI__builtin_neon_vld1q_v: { |
13231 | return Builder.CreateAlignedLoad(Ty: VTy, Addr: Ops[0], Align: PtrOp0.getAlignment()); |
13232 | } |
13233 | case NEON::BI__builtin_neon_vst1_v: |
13234 | case NEON::BI__builtin_neon_vst1q_v: |
13235 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: VTy); |
13236 | return Builder.CreateAlignedStore(Val: Ops[1], Addr: Ops[0], Align: PtrOp0.getAlignment()); |
13237 | case NEON::BI__builtin_neon_vld1_lane_v: |
13238 | case NEON::BI__builtin_neon_vld1q_lane_v: { |
13239 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
13240 | Ops[0] = Builder.CreateAlignedLoad(Ty: VTy->getElementType(), Addr: Ops[0], |
13241 | Align: PtrOp0.getAlignment()); |
13242 | return Builder.CreateInsertElement(Vec: Ops[1], NewElt: Ops[0], Idx: Ops[2], Name: "vld1_lane" ); |
13243 | } |
13244 | case NEON::BI__builtin_neon_vldap1_lane_s64: |
13245 | case NEON::BI__builtin_neon_vldap1q_lane_s64: { |
13246 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
13247 | llvm::LoadInst *LI = Builder.CreateAlignedLoad( |
13248 | Ty: VTy->getElementType(), Addr: Ops[0], Align: PtrOp0.getAlignment()); |
13249 | LI->setAtomic(Ordering: llvm::AtomicOrdering::Acquire); |
13250 | Ops[0] = LI; |
13251 | return Builder.CreateInsertElement(Vec: Ops[1], NewElt: Ops[0], Idx: Ops[2], Name: "vldap1_lane" ); |
13252 | } |
13253 | case NEON::BI__builtin_neon_vld1_dup_v: |
13254 | case NEON::BI__builtin_neon_vld1q_dup_v: { |
13255 | Value *V = PoisonValue::get(T: Ty); |
13256 | Ops[0] = Builder.CreateAlignedLoad(Ty: VTy->getElementType(), Addr: Ops[0], |
13257 | Align: PtrOp0.getAlignment()); |
13258 | llvm::Constant *CI = ConstantInt::get(Ty: Int32Ty, V: 0); |
13259 | Ops[0] = Builder.CreateInsertElement(Vec: V, NewElt: Ops[0], Idx: CI); |
13260 | return EmitNeonSplat(V: Ops[0], C: CI); |
13261 | } |
13262 | case NEON::BI__builtin_neon_vst1_lane_v: |
13263 | case NEON::BI__builtin_neon_vst1q_lane_v: |
13264 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
13265 | Ops[1] = Builder.CreateExtractElement(Vec: Ops[1], Idx: Ops[2]); |
13266 | return Builder.CreateAlignedStore(Val: Ops[1], Addr: Ops[0], Align: PtrOp0.getAlignment()); |
13267 | case NEON::BI__builtin_neon_vstl1_lane_s64: |
13268 | case NEON::BI__builtin_neon_vstl1q_lane_s64: { |
13269 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
13270 | Ops[1] = Builder.CreateExtractElement(Vec: Ops[1], Idx: Ops[2]); |
13271 | llvm::StoreInst *SI = |
13272 | Builder.CreateAlignedStore(Val: Ops[1], Addr: Ops[0], Align: PtrOp0.getAlignment()); |
13273 | SI->setAtomic(Ordering: llvm::AtomicOrdering::Release); |
13274 | return SI; |
13275 | } |
13276 | case NEON::BI__builtin_neon_vld2_v: |
13277 | case NEON::BI__builtin_neon_vld2q_v: { |
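    // The ldN intrinsics return a struct of N vectors; store it through the
    // result pointer in Ops[0].  The vld3/vld4 and dup/lane variants below
    // follow the same pattern.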
13278 | llvm::Type *Tys[2] = {VTy, UnqualPtrTy}; |
13279 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_ld2, Tys); |
13280 | Ops[1] = Builder.CreateCall(Callee: F, Args: Ops[1], Name: "vld2" ); |
13281 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
13282 | } |
13283 | case NEON::BI__builtin_neon_vld3_v: |
13284 | case NEON::BI__builtin_neon_vld3q_v: { |
13285 | llvm::Type *Tys[2] = {VTy, UnqualPtrTy}; |
13286 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_ld3, Tys); |
13287 | Ops[1] = Builder.CreateCall(Callee: F, Args: Ops[1], Name: "vld3" ); |
13288 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
13289 | } |
13290 | case NEON::BI__builtin_neon_vld4_v: |
13291 | case NEON::BI__builtin_neon_vld4q_v: { |
13292 | llvm::Type *Tys[2] = {VTy, UnqualPtrTy}; |
13293 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_ld4, Tys); |
13294 | Ops[1] = Builder.CreateCall(Callee: F, Args: Ops[1], Name: "vld4" ); |
13295 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
13296 | } |
13297 | case NEON::BI__builtin_neon_vld2_dup_v: |
13298 | case NEON::BI__builtin_neon_vld2q_dup_v: { |
13299 | llvm::Type *Tys[2] = {VTy, UnqualPtrTy}; |
13300 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_ld2r, Tys); |
13301 | Ops[1] = Builder.CreateCall(Callee: F, Args: Ops[1], Name: "vld2" ); |
13302 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
13303 | } |
13304 | case NEON::BI__builtin_neon_vld3_dup_v: |
13305 | case NEON::BI__builtin_neon_vld3q_dup_v: { |
13306 | llvm::Type *Tys[2] = {VTy, UnqualPtrTy}; |
13307 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_ld3r, Tys); |
13308 | Ops[1] = Builder.CreateCall(Callee: F, Args: Ops[1], Name: "vld3" ); |
13309 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
13310 | } |
13311 | case NEON::BI__builtin_neon_vld4_dup_v: |
13312 | case NEON::BI__builtin_neon_vld4q_dup_v: { |
13313 | llvm::Type *Tys[2] = {VTy, UnqualPtrTy}; |
13314 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_ld4r, Tys); |
13315 | Ops[1] = Builder.CreateCall(Callee: F, Args: Ops[1], Name: "vld4" ); |
13316 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
13317 | } |
13318 | case NEON::BI__builtin_neon_vld2_lane_v: |
13319 | case NEON::BI__builtin_neon_vld2q_lane_v: { |
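    // Rotate the source pointer (Ops[1]) to the end so the intrinsic call sees
    // (vectors..., lane, pointer), the operand order expected by ldNlane.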
13320 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
13321 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_ld2lane, Tys); |
13322 | std::rotate(first: Ops.begin() + 1, middle: Ops.begin() + 2, last: Ops.end()); |
13323 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
13324 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: Ty); |
13325 | Ops[3] = Builder.CreateZExt(V: Ops[3], DestTy: Int64Ty); |
13326 | Ops[1] = Builder.CreateCall(Callee: F, Args: ArrayRef(Ops).slice(N: 1), Name: "vld2_lane" ); |
13327 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
13328 | } |
13329 | case NEON::BI__builtin_neon_vld3_lane_v: |
13330 | case NEON::BI__builtin_neon_vld3q_lane_v: { |
13331 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
13332 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_ld3lane, Tys); |
13333 | std::rotate(first: Ops.begin() + 1, middle: Ops.begin() + 2, last: Ops.end()); |
13334 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
13335 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: Ty); |
13336 | Ops[3] = Builder.CreateBitCast(V: Ops[3], DestTy: Ty); |
13337 | Ops[4] = Builder.CreateZExt(V: Ops[4], DestTy: Int64Ty); |
13338 | Ops[1] = Builder.CreateCall(Callee: F, Args: ArrayRef(Ops).slice(N: 1), Name: "vld3_lane" ); |
13339 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
13340 | } |
13341 | case NEON::BI__builtin_neon_vld4_lane_v: |
13342 | case NEON::BI__builtin_neon_vld4q_lane_v: { |
13343 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
13344 | Function *F = CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_ld4lane, Tys); |
13345 | std::rotate(first: Ops.begin() + 1, middle: Ops.begin() + 2, last: Ops.end()); |
13346 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
13347 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: Ty); |
13348 | Ops[3] = Builder.CreateBitCast(V: Ops[3], DestTy: Ty); |
13349 | Ops[4] = Builder.CreateBitCast(V: Ops[4], DestTy: Ty); |
13350 | Ops[5] = Builder.CreateZExt(V: Ops[5], DestTy: Int64Ty); |
13351 | Ops[1] = Builder.CreateCall(Callee: F, Args: ArrayRef(Ops).slice(N: 1), Name: "vld4_lane" ); |
13352 | return Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
13353 | } |
13354 | case NEON::BI__builtin_neon_vst2_v: |
13355 | case NEON::BI__builtin_neon_vst2q_v: { |
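    // Move the destination pointer (Ops[0]) to the end: the stN intrinsics take
    // (vectors..., pointer).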
13356 | std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end()); |
13357 | llvm::Type *Tys[2] = { VTy, Ops[2]->getType() }; |
13358 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_st2, Tys), |
13359 | Ops, name: "" ); |
13360 | } |
13361 | case NEON::BI__builtin_neon_vst2_lane_v: |
13362 | case NEON::BI__builtin_neon_vst2q_lane_v: { |
13363 | std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end()); |
13364 | Ops[2] = Builder.CreateZExt(V: Ops[2], DestTy: Int64Ty); |
13365 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; |
13366 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_st2lane, Tys), |
13367 | Ops, name: "" ); |
13368 | } |
13369 | case NEON::BI__builtin_neon_vst3_v: |
13370 | case NEON::BI__builtin_neon_vst3q_v: { |
13371 | std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end()); |
13372 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; |
13373 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_st3, Tys), |
13374 | Ops, name: "" ); |
13375 | } |
13376 | case NEON::BI__builtin_neon_vst3_lane_v: |
13377 | case NEON::BI__builtin_neon_vst3q_lane_v: { |
13378 | std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end()); |
13379 | Ops[3] = Builder.CreateZExt(V: Ops[3], DestTy: Int64Ty); |
13380 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; |
13381 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_st3lane, Tys), |
13382 | Ops, name: "" ); |
13383 | } |
13384 | case NEON::BI__builtin_neon_vst4_v: |
13385 | case NEON::BI__builtin_neon_vst4q_v: { |
13386 | std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end()); |
13387 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; |
13388 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_st4, Tys), |
13389 | Ops, name: "" ); |
13390 | } |
13391 | case NEON::BI__builtin_neon_vst4_lane_v: |
13392 | case NEON::BI__builtin_neon_vst4q_lane_v: { |
13393 | std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end()); |
13394 | Ops[4] = Builder.CreateZExt(V: Ops[4], DestTy: Int64Ty); |
13395 | llvm::Type *Tys[2] = { VTy, Ops[5]->getType() }; |
13396 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_st4lane, Tys), |
13397 | Ops, name: "" ); |
13398 | } |
13399 | case NEON::BI__builtin_neon_vtrn_v: |
13400 | case NEON::BI__builtin_neon_vtrnq_v: { |
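    // vtrn returns both results through the pointer in Ops[0]: for vi = 0 and 1
    // the shuffle mask interleaves the even (resp. odd) lanes of the two inputs,
    // and each result is stored at index vi off that pointer.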
13401 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
13402 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: Ty); |
13403 | Value *SV = nullptr; |
13404 | |
13405 | for (unsigned vi = 0; vi != 2; ++vi) { |
13406 | SmallVector<int, 16> Indices; |
13407 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
13408 | Indices.push_back(Elt: i+vi); |
13409 | Indices.push_back(Elt: i+e+vi); |
13410 | } |
13411 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ptr: Ops[0], Idx0: vi); |
13412 | SV = Builder.CreateShuffleVector(V1: Ops[1], V2: Ops[2], Mask: Indices, Name: "vtrn" ); |
13413 | SV = Builder.CreateDefaultAlignedStore(Val: SV, Addr); |
13414 | } |
13415 | return SV; |
13416 | } |
13417 | case NEON::BI__builtin_neon_vuzp_v: |
13418 | case NEON::BI__builtin_neon_vuzpq_v: { |
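    // vuzp de-interleaves: mask element 2*i+vi picks the even (vi == 0) or odd
    // (vi == 1) lanes from the concatenation of the two inputs.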
13419 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
13420 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: Ty); |
13421 | Value *SV = nullptr; |
13422 | |
13423 | for (unsigned vi = 0; vi != 2; ++vi) { |
13424 | SmallVector<int, 16> Indices; |
13425 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
13426 | Indices.push_back(Elt: 2*i+vi); |
13427 | |
13428 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ptr: Ops[0], Idx0: vi); |
13429 | SV = Builder.CreateShuffleVector(V1: Ops[1], V2: Ops[2], Mask: Indices, Name: "vuzp" ); |
13430 | SV = Builder.CreateDefaultAlignedStore(Val: SV, Addr); |
13431 | } |
13432 | return SV; |
13433 | } |
13434 | case NEON::BI__builtin_neon_vzip_v: |
13435 | case NEON::BI__builtin_neon_vzipq_v: { |
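    // vzip interleaves the two inputs: vi == 0 zips the low halves and vi == 1
    // zips the high halves.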
13436 | Ops[1] = Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
13437 | Ops[2] = Builder.CreateBitCast(V: Ops[2], DestTy: Ty); |
13438 | Value *SV = nullptr; |
13439 | |
13440 | for (unsigned vi = 0; vi != 2; ++vi) { |
13441 | SmallVector<int, 16> Indices; |
13442 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
13443 | Indices.push_back(Elt: (i + vi*e) >> 1); |
13444 | Indices.push_back(Elt: ((i + vi*e) >> 1)+e); |
13445 | } |
13446 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ptr: Ops[0], Idx0: vi); |
13447 | SV = Builder.CreateShuffleVector(V1: Ops[1], V2: Ops[2], Mask: Indices, Name: "vzip" ); |
13448 | SV = Builder.CreateDefaultAlignedStore(Val: SV, Addr); |
13449 | } |
13450 | return SV; |
13451 | } |
13452 | case NEON::BI__builtin_neon_vqtbl1q_v: { |
13453 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_tbl1, Tys: Ty), |
13454 | Ops, name: "vtbl1" ); |
13455 | } |
13456 | case NEON::BI__builtin_neon_vqtbl2q_v: { |
13457 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_tbl2, Tys: Ty), |
13458 | Ops, name: "vtbl2" ); |
13459 | } |
13460 | case NEON::BI__builtin_neon_vqtbl3q_v: { |
13461 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_tbl3, Tys: Ty), |
13462 | Ops, name: "vtbl3" ); |
13463 | } |
13464 | case NEON::BI__builtin_neon_vqtbl4q_v: { |
13465 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_tbl4, Tys: Ty), |
13466 | Ops, name: "vtbl4" ); |
13467 | } |
13468 | case NEON::BI__builtin_neon_vqtbx1q_v: { |
13469 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_tbx1, Tys: Ty), |
13470 | Ops, name: "vtbx1" ); |
13471 | } |
13472 | case NEON::BI__builtin_neon_vqtbx2q_v: { |
13473 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_tbx2, Tys: Ty), |
13474 | Ops, name: "vtbx2" ); |
13475 | } |
13476 | case NEON::BI__builtin_neon_vqtbx3q_v: { |
13477 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_tbx3, Tys: Ty), |
13478 | Ops, name: "vtbx3" ); |
13479 | } |
13480 | case NEON::BI__builtin_neon_vqtbx4q_v: { |
13481 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Intrinsic::aarch64_neon_tbx4, Tys: Ty), |
13482 | Ops, name: "vtbx4" ); |
13483 | } |
13484 | case NEON::BI__builtin_neon_vsqadd_v: |
13485 | case NEON::BI__builtin_neon_vsqaddq_v: { |
13486 | Int = Intrinsic::aarch64_neon_usqadd; |
13487 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vsqadd" ); |
13488 | } |
13489 | case NEON::BI__builtin_neon_vuqadd_v: |
13490 | case NEON::BI__builtin_neon_vuqaddq_v: { |
13491 | Int = Intrinsic::aarch64_neon_suqadd; |
13492 | return EmitNeonCall(F: CGM.getIntrinsic(IID: Int, Tys: Ty), Ops, name: "vuqadd" ); |
13493 | } |
13494 | } |
13495 | } |
13496 | |
13497 | Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID, |
13498 | const CallExpr *E) { |
13499 | assert((BuiltinID == BPF::BI__builtin_preserve_field_info || |
13500 | BuiltinID == BPF::BI__builtin_btf_type_id || |
13501 | BuiltinID == BPF::BI__builtin_preserve_type_info || |
13502 | BuiltinID == BPF::BI__builtin_preserve_enum_value) && |
13503 | "unexpected BPF builtin" ); |
13504 | |
13505 |   // A sequence number, injected into IR builtin functions, to |
13506 |   // prevent CSE when the only difference between otherwise identical |
13507 |   // calls may be just the debuginfo metadata. |
13508 | static uint32_t BuiltinSeqNum; |
13509 | |
13510 | switch (BuiltinID) { |
13511 | default: |
13512 | llvm_unreachable("Unexpected BPF builtin" ); |
13513 | case BPF::BI__builtin_preserve_field_info: { |
13514 | const Expr *Arg = E->getArg(Arg: 0); |
13515 | bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField; |
13516 | |
13517 | if (!getDebugInfo()) { |
13518 | CGM.Error(loc: E->getExprLoc(), |
13519 | error: "using __builtin_preserve_field_info() without -g" ); |
13520 | return IsBitField ? EmitLValue(E: Arg).getRawBitFieldPointer(CGF&: *this) |
13521 | : EmitLValue(E: Arg).emitRawPointer(CGF&: *this); |
13522 | } |
13523 | |
13524 | // Enable underlying preserve_*_access_index() generation. |
13525 | bool OldIsInPreservedAIRegion = IsInPreservedAIRegion; |
13526 | IsInPreservedAIRegion = true; |
13527 | Value *FieldAddr = IsBitField ? EmitLValue(E: Arg).getRawBitFieldPointer(CGF&: *this) |
13528 | : EmitLValue(E: Arg).emitRawPointer(CGF&: *this); |
13529 | IsInPreservedAIRegion = OldIsInPreservedAIRegion; |
13530 | |
13531 | ConstantInt *C = cast<ConstantInt>(Val: EmitScalarExpr(E: E->getArg(Arg: 1))); |
13532 | Value *InfoKind = ConstantInt::get(Ty: Int64Ty, V: C->getSExtValue()); |
13533 | |
13534 |     // Build the IR for the preserve_field_info intrinsic. |
13535 | llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration( |
13536 | M: &CGM.getModule(), id: llvm::Intrinsic::bpf_preserve_field_info, |
13537 | Tys: {FieldAddr->getType()}); |
13538 | return Builder.CreateCall(Callee: FnGetFieldInfo, Args: {FieldAddr, InfoKind}); |
13539 | } |
13540 | case BPF::BI__builtin_btf_type_id: |
13541 | case BPF::BI__builtin_preserve_type_info: { |
13542 | if (!getDebugInfo()) { |
13543 | CGM.Error(loc: E->getExprLoc(), error: "using builtin function without -g" ); |
13544 | return nullptr; |
13545 | } |
13546 | |
13547 | const Expr *Arg0 = E->getArg(Arg: 0); |
13548 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType( |
13549 | Ty: Arg0->getType(), Loc: Arg0->getExprLoc()); |
13550 | |
13551 | ConstantInt *Flag = cast<ConstantInt>(Val: EmitScalarExpr(E: E->getArg(Arg: 1))); |
13552 | Value *FlagValue = ConstantInt::get(Ty: Int64Ty, V: Flag->getSExtValue()); |
13553 | Value *SeqNumVal = ConstantInt::get(Ty: Int32Ty, V: BuiltinSeqNum++); |
13554 | |
13555 | llvm::Function *FnDecl; |
13556 | if (BuiltinID == BPF::BI__builtin_btf_type_id) |
13557 | FnDecl = llvm::Intrinsic::getDeclaration( |
13558 | M: &CGM.getModule(), id: llvm::Intrinsic::bpf_btf_type_id, Tys: {}); |
13559 | else |
13560 | FnDecl = llvm::Intrinsic::getDeclaration( |
13561 | M: &CGM.getModule(), id: llvm::Intrinsic::bpf_preserve_type_info, Tys: {}); |
13562 | CallInst *Fn = Builder.CreateCall(Callee: FnDecl, Args: {SeqNumVal, FlagValue}); |
13563 | Fn->setMetadata(KindID: LLVMContext::MD_preserve_access_index, Node: DbgInfo); |
13564 | return Fn; |
13565 | } |
13566 | case BPF::BI__builtin_preserve_enum_value: { |
13567 | if (!getDebugInfo()) { |
13568 | CGM.Error(loc: E->getExprLoc(), error: "using builtin function without -g" ); |
13569 | return nullptr; |
13570 | } |
13571 | |
13572 | const Expr *Arg0 = E->getArg(Arg: 0); |
13573 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType( |
13574 | Ty: Arg0->getType(), Loc: Arg0->getExprLoc()); |
13575 | |
13576 | // Find enumerator |
13577 | const auto *UO = cast<UnaryOperator>(Val: Arg0->IgnoreParens()); |
13578 | const auto *CE = cast<CStyleCastExpr>(Val: UO->getSubExpr()); |
13579 | const auto *DR = cast<DeclRefExpr>(Val: CE->getSubExpr()); |
13580 | const auto *Enumerator = cast<EnumConstantDecl>(Val: DR->getDecl()); |
13581 | |
13582 | auto InitVal = Enumerator->getInitVal(); |
13583 | std::string InitValStr; |
13584 | if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX)) |
13585 | InitValStr = std::to_string(val: InitVal.getSExtValue()); |
13586 | else |
13587 | InitValStr = std::to_string(val: InitVal.getZExtValue()); |
13588 | std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr; |
13589 | Value *EnumStrVal = Builder.CreateGlobalStringPtr(Str: EnumStr); |
13590 | |
13591 | ConstantInt *Flag = cast<ConstantInt>(Val: EmitScalarExpr(E: E->getArg(Arg: 1))); |
13592 | Value *FlagValue = ConstantInt::get(Ty: Int64Ty, V: Flag->getSExtValue()); |
13593 | Value *SeqNumVal = ConstantInt::get(Ty: Int32Ty, V: BuiltinSeqNum++); |
13594 | |
13595 | llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration( |
13596 | M: &CGM.getModule(), id: llvm::Intrinsic::bpf_preserve_enum_value, Tys: {}); |
13597 | CallInst *Fn = |
13598 | Builder.CreateCall(Callee: IntrinsicFn, Args: {SeqNumVal, EnumStrVal, FlagValue}); |
13599 | Fn->setMetadata(KindID: LLVMContext::MD_preserve_access_index, Node: DbgInfo); |
13600 | return Fn; |
13601 | } |
13602 | } |
13603 | } |
13604 | |
13605 | llvm::Value *CodeGenFunction:: |
13606 | BuildVector(ArrayRef<llvm::Value*> Ops) { |
13607 | assert((Ops.size() & (Ops.size() - 1)) == 0 && |
13608 | "Not a power-of-two sized vector!" ); |
13609 | bool AllConstants = true; |
13610 | for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i) |
13611 | AllConstants &= isa<Constant>(Val: Ops[i]); |
13612 | |
13613 | // If this is a constant vector, create a ConstantVector. |
13614 | if (AllConstants) { |
13615 | SmallVector<llvm::Constant*, 16> CstOps; |
13616 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
13617 | CstOps.push_back(Elt: cast<Constant>(Val: Ops[i])); |
13618 | return llvm::ConstantVector::get(V: CstOps); |
13619 | } |
13620 | |
13621 | // Otherwise, insertelement the values to build the vector. |
13622 | Value *Result = llvm::PoisonValue::get( |
13623 | T: llvm::FixedVectorType::get(ElementType: Ops[0]->getType(), NumElts: Ops.size())); |
13624 | |
13625 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
13626 | Result = Builder.CreateInsertElement(Vec: Result, NewElt: Ops[i], Idx: Builder.getInt64(C: i)); |
13627 | |
13628 | return Result; |
13629 | } |
13630 | |
13631 | // Convert the mask from an integer type to a vector of i1. |
13632 | static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask, |
13633 | unsigned NumElts) { |
13634 | |
13635 | auto *MaskTy = llvm::FixedVectorType::get( |
13636 | ElementType: CGF.Builder.getInt1Ty(), |
13637 | NumElts: cast<IntegerType>(Val: Mask->getType())->getBitWidth()); |
13638 | Value *MaskVec = CGF.Builder.CreateBitCast(V: Mask, DestTy: MaskTy); |
13639 | |
13640 |   // If we have fewer than 8 elements, then the starting mask was an i8 and |
13641 |   // we need to extract down to the right number of elements. |
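  // For example, an i8 mask used with a 4-element operation is bitcast to
  // <8 x i1> and then shuffled with mask <0, 1, 2, 3> down to <4 x i1>.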
13642 | if (NumElts < 8) { |
13643 | int Indices[4]; |
13644 | for (unsigned i = 0; i != NumElts; ++i) |
13645 | Indices[i] = i; |
13646 | MaskVec = CGF.Builder.CreateShuffleVector( |
13647 | V1: MaskVec, V2: MaskVec, Mask: ArrayRef(Indices, NumElts), Name: "extract" ); |
13648 | } |
13649 | return MaskVec; |
13650 | } |
13651 | |
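// Lower a masked-store builtin: Ops[0] is the pointer, Ops[1] the data
// vector, and Ops[2] the integer mask, which is converted to a vector of i1
// before emitting llvm.masked.store.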
13652 | static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
13653 | Align Alignment) { |
13654 | Value *Ptr = Ops[0]; |
13655 | |
13656 | Value *MaskVec = getMaskVecValue( |
13657 | CGF, Mask: Ops[2], |
13658 | NumElts: cast<llvm::FixedVectorType>(Val: Ops[1]->getType())->getNumElements()); |
13659 | |
13660 | return CGF.Builder.CreateMaskedStore(Val: Ops[1], Ptr, Alignment, Mask: MaskVec); |
13661 | } |
13662 | |
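// Lower a masked-load builtin: Ops[0] is the pointer, Ops[1] the pass-through
// vector (which also provides the result type), and Ops[2] the integer mask.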
13663 | static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
13664 | Align Alignment) { |
13665 | llvm::Type *Ty = Ops[1]->getType(); |
13666 | Value *Ptr = Ops[0]; |
13667 | |
13668 | Value *MaskVec = getMaskVecValue( |
13669 | CGF, Mask: Ops[2], NumElts: cast<llvm::FixedVectorType>(Val: Ty)->getNumElements()); |
13670 | |
13671 | return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, Mask: MaskVec, PassThru: Ops[1]); |
13672 | } |
13673 | |
13674 | static Value *EmitX86ExpandLoad(CodeGenFunction &CGF, |
13675 | ArrayRef<Value *> Ops) { |
13676 | auto *ResultTy = cast<llvm::VectorType>(Val: Ops[1]->getType()); |
13677 | Value *Ptr = Ops[0]; |
13678 | |
13679 | Value *MaskVec = getMaskVecValue( |
13680 | CGF, Mask: Ops[2], NumElts: cast<FixedVectorType>(Val: ResultTy)->getNumElements()); |
13681 | |
13682 | llvm::Function *F = CGF.CGM.getIntrinsic(IID: Intrinsic::masked_expandload, |
13683 | Tys: ResultTy); |
13684 | return CGF.Builder.CreateCall(Callee: F, Args: { Ptr, MaskVec, Ops[1] }); |
13685 | } |
13686 | |
13687 | static Value *EmitX86CompressExpand(CodeGenFunction &CGF, |
13688 | ArrayRef<Value *> Ops, |
13689 | bool IsCompress) { |
13690 | auto *ResultTy = cast<llvm::FixedVectorType>(Val: Ops[1]->getType()); |
13691 | |
13692 | Value *MaskVec = getMaskVecValue(CGF, Mask: Ops[2], NumElts: ResultTy->getNumElements()); |
13693 | |
13694 | Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress |
13695 | : Intrinsic::x86_avx512_mask_expand; |
13696 | llvm::Function *F = CGF.CGM.getIntrinsic(IID, Tys: ResultTy); |
13697 | return CGF.Builder.CreateCall(Callee: F, Args: { Ops[0], Ops[1], MaskVec }); |
13698 | } |
13699 | |
13700 | static Value *EmitX86CompressStore(CodeGenFunction &CGF, |
13701 | ArrayRef<Value *> Ops) { |
13702 | auto *ResultTy = cast<llvm::FixedVectorType>(Val: Ops[1]->getType()); |
13703 | Value *Ptr = Ops[0]; |
13704 | |
13705 | Value *MaskVec = getMaskVecValue(CGF, Mask: Ops[2], NumElts: ResultTy->getNumElements()); |
13706 | |
13707 | llvm::Function *F = CGF.CGM.getIntrinsic(IID: Intrinsic::masked_compressstore, |
13708 | Tys: ResultTy); |
13709 | return CGF.Builder.CreateCall(Callee: F, Args: { Ops[1], Ptr, MaskVec }); |
13710 | } |
13711 | |
13712 | static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc, |
13713 | ArrayRef<Value *> Ops, |
13714 | bool InvertLHS = false) { |
13715 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13716 | Value *LHS = getMaskVecValue(CGF, Mask: Ops[0], NumElts); |
13717 | Value *RHS = getMaskVecValue(CGF, Mask: Ops[1], NumElts); |
13718 | |
13719 | if (InvertLHS) |
13720 | LHS = CGF.Builder.CreateNot(V: LHS); |
13721 | |
13722 | return CGF.Builder.CreateBitCast(V: CGF.Builder.CreateBinOp(Opc, LHS, RHS), |
13723 | DestTy: Ops[0]->getType()); |
13724 | } |
13725 | |
13726 | static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1, |
13727 | Value *Amt, bool IsRight) { |
13728 | llvm::Type *Ty = Op0->getType(); |
13729 | |
13730 |   // The amount may be a scalar immediate, in which case we create a splat |
13731 |   // vector. Funnel shift amounts are treated as modulo, and the types are |
13732 |   // all power-of-2, so we only care about the lowest log2 bits anyway. |
13733 | if (Amt->getType() != Ty) { |
13734 | unsigned NumElts = cast<llvm::FixedVectorType>(Val: Ty)->getNumElements(); |
13735 | Amt = CGF.Builder.CreateIntCast(V: Amt, DestTy: Ty->getScalarType(), isSigned: false); |
13736 | Amt = CGF.Builder.CreateVectorSplat(NumElts, V: Amt); |
13737 | } |
13738 | |
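  // Left funnel shifts lower to llvm.fshl and right ones to llvm.fshr,
  // instantiated at the (possibly splatted-to) vector type.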
13739 | unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl; |
13740 | Function *F = CGF.CGM.getIntrinsic(IID, Tys: Ty); |
13741 | return CGF.Builder.CreateCall(Callee: F, Args: {Op0, Op1, Amt}); |
13742 | } |
13743 | |
13744 | static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
13745 | bool IsSigned) { |
13746 | Value *Op0 = Ops[0]; |
13747 | Value *Op1 = Ops[1]; |
13748 | llvm::Type *Ty = Op0->getType(); |
13749 | uint64_t Imm = cast<llvm::ConstantInt>(Val: Ops[2])->getZExtValue() & 0x7; |
13750 | |
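  // The low three bits of the immediate select the comparison:
  // 0 = LT, 1 = LE, 2 = GT, 3 = GE, 4 = EQ, 5 = NE, 6 = FALSE, 7 = TRUE.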
13751 | CmpInst::Predicate Pred; |
13752 | switch (Imm) { |
13753 | case 0x0: |
13754 | Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; |
13755 | break; |
13756 | case 0x1: |
13757 | Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; |
13758 | break; |
13759 | case 0x2: |
13760 | Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; |
13761 | break; |
13762 | case 0x3: |
13763 | Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; |
13764 | break; |
13765 | case 0x4: |
13766 | Pred = ICmpInst::ICMP_EQ; |
13767 | break; |
13768 | case 0x5: |
13769 | Pred = ICmpInst::ICMP_NE; |
13770 | break; |
13771 | case 0x6: |
13772 | return llvm::Constant::getNullValue(Ty); // FALSE |
13773 | case 0x7: |
13774 | return llvm::Constant::getAllOnesValue(Ty); // TRUE |
13775 | default: |
13776 | llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate" ); |
13777 | } |
13778 | |
13779 | Value *Cmp = CGF.Builder.CreateICmp(P: Pred, LHS: Op0, RHS: Op1); |
13780 | Value *Res = CGF.Builder.CreateSExt(V: Cmp, DestTy: Ty); |
13781 | return Res; |
13782 | } |
13783 | |
13784 | static Value *EmitX86Select(CodeGenFunction &CGF, |
13785 | Value *Mask, Value *Op0, Value *Op1) { |
13786 | |
13787 |   // If the mask is all ones, just return the first argument. |
13788 | if (const auto *C = dyn_cast<Constant>(Val: Mask)) |
13789 | if (C->isAllOnesValue()) |
13790 | return Op0; |
13791 | |
13792 | Mask = getMaskVecValue( |
13793 | CGF, Mask, NumElts: cast<llvm::FixedVectorType>(Val: Op0->getType())->getNumElements()); |
13794 | |
13795 | return CGF.Builder.CreateSelect(C: Mask, True: Op0, False: Op1); |
13796 | } |
13797 | |
13798 | static Value *EmitX86ScalarSelect(CodeGenFunction &CGF, |
13799 | Value *Mask, Value *Op0, Value *Op1) { |
13800 |   // If the mask is all ones, just return the first argument. |
13801 | if (const auto *C = dyn_cast<Constant>(Val: Mask)) |
13802 | if (C->isAllOnesValue()) |
13803 | return Op0; |
13804 | |
13805 | auto *MaskTy = llvm::FixedVectorType::get( |
13806 | ElementType: CGF.Builder.getInt1Ty(), NumElts: Mask->getType()->getIntegerBitWidth()); |
13807 | Mask = CGF.Builder.CreateBitCast(V: Mask, DestTy: MaskTy); |
13808 | Mask = CGF.Builder.CreateExtractElement(Vec: Mask, Idx: (uint64_t)0); |
13809 | return CGF.Builder.CreateSelect(C: Mask, True: Op0, False: Op1); |
13810 | } |
13811 | |
13812 | static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp, |
13813 | unsigned NumElts, Value *MaskIn) { |
13814 | if (MaskIn) { |
13815 | const auto *C = dyn_cast<Constant>(Val: MaskIn); |
13816 | if (!C || !C->isAllOnesValue()) |
13817 | Cmp = CGF.Builder.CreateAnd(LHS: Cmp, RHS: getMaskVecValue(CGF, Mask: MaskIn, NumElts)); |
13818 | } |
13819 | |
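  // Compare results narrower than 8 lanes are padded with zero lanes taken
  // from the null vector so the result can be bitcast to an i8 mask; e.g. a
  // 4-element compare becomes <8 x i1> with the top four lanes cleared.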
13820 | if (NumElts < 8) { |
13821 | int Indices[8]; |
13822 | for (unsigned i = 0; i != NumElts; ++i) |
13823 | Indices[i] = i; |
13824 | for (unsigned i = NumElts; i != 8; ++i) |
13825 | Indices[i] = i % NumElts + NumElts; |
13826 | Cmp = CGF.Builder.CreateShuffleVector( |
13827 | V1: Cmp, V2: llvm::Constant::getNullValue(Ty: Cmp->getType()), Mask: Indices); |
13828 | } |
13829 | |
13830 | return CGF.Builder.CreateBitCast(V: Cmp, |
13831 | DestTy: IntegerType::get(C&: CGF.getLLVMContext(), |
13832 | NumBits: std::max(a: NumElts, b: 8U))); |
13833 | } |
13834 | |
13835 | static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC, |
13836 | bool Signed, ArrayRef<Value *> Ops) { |
13837 | assert((Ops.size() == 2 || Ops.size() == 4) && |
13838 | "Unexpected number of arguments" ); |
13839 | unsigned NumElts = |
13840 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
13841 | Value *Cmp; |
13842 | |
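  // CC values 3 and 7 encode the always-false and always-true predicates;
  // every other value maps onto an ordinary integer comparison below.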
13843 | if (CC == 3) { |
13844 | Cmp = Constant::getNullValue( |
13845 | Ty: llvm::FixedVectorType::get(ElementType: CGF.Builder.getInt1Ty(), NumElts)); |
13846 | } else if (CC == 7) { |
13847 | Cmp = Constant::getAllOnesValue( |
13848 | Ty: llvm::FixedVectorType::get(ElementType: CGF.Builder.getInt1Ty(), NumElts)); |
13849 | } else { |
13850 | ICmpInst::Predicate Pred; |
13851 | switch (CC) { |
13852 | default: llvm_unreachable("Unknown condition code" ); |
13853 | case 0: Pred = ICmpInst::ICMP_EQ; break; |
13854 | case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break; |
13855 | case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break; |
13856 | case 4: Pred = ICmpInst::ICMP_NE; break; |
13857 | case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break; |
13858 | case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break; |
13859 | } |
13860 | Cmp = CGF.Builder.CreateICmp(P: Pred, LHS: Ops[0], RHS: Ops[1]); |
13861 | } |
13862 | |
13863 | Value *MaskIn = nullptr; |
13864 | if (Ops.size() == 4) |
13865 | MaskIn = Ops[3]; |
13866 | |
13867 | return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn); |
13868 | } |
13869 | |
13870 | static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) { |
13871 | Value *Zero = Constant::getNullValue(Ty: In->getType()); |
13872 | return EmitX86MaskedCompare(CGF, CC: 1, Signed: true, Ops: { In, Zero }); |
13873 | } |
13874 | |
13875 | static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E, |
13876 | ArrayRef<Value *> Ops, bool IsSigned) { |
13877 | unsigned Rnd = cast<llvm::ConstantInt>(Val: Ops[3])->getZExtValue(); |
13878 | llvm::Type *Ty = Ops[1]->getType(); |
13879 | |
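  // A rounding operand other than 4 (_MM_FROUND_CUR_DIRECTION) selects the
  // AVX-512 rounding intrinsic; otherwise a plain sitofp/uitofp suffices, and
  // the result is merged with the pass-through vector under the mask below.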
13880 | Value *Res; |
13881 | if (Rnd != 4) { |
13882 | Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round |
13883 | : Intrinsic::x86_avx512_uitofp_round; |
13884 | Function *F = CGF.CGM.getIntrinsic(IID, Tys: { Ty, Ops[0]->getType() }); |
13885 | Res = CGF.Builder.CreateCall(Callee: F, Args: { Ops[0], Ops[3] }); |
13886 | } else { |
13887 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
13888 | Res = IsSigned ? CGF.Builder.CreateSIToFP(V: Ops[0], DestTy: Ty) |
13889 | : CGF.Builder.CreateUIToFP(V: Ops[0], DestTy: Ty); |
13890 | } |
13891 | |
13892 | return EmitX86Select(CGF, Mask: Ops[2], Op0: Res, Op1: Ops[1]); |
13893 | } |
13894 | |
13895 | // Lowers X86 FMA intrinsics to IR. |
13896 | static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E, |
13897 | ArrayRef<Value *> Ops, unsigned BuiltinID, |
13898 | bool IsAddSub) { |
13899 | |
13900 | bool Subtract = false; |
13901 | Intrinsic::ID IID = Intrinsic::not_intrinsic; |
13902 | switch (BuiltinID) { |
13903 | default: break; |
13904 | case clang::X86::BI__builtin_ia32_vfmsubph512_mask3: |
13905 | Subtract = true; |
13906 | [[fallthrough]]; |
13907 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask: |
13908 | case clang::X86::BI__builtin_ia32_vfmaddph512_maskz: |
13909 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask3: |
13910 | IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512; |
13911 | break; |
13912 | case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3: |
13913 | Subtract = true; |
13914 | [[fallthrough]]; |
13915 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask: |
13916 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz: |
13917 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3: |
13918 | IID = llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512; |
13919 | break; |
13920 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: |
13921 | Subtract = true; |
13922 | [[fallthrough]]; |
13923 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: |
13924 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: |
13925 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: |
13926 | IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break; |
13927 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: |
13928 | Subtract = true; |
13929 | [[fallthrough]]; |
13930 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: |
13931 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: |
13932 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: |
13933 | IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break; |
13934 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
13935 | Subtract = true; |
13936 | [[fallthrough]]; |
13937 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: |
13938 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
13939 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
13940 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512; |
13941 | break; |
13942 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
13943 | Subtract = true; |
13944 | [[fallthrough]]; |
13945 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
13946 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
13947 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
13948 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512; |
13949 | break; |
13950 | } |
13951 | |
13952 | Value *A = Ops[0]; |
13953 | Value *B = Ops[1]; |
13954 | Value *C = Ops[2]; |
13955 | |
13956 | if (Subtract) |
13957 | C = CGF.Builder.CreateFNeg(V: C); |
13958 | |
13959 | Value *Res; |
13960 | |
13961 |   // Use the x86 intrinsic for a non-default rounding mode or the addsub forms; otherwise lower to a plain FMA. |
13962 | if (IID != Intrinsic::not_intrinsic && |
13963 | (cast<llvm::ConstantInt>(Val: Ops.back())->getZExtValue() != (uint64_t)4 || |
13964 | IsAddSub)) { |
13965 | Function *Intr = CGF.CGM.getIntrinsic(IID); |
13966 | Res = CGF.Builder.CreateCall(Callee: Intr, Args: {A, B, C, Ops.back() }); |
13967 | } else { |
13968 | llvm::Type *Ty = A->getType(); |
13969 | Function *FMA; |
13970 | if (CGF.Builder.getIsFPConstrained()) { |
13971 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
13972 | FMA = CGF.CGM.getIntrinsic(IID: Intrinsic::experimental_constrained_fma, Tys: Ty); |
13973 | Res = CGF.Builder.CreateConstrainedFPCall(Callee: FMA, Args: {A, B, C}); |
13974 | } else { |
13975 | FMA = CGF.CGM.getIntrinsic(IID: Intrinsic::fma, Tys: Ty); |
13976 | Res = CGF.Builder.CreateCall(Callee: FMA, Args: {A, B, C}); |
13977 | } |
13978 | } |
13979 | |
13980 | // Handle any required masking. |
13981 | Value *MaskFalseVal = nullptr; |
13982 | switch (BuiltinID) { |
13983 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask: |
13984 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: |
13985 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: |
13986 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask: |
13987 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: |
13988 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
13989 | MaskFalseVal = Ops[0]; |
13990 | break; |
13991 | case clang::X86::BI__builtin_ia32_vfmaddph512_maskz: |
13992 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: |
13993 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: |
13994 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz: |
13995 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
13996 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
13997 | MaskFalseVal = Constant::getNullValue(Ty: Ops[0]->getType()); |
13998 | break; |
13999 | case clang::X86::BI__builtin_ia32_vfmsubph512_mask3: |
14000 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask3: |
14001 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: |
14002 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: |
14003 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: |
14004 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: |
14005 | case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3: |
14006 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3: |
14007 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
14008 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
14009 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
14010 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
14011 | MaskFalseVal = Ops[2]; |
14012 | break; |
14013 | } |
14014 | |
14015 | if (MaskFalseVal) |
14016 | return EmitX86Select(CGF, Mask: Ops[3], Op0: Res, Op1: MaskFalseVal); |
14017 | |
14018 | return Res; |
14019 | } |
14020 | |
14021 | static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E, |
14022 | MutableArrayRef<Value *> Ops, Value *Upper, |
14023 | bool ZeroMask = false, unsigned PTIdx = 0, |
14024 | bool NegAcc = false) { |
14025 | unsigned Rnd = 4; |
14026 | if (Ops.size() > 4) |
14027 | Rnd = cast<llvm::ConstantInt>(Val: Ops[4])->getZExtValue(); |
14028 | |
14029 | if (NegAcc) |
14030 | Ops[2] = CGF.Builder.CreateFNeg(V: Ops[2]); |
14031 | |
14032 | Ops[0] = CGF.Builder.CreateExtractElement(Vec: Ops[0], Idx: (uint64_t)0); |
14033 | Ops[1] = CGF.Builder.CreateExtractElement(Vec: Ops[1], Idx: (uint64_t)0); |
14034 | Ops[2] = CGF.Builder.CreateExtractElement(Vec: Ops[2], Idx: (uint64_t)0); |
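  // The scalar FMA only operates on lane 0; the remaining lanes of the result
  // come from Upper when lane 0 is re-inserted at the end of this function.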
14035 | Value *Res; |
14036 | if (Rnd != 4) { |
14037 | Intrinsic::ID IID; |
14038 | |
14039 | switch (Ops[0]->getType()->getPrimitiveSizeInBits()) { |
14040 | case 16: |
14041 | IID = Intrinsic::x86_avx512fp16_vfmadd_f16; |
14042 | break; |
14043 | case 32: |
14044 | IID = Intrinsic::x86_avx512_vfmadd_f32; |
14045 | break; |
14046 | case 64: |
14047 | IID = Intrinsic::x86_avx512_vfmadd_f64; |
14048 | break; |
14049 | default: |
14050 | llvm_unreachable("Unexpected size" ); |
14051 | } |
14052 | Res = CGF.Builder.CreateCall(Callee: CGF.CGM.getIntrinsic(IID), |
14053 | Args: {Ops[0], Ops[1], Ops[2], Ops[4]}); |
14054 | } else if (CGF.Builder.getIsFPConstrained()) { |
14055 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
14056 | Function *FMA = CGF.CGM.getIntrinsic( |
14057 | IID: Intrinsic::experimental_constrained_fma, Tys: Ops[0]->getType()); |
14058 | Res = CGF.Builder.CreateConstrainedFPCall(Callee: FMA, Args: Ops.slice(N: 0, M: 3)); |
14059 | } else { |
14060 | Function *FMA = CGF.CGM.getIntrinsic(IID: Intrinsic::fma, Tys: Ops[0]->getType()); |
14061 | Res = CGF.Builder.CreateCall(Callee: FMA, Args: Ops.slice(N: 0, M: 3)); |
14062 | } |
14063 | // If we have more than 3 arguments, we need to do masking. |
14064 | if (Ops.size() > 3) { |
14065 | Value *PassThru = ZeroMask ? Constant::getNullValue(Ty: Res->getType()) |
14066 | : Ops[PTIdx]; |
14067 | |
14068 |     // If we negated the accumulator and it is also the PassThru value, we |
14069 |     // need to bypass the negate. Conveniently, Upper should hold the same |
14070 |     // value in this case. |
14071 | if (NegAcc && PTIdx == 2) |
14072 | PassThru = CGF.Builder.CreateExtractElement(Vec: Upper, Idx: (uint64_t)0); |
14073 | |
14074 | Res = EmitX86ScalarSelect(CGF, Mask: Ops[3], Op0: Res, Op1: PassThru); |
14075 | } |
14076 | return CGF.Builder.CreateInsertElement(Vec: Upper, NewElt: Res, Idx: (uint64_t)0); |
14077 | } |
14078 | |
14079 | static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned, |
14080 | ArrayRef<Value *> Ops) { |
14081 | llvm::Type *Ty = Ops[0]->getType(); |
14082 | // Arguments have a vXi32 type so cast to vXi64. |
14083 | Ty = llvm::FixedVectorType::get(ElementType: CGF.Int64Ty, |
14084 | NumElts: Ty->getPrimitiveSizeInBits() / 64); |
14085 | Value *LHS = CGF.Builder.CreateBitCast(V: Ops[0], DestTy: Ty); |
14086 | Value *RHS = CGF.Builder.CreateBitCast(V: Ops[1], DestTy: Ty); |
14087 | |
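  // Only the low 32 bits of each 64-bit lane participate: the signed form
  // sign-extends them in place with a shl/ashr pair, the unsigned form masks
  // them off, and the result is then a full 64-bit multiply.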
14088 | if (IsSigned) { |
14089 | // Shift left then arithmetic shift right. |
14090 | Constant *ShiftAmt = ConstantInt::get(Ty, V: 32); |
14091 | LHS = CGF.Builder.CreateShl(LHS, RHS: ShiftAmt); |
14092 | LHS = CGF.Builder.CreateAShr(LHS, RHS: ShiftAmt); |
14093 | RHS = CGF.Builder.CreateShl(LHS: RHS, RHS: ShiftAmt); |
14094 | RHS = CGF.Builder.CreateAShr(LHS: RHS, RHS: ShiftAmt); |
14095 | } else { |
14096 | // Clear the upper bits. |
14097 | Constant *Mask = ConstantInt::get(Ty, V: 0xffffffff); |
14098 | LHS = CGF.Builder.CreateAnd(LHS, RHS: Mask); |
14099 | RHS = CGF.Builder.CreateAnd(LHS: RHS, RHS: Mask); |
14100 | } |
14101 | |
14102 | return CGF.Builder.CreateMul(LHS, RHS); |
14103 | } |
14104 | |
14105 | // Emit a masked pternlog intrinsic. This only exists because the header has to |
14106 | // use a macro and we aren't able to pass the input argument to a pternlog |
14107 | // builtin and a select builtin without evaluating it twice. |
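// Ops[0..2] are the three vector operands, Ops[3] is the 8-bit truth-table
// immediate, and Ops[4] is the write mask applied via the final select.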
14108 | static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask, |
14109 | ArrayRef<Value *> Ops) { |
14110 | llvm::Type *Ty = Ops[0]->getType(); |
14111 | |
14112 | unsigned VecWidth = Ty->getPrimitiveSizeInBits(); |
14113 | unsigned EltWidth = Ty->getScalarSizeInBits(); |
14114 | Intrinsic::ID IID; |
14115 | if (VecWidth == 128 && EltWidth == 32) |
14116 | IID = Intrinsic::x86_avx512_pternlog_d_128; |
14117 | else if (VecWidth == 256 && EltWidth == 32) |
14118 | IID = Intrinsic::x86_avx512_pternlog_d_256; |
14119 | else if (VecWidth == 512 && EltWidth == 32) |
14120 | IID = Intrinsic::x86_avx512_pternlog_d_512; |
14121 | else if (VecWidth == 128 && EltWidth == 64) |
14122 | IID = Intrinsic::x86_avx512_pternlog_q_128; |
14123 | else if (VecWidth == 256 && EltWidth == 64) |
14124 | IID = Intrinsic::x86_avx512_pternlog_q_256; |
14125 | else if (VecWidth == 512 && EltWidth == 64) |
14126 | IID = Intrinsic::x86_avx512_pternlog_q_512; |
14127 | else |
14128 | llvm_unreachable("Unexpected intrinsic" ); |
14129 | |
14130 | Value *Ternlog = CGF.Builder.CreateCall(Callee: CGF.CGM.getIntrinsic(IID), |
14131 | Args: Ops.drop_back()); |
14132 | Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0]; |
14133 | return EmitX86Select(CGF, Mask: Ops[4], Op0: Ternlog, Op1: PassThru); |
14134 | } |
14135 | |
14136 | static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op, |
14137 | llvm::Type *DstTy) { |
14138 | unsigned NumberOfElements = |
14139 | cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements(); |
14140 | Value *Mask = getMaskVecValue(CGF, Mask: Op, NumElts: NumberOfElements); |
14141 | return CGF.Builder.CreateSExt(V: Mask, DestTy: DstTy, Name: "vpmovm2" ); |
14142 | } |
14143 | |
14144 | Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) { |
14145 | const Expr *CPUExpr = E->getArg(Arg: 0)->IgnoreParenCasts(); |
14146 | StringRef CPUStr = cast<clang::StringLiteral>(Val: CPUExpr)->getString(); |
14147 | return EmitX86CpuIs(CPUStr); |
14148 | } |
14149 | |
14150 | // Convert F16 halves to floats. |
14151 | static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF, |
14152 | ArrayRef<Value *> Ops, |
14153 | llvm::Type *DstTy) { |
14154 | assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) && |
14155 | "Unknown cvtph2ps intrinsic" ); |
14156 | |
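  // Ops is either just the source, or source + pass-through + mask, optionally
  // followed by a rounding/SAE immediate.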
14157 |   // With a non-default rounding mode the SAE intrinsic can't be lowered to a plain fpext; keep the target intrinsic. |
14158 | if (Ops.size() == 4 && cast<llvm::ConstantInt>(Val: Ops[3])->getZExtValue() != 4) { |
14159 | Function *F = |
14160 | CGF.CGM.getIntrinsic(IID: Intrinsic::x86_avx512_mask_vcvtph2ps_512); |
14161 | return CGF.Builder.CreateCall(Callee: F, Args: {Ops[0], Ops[1], Ops[2], Ops[3]}); |
14162 | } |
14163 | |
14164 | unsigned NumDstElts = cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements(); |
14165 | Value *Src = Ops[0]; |
14166 | |
14167 | // Extract the subvector. |
14168 | if (NumDstElts != |
14169 | cast<llvm::FixedVectorType>(Val: Src->getType())->getNumElements()) { |
14170 | assert(NumDstElts == 4 && "Unexpected vector size" ); |
14171 | Src = CGF.Builder.CreateShuffleVector(V: Src, Mask: ArrayRef<int>{0, 1, 2, 3}); |
14172 | } |
14173 | |
14174 | // Bitcast from vXi16 to vXf16. |
14175 | auto *HalfTy = llvm::FixedVectorType::get( |
14176 | ElementType: llvm::Type::getHalfTy(C&: CGF.getLLVMContext()), NumElts: NumDstElts); |
14177 | Src = CGF.Builder.CreateBitCast(V: Src, DestTy: HalfTy); |
14178 | |
14179 | // Perform the fp-extension. |
14180 | Value *Res = CGF.Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "cvtph2ps" ); |
14181 | |
14182 | if (Ops.size() >= 3) |
14183 | Res = EmitX86Select(CGF, Mask: Ops[2], Op0: Res, Op1: Ops[1]); |
14184 | return Res; |
14185 | } |
14186 | |
14187 | Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) { |
14188 | |
14189 | llvm::Type *Int32Ty = Builder.getInt32Ty(); |
14190 | |
14191 | // Matching the struct layout from the compiler-rt/libgcc structure that is |
14192 | // filled in: |
14193 | // unsigned int __cpu_vendor; |
14194 | // unsigned int __cpu_type; |
14195 | // unsigned int __cpu_subtype; |
14196 | // unsigned int __cpu_features[1]; |
14197 | llvm::Type *STy = llvm::StructType::get(elt1: Int32Ty, elts: Int32Ty, elts: Int32Ty, |
14198 | elts: llvm::ArrayType::get(ElementType: Int32Ty, NumElements: 1)); |
14199 | |
14200 | // Grab the global __cpu_model. |
14201 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(Ty: STy, Name: "__cpu_model" ); |
14202 | cast<llvm::GlobalValue>(Val: CpuModel)->setDSOLocal(true); |
14203 | |
14204 |   // Calculate which __cpu_model field to index (vendor, type, or subtype) |
14205 |   // based on the CPU string, and the value it is expected to hold. |
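  // Vendor strings map to __cpu_vendor (Index 0), CPU type names and aliases
  // to __cpu_type (Index 1), and subtype names to __cpu_subtype (Index 2),
  // per the tables in X86TargetParser.def.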
14206 | unsigned Index; |
14207 | unsigned Value; |
14208 | std::tie(args&: Index, args&: Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr) |
14209 | #define X86_VENDOR(ENUM, STRING) \ |
14210 | .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)}) |
14211 | #define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \ |
14212 | .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) |
14213 | #define X86_CPU_TYPE(ENUM, STR) \ |
14214 | .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) |
14215 | #define X86_CPU_SUBTYPE_ALIAS(ENUM, ALIAS) \ |
14216 | .Case(ALIAS, {2u, static_cast<unsigned>(llvm::X86::ENUM)}) |
14217 | #define X86_CPU_SUBTYPE(ENUM, STR) \ |
14218 | .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)}) |
14219 | #include "llvm/TargetParser/X86TargetParser.def" |
14220 | .Default(Value: {0, 0}); |
14221 | assert(Value != 0 && "Invalid CPUStr passed to CpuIs" ); |
14222 | |
14223 | // Grab the appropriate field from __cpu_model. |
14224 | llvm::Value *Idxs[] = {ConstantInt::get(Ty: Int32Ty, V: 0), |
14225 | ConstantInt::get(Ty: Int32Ty, V: Index)}; |
14226 | llvm::Value *CpuValue = Builder.CreateInBoundsGEP(Ty: STy, Ptr: CpuModel, IdxList: Idxs); |
14227 | CpuValue = Builder.CreateAlignedLoad(Ty: Int32Ty, Addr: CpuValue, |
14228 | Align: CharUnits::fromQuantity(Quantity: 4)); |
14229 | |
14230 | // Check the value of the field against the requested value. |
14231 | return Builder.CreateICmpEQ(LHS: CpuValue, |
14232 | RHS: llvm::ConstantInt::get(Ty: Int32Ty, V: Value)); |
14233 | } |
14234 | |
14235 | Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) { |
14236 | const Expr *FeatureExpr = E->getArg(Arg: 0)->IgnoreParenCasts(); |
14237 | StringRef FeatureStr = cast<StringLiteral>(Val: FeatureExpr)->getString(); |
14238 | if (!getContext().getTargetInfo().validateCpuSupports(Name: FeatureStr)) |
14239 | return Builder.getFalse(); |
14240 | return EmitX86CpuSupports(FeatureStrs: FeatureStr); |
14241 | } |
14242 | |
14243 | Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) { |
14244 | return EmitX86CpuSupports(FeatureMask: llvm::X86::getCpuSupportsMask(FeatureStrs)); |
14245 | } |
14246 | |
14247 | llvm::Value * |
14248 | CodeGenFunction::EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask) { |
14249 | Value *Result = Builder.getTrue(); |
14250 | if (FeatureMask[0] != 0) { |
14251 | // Matching the struct layout from the compiler-rt/libgcc structure that is |
14252 | // filled in: |
14253 | // unsigned int __cpu_vendor; |
14254 | // unsigned int __cpu_type; |
14255 | // unsigned int __cpu_subtype; |
14256 | // unsigned int __cpu_features[1]; |
14257 | llvm::Type *STy = llvm::StructType::get(elt1: Int32Ty, elts: Int32Ty, elts: Int32Ty, |
14258 | elts: llvm::ArrayType::get(ElementType: Int32Ty, NumElements: 1)); |
14259 | |
14260 | // Grab the global __cpu_model. |
14261 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(Ty: STy, Name: "__cpu_model" ); |
14262 | cast<llvm::GlobalValue>(Val: CpuModel)->setDSOLocal(true); |
14263 | |
14264 |     // Grab the first (0th) element of the __cpu_features field from the |
14265 |     // __cpu_model global (of type STy). |
14266 | Value *Idxs[] = {Builder.getInt32(C: 0), Builder.getInt32(C: 3), |
14267 | Builder.getInt32(C: 0)}; |
14268 | Value *CpuFeatures = Builder.CreateInBoundsGEP(Ty: STy, Ptr: CpuModel, IdxList: Idxs); |
14269 | Value *Features = Builder.CreateAlignedLoad(Ty: Int32Ty, Addr: CpuFeatures, |
14270 | Align: CharUnits::fromQuantity(Quantity: 4)); |
14271 | |
14272 | // Check the value of the bit corresponding to the feature requested. |
14273 | Value *Mask = Builder.getInt32(C: FeatureMask[0]); |
14274 | Value *Bitset = Builder.CreateAnd(LHS: Features, RHS: Mask); |
14275 | Value *Cmp = Builder.CreateICmpEQ(LHS: Bitset, RHS: Mask); |
14276 | Result = Builder.CreateAnd(LHS: Result, RHS: Cmp); |
14277 | } |
14278 | |
14279 | llvm::Type *ATy = llvm::ArrayType::get(ElementType: Int32Ty, NumElements: 3); |
14280 | llvm::Constant *CpuFeatures2 = |
14281 | CGM.CreateRuntimeVariable(Ty: ATy, Name: "__cpu_features2" ); |
14282 | cast<llvm::GlobalValue>(Val: CpuFeatures2)->setDSOLocal(true); |
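  // Words 1..3 of the feature mask are tested against the corresponding
  // entries of the separate __cpu_features2 array, AND-ing each check into
  // the running result.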
14283 | for (int i = 1; i != 4; ++i) { |
14284 | const uint32_t M = FeatureMask[i]; |
14285 | if (!M) |
14286 | continue; |
14287 | Value *Idxs[] = {Builder.getInt32(C: 0), Builder.getInt32(C: i - 1)}; |
14288 | Value *Features = Builder.CreateAlignedLoad( |
14289 | Ty: Int32Ty, Addr: Builder.CreateInBoundsGEP(Ty: ATy, Ptr: CpuFeatures2, IdxList: Idxs), |
14290 | Align: CharUnits::fromQuantity(Quantity: 4)); |
14291 | // Check the value of the bit corresponding to the feature requested. |
14292 | Value *Mask = Builder.getInt32(C: M); |
14293 | Value *Bitset = Builder.CreateAnd(LHS: Features, RHS: Mask); |
14294 | Value *Cmp = Builder.CreateICmpEQ(LHS: Bitset, RHS: Mask); |
14295 | Result = Builder.CreateAnd(LHS: Result, RHS: Cmp); |
14296 | } |
14297 | |
14298 | return Result; |
14299 | } |
14300 | |
14301 | Value *CodeGenFunction::EmitAArch64CpuInit() { |
14302 | llvm::FunctionType *FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false); |
14303 | llvm::FunctionCallee Func = |
14304 | CGM.CreateRuntimeFunction(Ty: FTy, Name: "__init_cpu_features_resolver" ); |
14305 | cast<llvm::GlobalValue>(Val: Func.getCallee())->setDSOLocal(true); |
14306 | cast<llvm::GlobalValue>(Val: Func.getCallee()) |
14307 | ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
14308 | return Builder.CreateCall(Callee: Func); |
14309 | } |
14310 | |
14311 | Value *CodeGenFunction::EmitX86CpuInit() { |
14312 | llvm::FunctionType *FTy = llvm::FunctionType::get(Result: VoidTy, |
14313 | /*Variadic*/ isVarArg: false); |
14314 | llvm::FunctionCallee Func = |
14315 | CGM.CreateRuntimeFunction(Ty: FTy, Name: "__cpu_indicator_init" ); |
14316 | cast<llvm::GlobalValue>(Val: Func.getCallee())->setDSOLocal(true); |
14317 | cast<llvm::GlobalValue>(Val: Func.getCallee()) |
14318 | ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
14319 | return Builder.CreateCall(Callee: Func); |
14320 | } |
14321 | |
14322 | Value *CodeGenFunction::EmitAArch64CpuSupports(const CallExpr *E) { |
14323 |   const Expr *ArgExpr = E->getArg(Arg: 0)->IgnoreParenCasts(); |
14324 |   StringRef ArgStr = cast<StringLiteral>(Val: ArgExpr)->getString(); |
14325 |   llvm::SmallVector<StringRef, 8> Parts, Features; // Never append to the vector being iterated. |
14326 |   ArgStr.split(A&: Parts, Separator: "+" ); |
14327 |   for (StringRef Feature : Parts) { |
14328 |     Feature = Feature.trim(); |
14329 |     if (!llvm::AArch64::parseFMVExtension(Extension: Feature)) |
14330 |       return Builder.getFalse(); |
14331 |     if (Feature != "default" ) |
14332 |       Features.push_back(Elt: Feature); |
14333 |   } |
14334 |   return EmitAArch64CpuSupports(FeatureStrs: Features); |
14335 | } |
14336 | |
14337 | llvm::Value * |
14338 | CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) { |
14339 | uint64_t FeaturesMask = llvm::AArch64::getCpuSupportsMask(FeatureStrs: FeaturesStrs); |
14340 | Value *Result = Builder.getTrue(); |
14341 | if (FeaturesMask != 0) { |
14342 | // Get features from structure in runtime library |
14343 | // struct { |
14344 | // unsigned long long features; |
14345 | // } __aarch64_cpu_features; |
14346 | llvm::Type *STy = llvm::StructType::get(elt1: Int64Ty); |
14347 | llvm::Constant *AArch64CPUFeatures = |
14348 | CGM.CreateRuntimeVariable(Ty: STy, Name: "__aarch64_cpu_features" ); |
14349 | cast<llvm::GlobalValue>(Val: AArch64CPUFeatures)->setDSOLocal(true); |
14350 | llvm::Value *CpuFeatures = Builder.CreateGEP( |
14351 | Ty: STy, Ptr: AArch64CPUFeatures, |
14352 | IdxList: {ConstantInt::get(Ty: Int32Ty, V: 0), ConstantInt::get(Ty: Int32Ty, V: 0)}); |
14353 | Value *Features = Builder.CreateAlignedLoad(Ty: Int64Ty, Addr: CpuFeatures, |
14354 | Align: CharUnits::fromQuantity(Quantity: 8)); |
14355 | Value *Mask = Builder.getInt64(C: FeaturesMask); |
14356 | Value *Bitset = Builder.CreateAnd(LHS: Features, RHS: Mask); |
14357 | Value *Cmp = Builder.CreateICmpEQ(LHS: Bitset, RHS: Mask); |
14358 | Result = Builder.CreateAnd(LHS: Result, RHS: Cmp); |
14359 | } |
14360 | return Result; |
14361 | } |
14362 | |
14363 | Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, |
14364 | const CallExpr *E) { |
14365 | if (BuiltinID == Builtin::BI__builtin_cpu_is) |
14366 | return EmitX86CpuIs(E); |
14367 | if (BuiltinID == Builtin::BI__builtin_cpu_supports) |
14368 | return EmitX86CpuSupports(E); |
14369 | if (BuiltinID == Builtin::BI__builtin_cpu_init) |
14370 | return EmitX86CpuInit(); |
14371 | |
14372 | // Handle MSVC intrinsics before argument evaluation to prevent double |
14373 | // evaluation. |
14374 | if (std::optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID)) |
14375 | return EmitMSVCBuiltinExpr(BuiltinID: *MsvcIntId, E); |
14376 | |
14377 | SmallVector<Value*, 4> Ops; |
14378 | bool IsMaskFCmp = false; |
14379 | bool IsConjFMA = false; |
14380 | |
14381 | // Find out if any arguments are required to be integer constant expressions. |
14382 | unsigned ICEArguments = 0; |
14383 | ASTContext::GetBuiltinTypeError Error; |
14384 | getContext().GetBuiltinType(ID: BuiltinID, Error, IntegerConstantArgs: &ICEArguments); |
14385 | assert(Error == ASTContext::GE_None && "Should not codegen an error" ); |
14386 | |
14387 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { |
14388 | Ops.push_back(Elt: EmitScalarOrConstFoldImmArg(ICEArguments, Idx: i, E)); |
14389 | } |
14390 | |
14391 |   // These exist so that the builtin that takes an immediate can be bounds |
14392 |   // checked by clang to avoid passing bad immediates to the backend. Since |
14393 |   // AVX has a larger immediate than SSE we would need separate builtins to |
14394 |   // do the different bounds checking. Rather than create a clang-specific |
14395 |   // SSE-only builtin, this implements eight separate builtins to match the |
14396 |   // gcc implementation. |
14397 | auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) { |
14398 | Ops.push_back(Elt: llvm::ConstantInt::get(Ty: Int8Ty, V: Imm)); |
14399 | llvm::Function *F = CGM.getIntrinsic(IID: ID); |
14400 | return Builder.CreateCall(Callee: F, Args: Ops); |
14401 | }; |
14402 | |
14403 | // For the vector forms of FP comparisons, translate the builtins directly to |
14404 | // IR. |
14405 | // TODO: The builtins could be removed if the SSE header files used vector |
14406 | // extension comparisons directly (vector ordered/unordered may need |
14407 | // additional support via __builtin_isnan()). |
14408 | auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred, |
14409 | bool IsSignaling) { |
14410 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
14411 | Value *Cmp; |
14412 | if (IsSignaling) |
14413 | Cmp = Builder.CreateFCmpS(P: Pred, LHS: Ops[0], RHS: Ops[1]); |
14414 | else |
14415 | Cmp = Builder.CreateFCmp(P: Pred, LHS: Ops[0], RHS: Ops[1]); |
14416 | llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Val: Ops[0]->getType()); |
14417 | llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(VTy: FPVecTy); |
14418 | Value *Sext = Builder.CreateSExt(V: Cmp, DestTy: IntVecTy); |
14419 | return Builder.CreateBitCast(V: Sext, DestTy: FPVecTy); |
14420 | }; |
14421 | |
14422 | switch (BuiltinID) { |
14423 | default: return nullptr; |
14424 | case X86::BI_mm_prefetch: { |
14425 | Value *Address = Ops[0]; |
14426 | ConstantInt *C = cast<ConstantInt>(Val: Ops[1]); |
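    // The hint immediate packs the read/write intent in bit 2 and the
    // locality level in the low two bits; the final operand requests a data
    // (rather than instruction) prefetch.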
14427 | Value *RW = ConstantInt::get(Ty: Int32Ty, V: (C->getZExtValue() >> 2) & 0x1); |
14428 | Value *Locality = ConstantInt::get(Ty: Int32Ty, V: C->getZExtValue() & 0x3); |
14429 | Value *Data = ConstantInt::get(Ty: Int32Ty, V: 1); |
14430 | Function *F = CGM.getIntrinsic(IID: Intrinsic::prefetch, Tys: Address->getType()); |
14431 | return Builder.CreateCall(Callee: F, Args: {Address, RW, Locality, Data}); |
14432 | } |
14433 | case X86::BI_mm_clflush: { |
14434 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::x86_sse2_clflush), |
14435 | Args: Ops[0]); |
14436 | } |
14437 | case X86::BI_mm_lfence: { |
14438 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::x86_sse2_lfence)); |
14439 | } |
14440 | case X86::BI_mm_mfence: { |
14441 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::x86_sse2_mfence)); |
14442 | } |
14443 | case X86::BI_mm_sfence: { |
14444 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::x86_sse_sfence)); |
14445 | } |
14446 | case X86::BI_mm_pause: { |
14447 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::x86_sse2_pause)); |
14448 | } |
14449 | case X86::BI__rdtsc: { |
14450 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::x86_rdtsc)); |
14451 | } |
14452 | case X86::BI__builtin_ia32_rdtscp: { |
14453 | Value *Call = Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::x86_rdtscp)); |
14454 | Builder.CreateDefaultAlignedStore(Val: Builder.CreateExtractValue(Agg: Call, Idxs: 1), |
14455 | Addr: Ops[0]); |
14456 | return Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
14457 | } |
14458 | case X86::BI__builtin_ia32_lzcnt_u16: |
14459 | case X86::BI__builtin_ia32_lzcnt_u32: |
14460 | case X86::BI__builtin_ia32_lzcnt_u64: { |
14461 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: Ops[0]->getType()); |
14462 | return Builder.CreateCall(Callee: F, Args: {Ops[0], Builder.getInt1(V: false)}); |
14463 | } |
14464 | case X86::BI__builtin_ia32_tzcnt_u16: |
14465 | case X86::BI__builtin_ia32_tzcnt_u32: |
14466 | case X86::BI__builtin_ia32_tzcnt_u64: { |
14467 | Function *F = CGM.getIntrinsic(IID: Intrinsic::cttz, Tys: Ops[0]->getType()); |
14468 | return Builder.CreateCall(Callee: F, Args: {Ops[0], Builder.getInt1(V: false)}); |
14469 | } |
14470 | case X86::BI__builtin_ia32_undef128: |
14471 | case X86::BI__builtin_ia32_undef256: |
14472 | case X86::BI__builtin_ia32_undef512: |
14473 | // The x86 definition of "undef" is not the same as the LLVM definition |
14474 | // (PR32176). We leave optimizing away an unnecessary zero constant to the |
14475 | // IR optimizer and backend. |
14476 | // TODO: If we had a "freeze" IR instruction to generate a fixed undef |
14477 | // value, we should use that here instead of a zero. |
14478 | return llvm::Constant::getNullValue(Ty: ConvertType(T: E->getType())); |
14479 | case X86::BI__builtin_ia32_vec_init_v8qi: |
14480 | case X86::BI__builtin_ia32_vec_init_v4hi: |
14481 | case X86::BI__builtin_ia32_vec_init_v2si: |
14482 | return Builder.CreateBitCast(V: BuildVector(Ops), |
14483 | DestTy: llvm::Type::getX86_MMXTy(C&: getLLVMContext())); |
14484 | case X86::BI__builtin_ia32_vec_ext_v2si: |
14485 | case X86::BI__builtin_ia32_vec_ext_v16qi: |
14486 | case X86::BI__builtin_ia32_vec_ext_v8hi: |
14487 | case X86::BI__builtin_ia32_vec_ext_v4si: |
14488 | case X86::BI__builtin_ia32_vec_ext_v4sf: |
14489 | case X86::BI__builtin_ia32_vec_ext_v2di: |
14490 | case X86::BI__builtin_ia32_vec_ext_v32qi: |
14491 | case X86::BI__builtin_ia32_vec_ext_v16hi: |
14492 | case X86::BI__builtin_ia32_vec_ext_v8si: |
14493 | case X86::BI__builtin_ia32_vec_ext_v4di: { |
14494 | unsigned NumElts = |
14495 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
14496 | uint64_t Index = cast<ConstantInt>(Val: Ops[1])->getZExtValue(); |
14497 | Index &= NumElts - 1; |
14498 | // These builtins exist so we can ensure the index is an ICE and in range. |
14499 | // Otherwise we could just do this in the header file. |
14500 | return Builder.CreateExtractElement(Vec: Ops[0], Idx: Index); |
14501 | } |
14502 | case X86::BI__builtin_ia32_vec_set_v16qi: |
14503 | case X86::BI__builtin_ia32_vec_set_v8hi: |
14504 | case X86::BI__builtin_ia32_vec_set_v4si: |
14505 | case X86::BI__builtin_ia32_vec_set_v2di: |
14506 | case X86::BI__builtin_ia32_vec_set_v32qi: |
14507 | case X86::BI__builtin_ia32_vec_set_v16hi: |
14508 | case X86::BI__builtin_ia32_vec_set_v8si: |
14509 | case X86::BI__builtin_ia32_vec_set_v4di: { |
14510 | unsigned NumElts = |
14511 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
14512 | unsigned Index = cast<ConstantInt>(Val: Ops[2])->getZExtValue(); |
14513 | Index &= NumElts - 1; |
14514 | // These builtins exist so we can ensure the index is an ICE and in range. |
14515 | // Otherwise we could just do this in the header file. |
14516 | return Builder.CreateInsertElement(Vec: Ops[0], NewElt: Ops[1], Idx: Index); |
14517 | } |
14518 | case X86::BI_mm_setcsr: |
14519 | case X86::BI__builtin_ia32_ldmxcsr: { |
14520 | RawAddress Tmp = CreateMemTemp(T: E->getArg(Arg: 0)->getType()); |
14521 | Builder.CreateStore(Val: Ops[0], Addr: Tmp); |
14522 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::x86_sse_ldmxcsr), |
14523 | Args: Tmp.getPointer()); |
14524 | } |
14525 | case X86::BI_mm_getcsr: |
14526 | case X86::BI__builtin_ia32_stmxcsr: { |
14527 | RawAddress Tmp = CreateMemTemp(T: E->getType()); |
14528 | Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::x86_sse_stmxcsr), |
14529 | Args: Tmp.getPointer()); |
14530 | return Builder.CreateLoad(Addr: Tmp, Name: "stmxcsr" ); |
14531 | } |
14532 | case X86::BI__builtin_ia32_xsave: |
14533 | case X86::BI__builtin_ia32_xsave64: |
14534 | case X86::BI__builtin_ia32_xrstor: |
14535 | case X86::BI__builtin_ia32_xrstor64: |
14536 | case X86::BI__builtin_ia32_xsaveopt: |
14537 | case X86::BI__builtin_ia32_xsaveopt64: |
14538 | case X86::BI__builtin_ia32_xrstors: |
14539 | case X86::BI__builtin_ia32_xrstors64: |
14540 | case X86::BI__builtin_ia32_xsavec: |
14541 | case X86::BI__builtin_ia32_xsavec64: |
14542 | case X86::BI__builtin_ia32_xsaves: |
14543 | case X86::BI__builtin_ia32_xsaves64: |
14544 | case X86::BI__builtin_ia32_xsetbv: |
14545 | case X86::BI_xsetbv: { |
14546 | Intrinsic::ID ID; |
14547 | #define INTRINSIC_X86_XSAVE_ID(NAME) \ |
14548 | case X86::BI__builtin_ia32_##NAME: \ |
14549 | ID = Intrinsic::x86_##NAME; \ |
14550 | break |
14551 | switch (BuiltinID) { |
14552 | default: llvm_unreachable("Unsupported intrinsic!" ); |
14553 | INTRINSIC_X86_XSAVE_ID(xsave); |
14554 | INTRINSIC_X86_XSAVE_ID(xsave64); |
14555 | INTRINSIC_X86_XSAVE_ID(xrstor); |
14556 | INTRINSIC_X86_XSAVE_ID(xrstor64); |
14557 | INTRINSIC_X86_XSAVE_ID(xsaveopt); |
14558 | INTRINSIC_X86_XSAVE_ID(xsaveopt64); |
14559 | INTRINSIC_X86_XSAVE_ID(xrstors); |
14560 | INTRINSIC_X86_XSAVE_ID(xrstors64); |
14561 | INTRINSIC_X86_XSAVE_ID(xsavec); |
14562 | INTRINSIC_X86_XSAVE_ID(xsavec64); |
14563 | INTRINSIC_X86_XSAVE_ID(xsaves); |
14564 | INTRINSIC_X86_XSAVE_ID(xsaves64); |
14565 | INTRINSIC_X86_XSAVE_ID(xsetbv); |
14566 | case X86::BI_xsetbv: |
14567 | ID = Intrinsic::x86_xsetbv; |
14568 | break; |
14569 | } |
14570 | #undef INTRINSIC_X86_XSAVE_ID |
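    // Split the 64-bit mask operand into its high and low 32-bit halves,
    // matching the EDX:EAX pair the underlying instructions expect.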
14571 | Value *Mhi = Builder.CreateTrunc( |
14572 | V: Builder.CreateLShr(LHS: Ops[1], RHS: ConstantInt::get(Ty: Int64Ty, V: 32)), DestTy: Int32Ty); |
14573 | Value *Mlo = Builder.CreateTrunc(V: Ops[1], DestTy: Int32Ty); |
14574 | Ops[1] = Mhi; |
14575 | Ops.push_back(Elt: Mlo); |
14576 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), Args: Ops); |
14577 | } |
14578 | case X86::BI__builtin_ia32_xgetbv: |
14579 | case X86::BI_xgetbv: |
14580 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::x86_xgetbv), Args: Ops); |
14581 | case X86::BI__builtin_ia32_storedqudi128_mask: |
14582 | case X86::BI__builtin_ia32_storedqusi128_mask: |
14583 | case X86::BI__builtin_ia32_storedquhi128_mask: |
14584 | case X86::BI__builtin_ia32_storedquqi128_mask: |
14585 | case X86::BI__builtin_ia32_storeupd128_mask: |
14586 | case X86::BI__builtin_ia32_storeups128_mask: |
14587 | case X86::BI__builtin_ia32_storedqudi256_mask: |
14588 | case X86::BI__builtin_ia32_storedqusi256_mask: |
14589 | case X86::BI__builtin_ia32_storedquhi256_mask: |
14590 | case X86::BI__builtin_ia32_storedquqi256_mask: |
14591 | case X86::BI__builtin_ia32_storeupd256_mask: |
14592 | case X86::BI__builtin_ia32_storeups256_mask: |
14593 | case X86::BI__builtin_ia32_storedqudi512_mask: |
14594 | case X86::BI__builtin_ia32_storedqusi512_mask: |
14595 | case X86::BI__builtin_ia32_storedquhi512_mask: |
14596 | case X86::BI__builtin_ia32_storedquqi512_mask: |
14597 | case X86::BI__builtin_ia32_storeupd512_mask: |
14598 | case X86::BI__builtin_ia32_storeups512_mask: |
14599 | return EmitX86MaskedStore(CGF&: *this, Ops, Alignment: Align(1)); |
14600 | |
14601 | case X86::BI__builtin_ia32_storesh128_mask: |
14602 | case X86::BI__builtin_ia32_storess128_mask: |
14603 | case X86::BI__builtin_ia32_storesd128_mask: |
14604 | return EmitX86MaskedStore(CGF&: *this, Ops, Alignment: Align(1)); |
14605 | |
14606 | case X86::BI__builtin_ia32_vpopcntb_128: |
14607 | case X86::BI__builtin_ia32_vpopcntd_128: |
14608 | case X86::BI__builtin_ia32_vpopcntq_128: |
14609 | case X86::BI__builtin_ia32_vpopcntw_128: |
14610 | case X86::BI__builtin_ia32_vpopcntb_256: |
14611 | case X86::BI__builtin_ia32_vpopcntd_256: |
14612 | case X86::BI__builtin_ia32_vpopcntq_256: |
14613 | case X86::BI__builtin_ia32_vpopcntw_256: |
14614 | case X86::BI__builtin_ia32_vpopcntb_512: |
14615 | case X86::BI__builtin_ia32_vpopcntd_512: |
14616 | case X86::BI__builtin_ia32_vpopcntq_512: |
14617 | case X86::BI__builtin_ia32_vpopcntw_512: { |
14618 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
14619 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::ctpop, Tys: ResultType); |
14620 | return Builder.CreateCall(Callee: F, Args: Ops); |
14621 | } |
14622 | case X86::BI__builtin_ia32_cvtmask2b128: |
14623 | case X86::BI__builtin_ia32_cvtmask2b256: |
14624 | case X86::BI__builtin_ia32_cvtmask2b512: |
14625 | case X86::BI__builtin_ia32_cvtmask2w128: |
14626 | case X86::BI__builtin_ia32_cvtmask2w256: |
14627 | case X86::BI__builtin_ia32_cvtmask2w512: |
14628 | case X86::BI__builtin_ia32_cvtmask2d128: |
14629 | case X86::BI__builtin_ia32_cvtmask2d256: |
14630 | case X86::BI__builtin_ia32_cvtmask2d512: |
14631 | case X86::BI__builtin_ia32_cvtmask2q128: |
14632 | case X86::BI__builtin_ia32_cvtmask2q256: |
14633 | case X86::BI__builtin_ia32_cvtmask2q512: |
14634 | return EmitX86SExtMask(CGF&: *this, Op: Ops[0], DstTy: ConvertType(T: E->getType())); |
14635 | |
14636 | case X86::BI__builtin_ia32_cvtb2mask128: |
14637 | case X86::BI__builtin_ia32_cvtb2mask256: |
14638 | case X86::BI__builtin_ia32_cvtb2mask512: |
14639 | case X86::BI__builtin_ia32_cvtw2mask128: |
14640 | case X86::BI__builtin_ia32_cvtw2mask256: |
14641 | case X86::BI__builtin_ia32_cvtw2mask512: |
14642 | case X86::BI__builtin_ia32_cvtd2mask128: |
14643 | case X86::BI__builtin_ia32_cvtd2mask256: |
14644 | case X86::BI__builtin_ia32_cvtd2mask512: |
14645 | case X86::BI__builtin_ia32_cvtq2mask128: |
14646 | case X86::BI__builtin_ia32_cvtq2mask256: |
14647 | case X86::BI__builtin_ia32_cvtq2mask512: |
14648 | return EmitX86ConvertToMask(CGF&: *this, In: Ops[0]); |
14649 | |
14650 | case X86::BI__builtin_ia32_cvtdq2ps512_mask: |
14651 | case X86::BI__builtin_ia32_cvtqq2ps512_mask: |
14652 | case X86::BI__builtin_ia32_cvtqq2pd512_mask: |
14653 | case X86::BI__builtin_ia32_vcvtw2ph512_mask: |
14654 | case X86::BI__builtin_ia32_vcvtdq2ph512_mask: |
14655 | case X86::BI__builtin_ia32_vcvtqq2ph512_mask: |
14656 | return EmitX86ConvertIntToFp(CGF&: *this, E, Ops, /*IsSigned*/ true); |
14657 | case X86::BI__builtin_ia32_cvtudq2ps512_mask: |
14658 | case X86::BI__builtin_ia32_cvtuqq2ps512_mask: |
14659 | case X86::BI__builtin_ia32_cvtuqq2pd512_mask: |
14660 | case X86::BI__builtin_ia32_vcvtuw2ph512_mask: |
14661 | case X86::BI__builtin_ia32_vcvtudq2ph512_mask: |
14662 | case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: |
14663 | return EmitX86ConvertIntToFp(CGF&: *this, E, Ops, /*IsSigned*/ false); |
14664 | |
14665 | case X86::BI__builtin_ia32_vfmaddss3: |
14666 | case X86::BI__builtin_ia32_vfmaddsd3: |
14667 | case X86::BI__builtin_ia32_vfmaddsh3_mask: |
14668 | case X86::BI__builtin_ia32_vfmaddss3_mask: |
14669 | case X86::BI__builtin_ia32_vfmaddsd3_mask: |
14670 | return EmitScalarFMAExpr(CGF&: *this, E, Ops, Upper: Ops[0]); |
14671 | case X86::BI__builtin_ia32_vfmaddss: |
14672 | case X86::BI__builtin_ia32_vfmaddsd: |
14673 | return EmitScalarFMAExpr(CGF&: *this, E, Ops, |
14674 | Upper: Constant::getNullValue(Ty: Ops[0]->getType())); |
14675 | case X86::BI__builtin_ia32_vfmaddsh3_maskz: |
14676 | case X86::BI__builtin_ia32_vfmaddss3_maskz: |
14677 | case X86::BI__builtin_ia32_vfmaddsd3_maskz: |
14678 | return EmitScalarFMAExpr(CGF&: *this, E, Ops, Upper: Ops[0], /*ZeroMask*/ true); |
14679 | case X86::BI__builtin_ia32_vfmaddsh3_mask3: |
14680 | case X86::BI__builtin_ia32_vfmaddss3_mask3: |
14681 | case X86::BI__builtin_ia32_vfmaddsd3_mask3: |
14682 | return EmitScalarFMAExpr(CGF&: *this, E, Ops, Upper: Ops[2], /*ZeroMask*/ false, PTIdx: 2); |
14683 | case X86::BI__builtin_ia32_vfmsubsh3_mask3: |
14684 | case X86::BI__builtin_ia32_vfmsubss3_mask3: |
14685 | case X86::BI__builtin_ia32_vfmsubsd3_mask3: |
14686 | return EmitScalarFMAExpr(CGF&: *this, E, Ops, Upper: Ops[2], /*ZeroMask*/ false, PTIdx: 2, |
14687 | /*NegAcc*/ true); |
14688 | case X86::BI__builtin_ia32_vfmaddph: |
14689 | case X86::BI__builtin_ia32_vfmaddps: |
14690 | case X86::BI__builtin_ia32_vfmaddpd: |
14691 | case X86::BI__builtin_ia32_vfmaddph256: |
14692 | case X86::BI__builtin_ia32_vfmaddps256: |
14693 | case X86::BI__builtin_ia32_vfmaddpd256: |
14694 | case X86::BI__builtin_ia32_vfmaddph512_mask: |
14695 | case X86::BI__builtin_ia32_vfmaddph512_maskz: |
14696 | case X86::BI__builtin_ia32_vfmaddph512_mask3: |
14697 | case X86::BI__builtin_ia32_vfmaddps512_mask: |
14698 | case X86::BI__builtin_ia32_vfmaddps512_maskz: |
14699 | case X86::BI__builtin_ia32_vfmaddps512_mask3: |
14700 | case X86::BI__builtin_ia32_vfmsubps512_mask3: |
14701 | case X86::BI__builtin_ia32_vfmaddpd512_mask: |
14702 | case X86::BI__builtin_ia32_vfmaddpd512_maskz: |
14703 | case X86::BI__builtin_ia32_vfmaddpd512_mask3: |
14704 | case X86::BI__builtin_ia32_vfmsubpd512_mask3: |
14705 | case X86::BI__builtin_ia32_vfmsubph512_mask3: |
14706 | return EmitX86FMAExpr(CGF&: *this, E, Ops, BuiltinID, /*IsAddSub*/ false); |
14707 | case X86::BI__builtin_ia32_vfmaddsubph512_mask: |
14708 | case X86::BI__builtin_ia32_vfmaddsubph512_maskz: |
14709 | case X86::BI__builtin_ia32_vfmaddsubph512_mask3: |
14710 | case X86::BI__builtin_ia32_vfmsubaddph512_mask3: |
14711 | case X86::BI__builtin_ia32_vfmaddsubps512_mask: |
14712 | case X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
14713 | case X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
14714 | case X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
14715 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
14716 | case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
14717 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
14718 | case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
14719 | return EmitX86FMAExpr(CGF&: *this, E, Ops, BuiltinID, /*IsAddSub*/ true); |
14720 | |
14721 | case X86::BI__builtin_ia32_movdqa32store128_mask: |
14722 | case X86::BI__builtin_ia32_movdqa64store128_mask: |
14723 | case X86::BI__builtin_ia32_storeaps128_mask: |
14724 | case X86::BI__builtin_ia32_storeapd128_mask: |
14725 | case X86::BI__builtin_ia32_movdqa32store256_mask: |
14726 | case X86::BI__builtin_ia32_movdqa64store256_mask: |
14727 | case X86::BI__builtin_ia32_storeaps256_mask: |
14728 | case X86::BI__builtin_ia32_storeapd256_mask: |
14729 | case X86::BI__builtin_ia32_movdqa32store512_mask: |
14730 | case X86::BI__builtin_ia32_movdqa64store512_mask: |
14731 | case X86::BI__builtin_ia32_storeaps512_mask: |
14732 | case X86::BI__builtin_ia32_storeapd512_mask: |
14733 | return EmitX86MaskedStore( |
14734 | CGF&: *this, Ops, |
14735 | Alignment: getContext().getTypeAlignInChars(T: E->getArg(Arg: 1)->getType()).getAsAlign()); |
14736 | |
14737 | case X86::BI__builtin_ia32_loadups128_mask: |
14738 | case X86::BI__builtin_ia32_loadups256_mask: |
14739 | case X86::BI__builtin_ia32_loadups512_mask: |
14740 | case X86::BI__builtin_ia32_loadupd128_mask: |
14741 | case X86::BI__builtin_ia32_loadupd256_mask: |
14742 | case X86::BI__builtin_ia32_loadupd512_mask: |
14743 | case X86::BI__builtin_ia32_loaddquqi128_mask: |
14744 | case X86::BI__builtin_ia32_loaddquqi256_mask: |
14745 | case X86::BI__builtin_ia32_loaddquqi512_mask: |
14746 | case X86::BI__builtin_ia32_loaddquhi128_mask: |
14747 | case X86::BI__builtin_ia32_loaddquhi256_mask: |
14748 | case X86::BI__builtin_ia32_loaddquhi512_mask: |
14749 | case X86::BI__builtin_ia32_loaddqusi128_mask: |
14750 | case X86::BI__builtin_ia32_loaddqusi256_mask: |
14751 | case X86::BI__builtin_ia32_loaddqusi512_mask: |
14752 | case X86::BI__builtin_ia32_loaddqudi128_mask: |
14753 | case X86::BI__builtin_ia32_loaddqudi256_mask: |
14754 | case X86::BI__builtin_ia32_loaddqudi512_mask: |
14755 | return EmitX86MaskedLoad(CGF&: *this, Ops, Alignment: Align(1)); |
14756 | |
14757 | case X86::BI__builtin_ia32_loadsh128_mask: |
14758 | case X86::BI__builtin_ia32_loadss128_mask: |
14759 | case X86::BI__builtin_ia32_loadsd128_mask: |
14760 | return EmitX86MaskedLoad(CGF&: *this, Ops, Alignment: Align(1)); |
14761 | |
14762 | case X86::BI__builtin_ia32_loadaps128_mask: |
14763 | case X86::BI__builtin_ia32_loadaps256_mask: |
14764 | case X86::BI__builtin_ia32_loadaps512_mask: |
14765 | case X86::BI__builtin_ia32_loadapd128_mask: |
14766 | case X86::BI__builtin_ia32_loadapd256_mask: |
14767 | case X86::BI__builtin_ia32_loadapd512_mask: |
14768 | case X86::BI__builtin_ia32_movdqa32load128_mask: |
14769 | case X86::BI__builtin_ia32_movdqa32load256_mask: |
14770 | case X86::BI__builtin_ia32_movdqa32load512_mask: |
14771 | case X86::BI__builtin_ia32_movdqa64load128_mask: |
14772 | case X86::BI__builtin_ia32_movdqa64load256_mask: |
14773 | case X86::BI__builtin_ia32_movdqa64load512_mask: |
14774 | return EmitX86MaskedLoad( |
14775 | CGF&: *this, Ops, |
14776 | Alignment: getContext().getTypeAlignInChars(T: E->getArg(Arg: 1)->getType()).getAsAlign()); |
14777 | |
14778 | case X86::BI__builtin_ia32_expandloaddf128_mask: |
14779 | case X86::BI__builtin_ia32_expandloaddf256_mask: |
14780 | case X86::BI__builtin_ia32_expandloaddf512_mask: |
14781 | case X86::BI__builtin_ia32_expandloadsf128_mask: |
14782 | case X86::BI__builtin_ia32_expandloadsf256_mask: |
14783 | case X86::BI__builtin_ia32_expandloadsf512_mask: |
14784 | case X86::BI__builtin_ia32_expandloaddi128_mask: |
14785 | case X86::BI__builtin_ia32_expandloaddi256_mask: |
14786 | case X86::BI__builtin_ia32_expandloaddi512_mask: |
14787 | case X86::BI__builtin_ia32_expandloadsi128_mask: |
14788 | case X86::BI__builtin_ia32_expandloadsi256_mask: |
14789 | case X86::BI__builtin_ia32_expandloadsi512_mask: |
14790 | case X86::BI__builtin_ia32_expandloadhi128_mask: |
14791 | case X86::BI__builtin_ia32_expandloadhi256_mask: |
14792 | case X86::BI__builtin_ia32_expandloadhi512_mask: |
14793 | case X86::BI__builtin_ia32_expandloadqi128_mask: |
14794 | case X86::BI__builtin_ia32_expandloadqi256_mask: |
14795 | case X86::BI__builtin_ia32_expandloadqi512_mask: |
14796 | return EmitX86ExpandLoad(CGF&: *this, Ops); |
14797 | |
14798 | case X86::BI__builtin_ia32_compressstoredf128_mask: |
14799 | case X86::BI__builtin_ia32_compressstoredf256_mask: |
14800 | case X86::BI__builtin_ia32_compressstoredf512_mask: |
14801 | case X86::BI__builtin_ia32_compressstoresf128_mask: |
14802 | case X86::BI__builtin_ia32_compressstoresf256_mask: |
14803 | case X86::BI__builtin_ia32_compressstoresf512_mask: |
14804 | case X86::BI__builtin_ia32_compressstoredi128_mask: |
14805 | case X86::BI__builtin_ia32_compressstoredi256_mask: |
14806 | case X86::BI__builtin_ia32_compressstoredi512_mask: |
14807 | case X86::BI__builtin_ia32_compressstoresi128_mask: |
14808 | case X86::BI__builtin_ia32_compressstoresi256_mask: |
14809 | case X86::BI__builtin_ia32_compressstoresi512_mask: |
14810 | case X86::BI__builtin_ia32_compressstorehi128_mask: |
14811 | case X86::BI__builtin_ia32_compressstorehi256_mask: |
14812 | case X86::BI__builtin_ia32_compressstorehi512_mask: |
14813 | case X86::BI__builtin_ia32_compressstoreqi128_mask: |
14814 | case X86::BI__builtin_ia32_compressstoreqi256_mask: |
14815 | case X86::BI__builtin_ia32_compressstoreqi512_mask: |
14816 | return EmitX86CompressStore(CGF&: *this, Ops); |
14817 | |
14818 | case X86::BI__builtin_ia32_expanddf128_mask: |
14819 | case X86::BI__builtin_ia32_expanddf256_mask: |
14820 | case X86::BI__builtin_ia32_expanddf512_mask: |
14821 | case X86::BI__builtin_ia32_expandsf128_mask: |
14822 | case X86::BI__builtin_ia32_expandsf256_mask: |
14823 | case X86::BI__builtin_ia32_expandsf512_mask: |
14824 | case X86::BI__builtin_ia32_expanddi128_mask: |
14825 | case X86::BI__builtin_ia32_expanddi256_mask: |
14826 | case X86::BI__builtin_ia32_expanddi512_mask: |
14827 | case X86::BI__builtin_ia32_expandsi128_mask: |
14828 | case X86::BI__builtin_ia32_expandsi256_mask: |
14829 | case X86::BI__builtin_ia32_expandsi512_mask: |
14830 | case X86::BI__builtin_ia32_expandhi128_mask: |
14831 | case X86::BI__builtin_ia32_expandhi256_mask: |
14832 | case X86::BI__builtin_ia32_expandhi512_mask: |
14833 | case X86::BI__builtin_ia32_expandqi128_mask: |
14834 | case X86::BI__builtin_ia32_expandqi256_mask: |
14835 | case X86::BI__builtin_ia32_expandqi512_mask: |
14836 | return EmitX86CompressExpand(CGF&: *this, Ops, /*IsCompress*/false); |
14837 | |
14838 | case X86::BI__builtin_ia32_compressdf128_mask: |
14839 | case X86::BI__builtin_ia32_compressdf256_mask: |
14840 | case X86::BI__builtin_ia32_compressdf512_mask: |
14841 | case X86::BI__builtin_ia32_compresssf128_mask: |
14842 | case X86::BI__builtin_ia32_compresssf256_mask: |
14843 | case X86::BI__builtin_ia32_compresssf512_mask: |
14844 | case X86::BI__builtin_ia32_compressdi128_mask: |
14845 | case X86::BI__builtin_ia32_compressdi256_mask: |
14846 | case X86::BI__builtin_ia32_compressdi512_mask: |
14847 | case X86::BI__builtin_ia32_compresssi128_mask: |
14848 | case X86::BI__builtin_ia32_compresssi256_mask: |
14849 | case X86::BI__builtin_ia32_compresssi512_mask: |
14850 | case X86::BI__builtin_ia32_compresshi128_mask: |
14851 | case X86::BI__builtin_ia32_compresshi256_mask: |
14852 | case X86::BI__builtin_ia32_compresshi512_mask: |
14853 | case X86::BI__builtin_ia32_compressqi128_mask: |
14854 | case X86::BI__builtin_ia32_compressqi256_mask: |
14855 | case X86::BI__builtin_ia32_compressqi512_mask: |
14856 | return EmitX86CompressExpand(CGF&: *this, Ops, /*IsCompress*/true); |
14857 | |
14858 | case X86::BI__builtin_ia32_gather3div2df: |
14859 | case X86::BI__builtin_ia32_gather3div2di: |
14860 | case X86::BI__builtin_ia32_gather3div4df: |
14861 | case X86::BI__builtin_ia32_gather3div4di: |
14862 | case X86::BI__builtin_ia32_gather3div4sf: |
14863 | case X86::BI__builtin_ia32_gather3div4si: |
14864 | case X86::BI__builtin_ia32_gather3div8sf: |
14865 | case X86::BI__builtin_ia32_gather3div8si: |
14866 | case X86::BI__builtin_ia32_gather3siv2df: |
14867 | case X86::BI__builtin_ia32_gather3siv2di: |
14868 | case X86::BI__builtin_ia32_gather3siv4df: |
14869 | case X86::BI__builtin_ia32_gather3siv4di: |
14870 | case X86::BI__builtin_ia32_gather3siv4sf: |
14871 | case X86::BI__builtin_ia32_gather3siv4si: |
14872 | case X86::BI__builtin_ia32_gather3siv8sf: |
14873 | case X86::BI__builtin_ia32_gather3siv8si: |
14874 | case X86::BI__builtin_ia32_gathersiv8df: |
14875 | case X86::BI__builtin_ia32_gathersiv16sf: |
14876 | case X86::BI__builtin_ia32_gatherdiv8df: |
14877 | case X86::BI__builtin_ia32_gatherdiv16sf: |
14878 | case X86::BI__builtin_ia32_gathersiv8di: |
14879 | case X86::BI__builtin_ia32_gathersiv16si: |
14880 | case X86::BI__builtin_ia32_gatherdiv8di: |
14881 | case X86::BI__builtin_ia32_gatherdiv16si: { |
14882 | Intrinsic::ID IID; |
14883 | switch (BuiltinID) { |
14884 | default: llvm_unreachable("Unexpected builtin");
14885 | case X86::BI__builtin_ia32_gather3div2df: |
14886 | IID = Intrinsic::x86_avx512_mask_gather3div2_df; |
14887 | break; |
14888 | case X86::BI__builtin_ia32_gather3div2di: |
14889 | IID = Intrinsic::x86_avx512_mask_gather3div2_di; |
14890 | break; |
14891 | case X86::BI__builtin_ia32_gather3div4df: |
14892 | IID = Intrinsic::x86_avx512_mask_gather3div4_df; |
14893 | break; |
14894 | case X86::BI__builtin_ia32_gather3div4di: |
14895 | IID = Intrinsic::x86_avx512_mask_gather3div4_di; |
14896 | break; |
14897 | case X86::BI__builtin_ia32_gather3div4sf: |
14898 | IID = Intrinsic::x86_avx512_mask_gather3div4_sf; |
14899 | break; |
14900 | case X86::BI__builtin_ia32_gather3div4si: |
14901 | IID = Intrinsic::x86_avx512_mask_gather3div4_si; |
14902 | break; |
14903 | case X86::BI__builtin_ia32_gather3div8sf: |
14904 | IID = Intrinsic::x86_avx512_mask_gather3div8_sf; |
14905 | break; |
14906 | case X86::BI__builtin_ia32_gather3div8si: |
14907 | IID = Intrinsic::x86_avx512_mask_gather3div8_si; |
14908 | break; |
14909 | case X86::BI__builtin_ia32_gather3siv2df: |
14910 | IID = Intrinsic::x86_avx512_mask_gather3siv2_df; |
14911 | break; |
14912 | case X86::BI__builtin_ia32_gather3siv2di: |
14913 | IID = Intrinsic::x86_avx512_mask_gather3siv2_di; |
14914 | break; |
14915 | case X86::BI__builtin_ia32_gather3siv4df: |
14916 | IID = Intrinsic::x86_avx512_mask_gather3siv4_df; |
14917 | break; |
14918 | case X86::BI__builtin_ia32_gather3siv4di: |
14919 | IID = Intrinsic::x86_avx512_mask_gather3siv4_di; |
14920 | break; |
14921 | case X86::BI__builtin_ia32_gather3siv4sf: |
14922 | IID = Intrinsic::x86_avx512_mask_gather3siv4_sf; |
14923 | break; |
14924 | case X86::BI__builtin_ia32_gather3siv4si: |
14925 | IID = Intrinsic::x86_avx512_mask_gather3siv4_si; |
14926 | break; |
14927 | case X86::BI__builtin_ia32_gather3siv8sf: |
14928 | IID = Intrinsic::x86_avx512_mask_gather3siv8_sf; |
14929 | break; |
14930 | case X86::BI__builtin_ia32_gather3siv8si: |
14931 | IID = Intrinsic::x86_avx512_mask_gather3siv8_si; |
14932 | break; |
14933 | case X86::BI__builtin_ia32_gathersiv8df: |
14934 | IID = Intrinsic::x86_avx512_mask_gather_dpd_512; |
14935 | break; |
14936 | case X86::BI__builtin_ia32_gathersiv16sf: |
14937 | IID = Intrinsic::x86_avx512_mask_gather_dps_512; |
14938 | break; |
14939 | case X86::BI__builtin_ia32_gatherdiv8df: |
14940 | IID = Intrinsic::x86_avx512_mask_gather_qpd_512; |
14941 | break; |
14942 | case X86::BI__builtin_ia32_gatherdiv16sf: |
14943 | IID = Intrinsic::x86_avx512_mask_gather_qps_512; |
14944 | break; |
14945 | case X86::BI__builtin_ia32_gathersiv8di: |
14946 | IID = Intrinsic::x86_avx512_mask_gather_dpq_512; |
14947 | break; |
14948 | case X86::BI__builtin_ia32_gathersiv16si: |
14949 | IID = Intrinsic::x86_avx512_mask_gather_dpi_512; |
14950 | break; |
14951 | case X86::BI__builtin_ia32_gatherdiv8di: |
14952 | IID = Intrinsic::x86_avx512_mask_gather_qpq_512; |
14953 | break; |
14954 | case X86::BI__builtin_ia32_gatherdiv16si: |
14955 | IID = Intrinsic::x86_avx512_mask_gather_qpi_512; |
14956 | break; |
14957 | } |
14958 | |
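// The gather intrinsics take a vXi1 mask whose length matches the narrower
// of the passthru vector (Ops[0]) and the index vector (Ops[2]); size the
// mask operand (Ops[3]) accordingly.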
14959 | unsigned MinElts = std::min( |
14960 | a: cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(), |
14961 | b: cast<llvm::FixedVectorType>(Val: Ops[2]->getType())->getNumElements()); |
14962 | Ops[3] = getMaskVecValue(CGF&: *this, Mask: Ops[3], NumElts: MinElts); |
14963 | Function *Intr = CGM.getIntrinsic(IID); |
14964 | return Builder.CreateCall(Callee: Intr, Args: Ops); |
14965 | } |
14966 | |
14967 | case X86::BI__builtin_ia32_scattersiv8df: |
14968 | case X86::BI__builtin_ia32_scattersiv16sf: |
14969 | case X86::BI__builtin_ia32_scatterdiv8df: |
14970 | case X86::BI__builtin_ia32_scatterdiv16sf: |
14971 | case X86::BI__builtin_ia32_scattersiv8di: |
14972 | case X86::BI__builtin_ia32_scattersiv16si: |
14973 | case X86::BI__builtin_ia32_scatterdiv8di: |
14974 | case X86::BI__builtin_ia32_scatterdiv16si: |
14975 | case X86::BI__builtin_ia32_scatterdiv2df: |
14976 | case X86::BI__builtin_ia32_scatterdiv2di: |
14977 | case X86::BI__builtin_ia32_scatterdiv4df: |
14978 | case X86::BI__builtin_ia32_scatterdiv4di: |
14979 | case X86::BI__builtin_ia32_scatterdiv4sf: |
14980 | case X86::BI__builtin_ia32_scatterdiv4si: |
14981 | case X86::BI__builtin_ia32_scatterdiv8sf: |
14982 | case X86::BI__builtin_ia32_scatterdiv8si: |
14983 | case X86::BI__builtin_ia32_scattersiv2df: |
14984 | case X86::BI__builtin_ia32_scattersiv2di: |
14985 | case X86::BI__builtin_ia32_scattersiv4df: |
14986 | case X86::BI__builtin_ia32_scattersiv4di: |
14987 | case X86::BI__builtin_ia32_scattersiv4sf: |
14988 | case X86::BI__builtin_ia32_scattersiv4si: |
14989 | case X86::BI__builtin_ia32_scattersiv8sf: |
14990 | case X86::BI__builtin_ia32_scattersiv8si: { |
14991 | Intrinsic::ID IID; |
14992 | switch (BuiltinID) { |
14993 | default: llvm_unreachable("Unexpected builtin");
14994 | case X86::BI__builtin_ia32_scattersiv8df: |
14995 | IID = Intrinsic::x86_avx512_mask_scatter_dpd_512; |
14996 | break; |
14997 | case X86::BI__builtin_ia32_scattersiv16sf: |
14998 | IID = Intrinsic::x86_avx512_mask_scatter_dps_512; |
14999 | break; |
15000 | case X86::BI__builtin_ia32_scatterdiv8df: |
15001 | IID = Intrinsic::x86_avx512_mask_scatter_qpd_512; |
15002 | break; |
15003 | case X86::BI__builtin_ia32_scatterdiv16sf: |
15004 | IID = Intrinsic::x86_avx512_mask_scatter_qps_512; |
15005 | break; |
15006 | case X86::BI__builtin_ia32_scattersiv8di: |
15007 | IID = Intrinsic::x86_avx512_mask_scatter_dpq_512; |
15008 | break; |
15009 | case X86::BI__builtin_ia32_scattersiv16si: |
15010 | IID = Intrinsic::x86_avx512_mask_scatter_dpi_512; |
15011 | break; |
15012 | case X86::BI__builtin_ia32_scatterdiv8di: |
15013 | IID = Intrinsic::x86_avx512_mask_scatter_qpq_512; |
15014 | break; |
15015 | case X86::BI__builtin_ia32_scatterdiv16si: |
15016 | IID = Intrinsic::x86_avx512_mask_scatter_qpi_512; |
15017 | break; |
15018 | case X86::BI__builtin_ia32_scatterdiv2df: |
15019 | IID = Intrinsic::x86_avx512_mask_scatterdiv2_df; |
15020 | break; |
15021 | case X86::BI__builtin_ia32_scatterdiv2di: |
15022 | IID = Intrinsic::x86_avx512_mask_scatterdiv2_di; |
15023 | break; |
15024 | case X86::BI__builtin_ia32_scatterdiv4df: |
15025 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_df; |
15026 | break; |
15027 | case X86::BI__builtin_ia32_scatterdiv4di: |
15028 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_di; |
15029 | break; |
15030 | case X86::BI__builtin_ia32_scatterdiv4sf: |
15031 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf; |
15032 | break; |
15033 | case X86::BI__builtin_ia32_scatterdiv4si: |
15034 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_si; |
15035 | break; |
15036 | case X86::BI__builtin_ia32_scatterdiv8sf: |
15037 | IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf; |
15038 | break; |
15039 | case X86::BI__builtin_ia32_scatterdiv8si: |
15040 | IID = Intrinsic::x86_avx512_mask_scatterdiv8_si; |
15041 | break; |
15042 | case X86::BI__builtin_ia32_scattersiv2df: |
15043 | IID = Intrinsic::x86_avx512_mask_scattersiv2_df; |
15044 | break; |
15045 | case X86::BI__builtin_ia32_scattersiv2di: |
15046 | IID = Intrinsic::x86_avx512_mask_scattersiv2_di; |
15047 | break; |
15048 | case X86::BI__builtin_ia32_scattersiv4df: |
15049 | IID = Intrinsic::x86_avx512_mask_scattersiv4_df; |
15050 | break; |
15051 | case X86::BI__builtin_ia32_scattersiv4di: |
15052 | IID = Intrinsic::x86_avx512_mask_scattersiv4_di; |
15053 | break; |
15054 | case X86::BI__builtin_ia32_scattersiv4sf: |
15055 | IID = Intrinsic::x86_avx512_mask_scattersiv4_sf; |
15056 | break; |
15057 | case X86::BI__builtin_ia32_scattersiv4si: |
15058 | IID = Intrinsic::x86_avx512_mask_scattersiv4_si; |
15059 | break; |
15060 | case X86::BI__builtin_ia32_scattersiv8sf: |
15061 | IID = Intrinsic::x86_avx512_mask_scattersiv8_sf; |
15062 | break; |
15063 | case X86::BI__builtin_ia32_scattersiv8si: |
15064 | IID = Intrinsic::x86_avx512_mask_scattersiv8_si; |
15065 | break; |
15066 | } |
15067 | |
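// As for the gathers above, size the vXi1 mask (Ops[1]) to the narrower of
// the index vector (Ops[2]) and the data vector (Ops[3]).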
15068 | unsigned MinElts = std::min( |
15069 | a: cast<llvm::FixedVectorType>(Val: Ops[2]->getType())->getNumElements(), |
15070 | b: cast<llvm::FixedVectorType>(Val: Ops[3]->getType())->getNumElements()); |
15071 | Ops[1] = getMaskVecValue(CGF&: *this, Mask: Ops[1], NumElts: MinElts); |
15072 | Function *Intr = CGM.getIntrinsic(IID); |
15073 | return Builder.CreateCall(Callee: Intr, Args: Ops); |
15074 | } |
15075 | |
15076 | case X86::BI__builtin_ia32_vextractf128_pd256: |
15077 | case X86::BI__builtin_ia32_vextractf128_ps256: |
15078 | case X86::BI__builtin_ia32_vextractf128_si256: |
15079 | case X86::BI__builtin_ia32_extract128i256: |
15080 | case X86::BI__builtin_ia32_extractf64x4_mask: |
15081 | case X86::BI__builtin_ia32_extractf32x4_mask: |
15082 | case X86::BI__builtin_ia32_extracti64x4_mask: |
15083 | case X86::BI__builtin_ia32_extracti32x4_mask: |
15084 | case X86::BI__builtin_ia32_extractf32x8_mask: |
15085 | case X86::BI__builtin_ia32_extracti32x8_mask: |
15086 | case X86::BI__builtin_ia32_extractf32x4_256_mask: |
15087 | case X86::BI__builtin_ia32_extracti32x4_256_mask: |
15088 | case X86::BI__builtin_ia32_extractf64x2_256_mask: |
15089 | case X86::BI__builtin_ia32_extracti64x2_256_mask: |
15090 | case X86::BI__builtin_ia32_extractf64x2_512_mask: |
15091 | case X86::BI__builtin_ia32_extracti64x2_512_mask: { |
15092 | auto *DstTy = cast<llvm::FixedVectorType>(Val: ConvertType(T: E->getType())); |
15093 | unsigned NumElts = DstTy->getNumElements(); |
15094 | unsigned SrcNumElts = |
15095 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
15096 | unsigned SubVectors = SrcNumElts / NumElts; |
15097 | unsigned Index = cast<ConstantInt>(Val: Ops[1])->getZExtValue(); |
15098 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
15099 | Index &= SubVectors - 1; // Remove any extra bits. |
15100 | Index *= NumElts; |
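// e.g. extractf32x4 from a 512-bit source: NumElts = 4 and SubVectors = 4,
// so an immediate of 2 selects source elements 8..11.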
15101 | |
15102 | int Indices[16]; |
15103 | for (unsigned i = 0; i != NumElts; ++i) |
15104 | Indices[i] = i + Index; |
15105 | |
15106 | Value *Res = Builder.CreateShuffleVector(V: Ops[0], Mask: ArrayRef(Indices, NumElts), |
15107 | Name: "extract");
15108 | |
15109 | if (Ops.size() == 4) |
15110 | Res = EmitX86Select(CGF&: *this, Mask: Ops[3], Op0: Res, Op1: Ops[2]); |
15111 | |
15112 | return Res; |
15113 | } |
15114 | case X86::BI__builtin_ia32_vinsertf128_pd256: |
15115 | case X86::BI__builtin_ia32_vinsertf128_ps256: |
15116 | case X86::BI__builtin_ia32_vinsertf128_si256: |
15117 | case X86::BI__builtin_ia32_insert128i256: |
15118 | case X86::BI__builtin_ia32_insertf64x4: |
15119 | case X86::BI__builtin_ia32_insertf32x4: |
15120 | case X86::BI__builtin_ia32_inserti64x4: |
15121 | case X86::BI__builtin_ia32_inserti32x4: |
15122 | case X86::BI__builtin_ia32_insertf32x8: |
15123 | case X86::BI__builtin_ia32_inserti32x8: |
15124 | case X86::BI__builtin_ia32_insertf32x4_256: |
15125 | case X86::BI__builtin_ia32_inserti32x4_256: |
15126 | case X86::BI__builtin_ia32_insertf64x2_256: |
15127 | case X86::BI__builtin_ia32_inserti64x2_256: |
15128 | case X86::BI__builtin_ia32_insertf64x2_512: |
15129 | case X86::BI__builtin_ia32_inserti64x2_512: { |
15130 | unsigned DstNumElts = |
15131 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
15132 | unsigned SrcNumElts = |
15133 | cast<llvm::FixedVectorType>(Val: Ops[1]->getType())->getNumElements(); |
15134 | unsigned SubVectors = DstNumElts / SrcNumElts; |
15135 | unsigned Index = cast<ConstantInt>(Val: Ops[2])->getZExtValue(); |
15136 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
15137 | Index &= SubVectors - 1; // Remove any extra bits. |
15138 | Index *= SrcNumElts; |
15139 | |
15140 | int Indices[16]; |
15141 | for (unsigned i = 0; i != DstNumElts; ++i) |
15142 | Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i; |
15143 | |
15144 | Value *Op1 = Builder.CreateShuffleVector( |
15145 | V: Ops[1], Mask: ArrayRef(Indices, DstNumElts), Name: "widen");
15146 | |
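// Second shuffle: take everything from Ops[0] except the insertion window
// [Index, Index + SrcNumElts), which is filled from the widened subvector
// (its elements start at offset DstNumElts in the two-input shuffle).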
15147 | for (unsigned i = 0; i != DstNumElts; ++i) { |
15148 | if (i >= Index && i < (Index + SrcNumElts)) |
15149 | Indices[i] = (i - Index) + DstNumElts; |
15150 | else |
15151 | Indices[i] = i; |
15152 | } |
15153 | |
15154 | return Builder.CreateShuffleVector(V1: Ops[0], V2: Op1, |
15155 | Mask: ArrayRef(Indices, DstNumElts), Name: "insert");
15156 | } |
15157 | case X86::BI__builtin_ia32_pmovqd512_mask: |
15158 | case X86::BI__builtin_ia32_pmovwb512_mask: { |
15159 | Value *Res = Builder.CreateTrunc(V: Ops[0], DestTy: Ops[1]->getType()); |
15160 | return EmitX86Select(CGF&: *this, Mask: Ops[2], Op0: Res, Op1: Ops[1]); |
15161 | } |
15162 | case X86::BI__builtin_ia32_pmovdb512_mask: |
15163 | case X86::BI__builtin_ia32_pmovdw512_mask: |
15164 | case X86::BI__builtin_ia32_pmovqw512_mask: { |
15165 | if (const auto *C = dyn_cast<Constant>(Val: Ops[2])) |
15166 | if (C->isAllOnesValue()) |
15167 | return Builder.CreateTrunc(V: Ops[0], DestTy: Ops[1]->getType()); |
15168 | |
15169 | Intrinsic::ID IID; |
15170 | switch (BuiltinID) { |
15171 | default: llvm_unreachable("Unsupported intrinsic!");
15172 | case X86::BI__builtin_ia32_pmovdb512_mask: |
15173 | IID = Intrinsic::x86_avx512_mask_pmov_db_512; |
15174 | break; |
15175 | case X86::BI__builtin_ia32_pmovdw512_mask: |
15176 | IID = Intrinsic::x86_avx512_mask_pmov_dw_512; |
15177 | break; |
15178 | case X86::BI__builtin_ia32_pmovqw512_mask: |
15179 | IID = Intrinsic::x86_avx512_mask_pmov_qw_512; |
15180 | break; |
15181 | } |
15182 | |
15183 | Function *Intr = CGM.getIntrinsic(IID); |
15184 | return Builder.CreateCall(Callee: Intr, Args: Ops); |
15185 | } |
15186 | case X86::BI__builtin_ia32_pblendw128: |
15187 | case X86::BI__builtin_ia32_blendpd: |
15188 | case X86::BI__builtin_ia32_blendps: |
15189 | case X86::BI__builtin_ia32_blendpd256: |
15190 | case X86::BI__builtin_ia32_blendps256: |
15191 | case X86::BI__builtin_ia32_pblendw256: |
15192 | case X86::BI__builtin_ia32_pblendd128: |
15193 | case X86::BI__builtin_ia32_pblendd256: { |
15194 | unsigned NumElts = |
15195 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
15196 | unsigned Imm = cast<llvm::ConstantInt>(Val: Ops[2])->getZExtValue(); |
15197 | |
15198 | int Indices[16]; |
15199 | // If there are more than 8 elements, the immediate is used twice so make |
15200 | // sure we handle that. |
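// A set immediate bit selects the element from Ops[1] (index >= NumElts); a
// clear bit keeps the element from Ops[0]. e.g. pblendw with an immediate of
// 0x0F takes words 0..3 from Ops[1] and words 4..7 from Ops[0].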
15201 | for (unsigned i = 0; i != NumElts; ++i) |
15202 | Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i; |
15203 | |
15204 | return Builder.CreateShuffleVector(V1: Ops[0], V2: Ops[1], |
15205 | Mask: ArrayRef(Indices, NumElts), Name: "blend");
15206 | } |
15207 | case X86::BI__builtin_ia32_pshuflw: |
15208 | case X86::BI__builtin_ia32_pshuflw256: |
15209 | case X86::BI__builtin_ia32_pshuflw512: { |
15210 | uint32_t Imm = cast<llvm::ConstantInt>(Val: Ops[1])->getZExtValue(); |
15211 | auto *Ty = cast<llvm::FixedVectorType>(Val: Ops[0]->getType()); |
15212 | unsigned NumElts = Ty->getNumElements(); |
15213 | |
15214 | // Splat the 8-bits of immediate 4 times to help the loop wrap around. |
15215 | Imm = (Imm & 0xff) * 0x01010101; |
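// e.g. an immediate of 0x1B (0b00011011) reverses the four low words of each
// 128-bit lane (indices 3,2,1,0); the high words pass through unchanged.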
15216 | |
15217 | int Indices[32]; |
15218 | for (unsigned l = 0; l != NumElts; l += 8) { |
15219 | for (unsigned i = 0; i != 4; ++i) { |
15220 | Indices[l + i] = l + (Imm & 3); |
15221 | Imm >>= 2; |
15222 | } |
15223 | for (unsigned i = 4; i != 8; ++i) |
15224 | Indices[l + i] = l + i; |
15225 | } |
15226 | |
15227 | return Builder.CreateShuffleVector(V: Ops[0], Mask: ArrayRef(Indices, NumElts), |
15228 | Name: "pshuflw");
15229 | } |
15230 | case X86::BI__builtin_ia32_pshufhw: |
15231 | case X86::BI__builtin_ia32_pshufhw256: |
15232 | case X86::BI__builtin_ia32_pshufhw512: { |
15233 | uint32_t Imm = cast<llvm::ConstantInt>(Val: Ops[1])->getZExtValue(); |
15234 | auto *Ty = cast<llvm::FixedVectorType>(Val: Ops[0]->getType()); |
15235 | unsigned NumElts = Ty->getNumElements(); |
15236 | |
15237 | // Splat the 8-bits of immediate 4 times to help the loop wrap around. |
15238 | Imm = (Imm & 0xff) * 0x01010101; |
15239 | |
15240 | int Indices[32]; |
15241 | for (unsigned l = 0; l != NumElts; l += 8) { |
15242 | for (unsigned i = 0; i != 4; ++i) |
15243 | Indices[l + i] = l + i; |
15244 | for (unsigned i = 4; i != 8; ++i) { |
15245 | Indices[l + i] = l + 4 + (Imm & 3); |
15246 | Imm >>= 2; |
15247 | } |
15248 | } |
15249 | |
15250 | return Builder.CreateShuffleVector(V: Ops[0], Mask: ArrayRef(Indices, NumElts), |
15251 | Name: "pshufhw");
15252 | } |
15253 | case X86::BI__builtin_ia32_pshufd: |
15254 | case X86::BI__builtin_ia32_pshufd256: |
15255 | case X86::BI__builtin_ia32_pshufd512: |
15256 | case X86::BI__builtin_ia32_vpermilpd: |
15257 | case X86::BI__builtin_ia32_vpermilps: |
15258 | case X86::BI__builtin_ia32_vpermilpd256: |
15259 | case X86::BI__builtin_ia32_vpermilps256: |
15260 | case X86::BI__builtin_ia32_vpermilpd512: |
15261 | case X86::BI__builtin_ia32_vpermilps512: { |
15262 | uint32_t Imm = cast<llvm::ConstantInt>(Val: Ops[1])->getZExtValue(); |
15263 | auto *Ty = cast<llvm::FixedVectorType>(Val: Ops[0]->getType()); |
15264 | unsigned NumElts = Ty->getNumElements(); |
15265 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; |
15266 | unsigned NumLaneElts = NumElts / NumLanes; |
15267 | |
15268 | // Splat the 8-bits of immediate 4 times to help the loop wrap around. |
15269 | Imm = (Imm & 0xff) * 0x01010101; |
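// Each element within a 128-bit lane is selected by log2(NumLaneElts) bits
// of the immediate, starting from the low bits; the same 8-bit immediate is
// reused for every lane.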
15270 | |
15271 | int Indices[16]; |
15272 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
15273 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
15274 | Indices[i + l] = (Imm % NumLaneElts) + l; |
15275 | Imm /= NumLaneElts; |
15276 | } |
15277 | } |
15278 | |
15279 | return Builder.CreateShuffleVector(V: Ops[0], Mask: ArrayRef(Indices, NumElts), |
15280 | Name: "permil");
15281 | } |
15282 | case X86::BI__builtin_ia32_shufpd: |
15283 | case X86::BI__builtin_ia32_shufpd256: |
15284 | case X86::BI__builtin_ia32_shufpd512: |
15285 | case X86::BI__builtin_ia32_shufps: |
15286 | case X86::BI__builtin_ia32_shufps256: |
15287 | case X86::BI__builtin_ia32_shufps512: { |
15288 | uint32_t Imm = cast<llvm::ConstantInt>(Val: Ops[2])->getZExtValue(); |
15289 | auto *Ty = cast<llvm::FixedVectorType>(Val: Ops[0]->getType()); |
15290 | unsigned NumElts = Ty->getNumElements(); |
15291 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; |
15292 | unsigned NumLaneElts = NumElts / NumLanes; |
15293 | |
15294 | // Splat the 8-bits of immediate 4 times to help the loop wrap around. |
15295 | Imm = (Imm & 0xff) * 0x01010101; |
15296 | |
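// Within each 128-bit lane, the low half of the result is selected from
// Ops[0] and the high half from Ops[1]; the element index comes from
// successive bits of the immediate.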
15297 | int Indices[16]; |
15298 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
15299 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
15300 | unsigned Index = Imm % NumLaneElts; |
15301 | Imm /= NumLaneElts; |
15302 | if (i >= (NumLaneElts / 2)) |
15303 | Index += NumElts; |
15304 | Indices[l + i] = l + Index; |
15305 | } |
15306 | } |
15307 | |
15308 | return Builder.CreateShuffleVector(V1: Ops[0], V2: Ops[1], |
15309 | Mask: ArrayRef(Indices, NumElts), Name: "shufp");
15310 | } |
15311 | case X86::BI__builtin_ia32_permdi256: |
15312 | case X86::BI__builtin_ia32_permdf256: |
15313 | case X86::BI__builtin_ia32_permdi512: |
15314 | case X86::BI__builtin_ia32_permdf512: { |
15315 | unsigned Imm = cast<llvm::ConstantInt>(Val: Ops[1])->getZExtValue(); |
15316 | auto *Ty = cast<llvm::FixedVectorType>(Val: Ops[0]->getType()); |
15317 | unsigned NumElts = Ty->getNumElements(); |
15318 | |
15319 | // These intrinsics operate on 256-bit lanes of four 64-bit elements. |
15320 | int Indices[8]; |
15321 | for (unsigned l = 0; l != NumElts; l += 4) |
15322 | for (unsigned i = 0; i != 4; ++i) |
15323 | Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3); |
15324 | |
15325 | return Builder.CreateShuffleVector(V: Ops[0], Mask: ArrayRef(Indices, NumElts), |
15326 | Name: "perm");
15327 | } |
15328 | case X86::BI__builtin_ia32_palignr128: |
15329 | case X86::BI__builtin_ia32_palignr256: |
15330 | case X86::BI__builtin_ia32_palignr512: { |
15331 | unsigned ShiftVal = cast<llvm::ConstantInt>(Val: Ops[2])->getZExtValue() & 0xff; |
15332 | |
15333 | unsigned NumElts = |
15334 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
15335 | assert(NumElts % 16 == 0); |
15336 | |
15337 | // If palignr is shifting the pair of vectors more than the size of two |
15338 | // lanes, emit zero. |
15339 | if (ShiftVal >= 32) |
15340 | return llvm::Constant::getNullValue(Ty: ConvertType(T: E->getType())); |
15341 | |
15342 | // If palignr is shifting the pair of input vectors more than one lane, |
15343 | // but less than two lanes, convert to shifting in zeroes. |
15344 | if (ShiftVal > 16) { |
15345 | ShiftVal -= 16; |
15346 | Ops[1] = Ops[0]; |
15347 | Ops[0] = llvm::Constant::getNullValue(Ty: Ops[0]->getType()); |
15348 | } |
15349 | |
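// Within each 128-bit lane the result is the byte-wise concatenation of
// Ops[0] (high) and Ops[1] (low) shifted right by ShiftVal bytes.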
15350 | int Indices[64]; |
15351 | // 256-bit palignr operates on 128-bit lanes so we need to handle that |
15352 | for (unsigned l = 0; l != NumElts; l += 16) { |
15353 | for (unsigned i = 0; i != 16; ++i) { |
15354 | unsigned Idx = ShiftVal + i; |
15355 | if (Idx >= 16) |
15356 | Idx += NumElts - 16; // End of lane, switch operand. |
15357 | Indices[l + i] = Idx + l; |
15358 | } |
15359 | } |
15360 | |
15361 | return Builder.CreateShuffleVector(V1: Ops[1], V2: Ops[0], |
15362 | Mask: ArrayRef(Indices, NumElts), Name: "palignr");
15363 | } |
15364 | case X86::BI__builtin_ia32_alignd128: |
15365 | case X86::BI__builtin_ia32_alignd256: |
15366 | case X86::BI__builtin_ia32_alignd512: |
15367 | case X86::BI__builtin_ia32_alignq128: |
15368 | case X86::BI__builtin_ia32_alignq256: |
15369 | case X86::BI__builtin_ia32_alignq512: { |
15370 | unsigned NumElts = |
15371 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
15372 | unsigned ShiftVal = cast<llvm::ConstantInt>(Val: Ops[2])->getZExtValue() & 0xff; |
15373 | |
15374 | // Mask the shift amount to width of a vector. |
15375 | ShiftVal &= NumElts - 1; |
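// valign concatenates Ops[0] (high) with Ops[1] (low) and extracts NumElts
// contiguous elements starting at element ShiftVal, i.e. a whole-vector
// funnel shift right by elements.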
15376 | |
15377 | int Indices[16]; |
15378 | for (unsigned i = 0; i != NumElts; ++i) |
15379 | Indices[i] = i + ShiftVal; |
15380 | |
15381 | return Builder.CreateShuffleVector(V1: Ops[1], V2: Ops[0], |
15382 | Mask: ArrayRef(Indices, NumElts), Name: "valign");
15383 | } |
15384 | case X86::BI__builtin_ia32_shuf_f32x4_256: |
15385 | case X86::BI__builtin_ia32_shuf_f64x2_256: |
15386 | case X86::BI__builtin_ia32_shuf_i32x4_256: |
15387 | case X86::BI__builtin_ia32_shuf_i64x2_256: |
15388 | case X86::BI__builtin_ia32_shuf_f32x4: |
15389 | case X86::BI__builtin_ia32_shuf_f64x2: |
15390 | case X86::BI__builtin_ia32_shuf_i32x4: |
15391 | case X86::BI__builtin_ia32_shuf_i64x2: { |
15392 | unsigned Imm = cast<llvm::ConstantInt>(Val: Ops[2])->getZExtValue(); |
15393 | auto *Ty = cast<llvm::FixedVectorType>(Val: Ops[0]->getType()); |
15394 | unsigned NumElts = Ty->getNumElements(); |
15395 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2; |
15396 | unsigned NumLaneElts = NumElts / NumLanes; |
15397 | |
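// Each destination lane copies one whole source lane selected by the
// immediate; the lower half of the result takes lanes from Ops[0] and the
// upper half takes lanes from Ops[1].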
15398 | int Indices[16]; |
15399 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
15400 | unsigned Index = (Imm % NumLanes) * NumLaneElts; |
15401 | Imm /= NumLanes; // Discard the bits we just used. |
15402 | if (l >= (NumElts / 2)) |
15403 | Index += NumElts; // Switch to other source. |
15404 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
15405 | Indices[l + i] = Index + i; |
15406 | } |
15407 | } |
15408 | |
15409 | return Builder.CreateShuffleVector(V1: Ops[0], V2: Ops[1], |
15410 | Mask: ArrayRef(Indices, NumElts), Name: "shuf");
15411 | } |
15412 | |
15413 | case X86::BI__builtin_ia32_vperm2f128_pd256: |
15414 | case X86::BI__builtin_ia32_vperm2f128_ps256: |
15415 | case X86::BI__builtin_ia32_vperm2f128_si256: |
15416 | case X86::BI__builtin_ia32_permti256: { |
15417 | unsigned Imm = cast<llvm::ConstantInt>(Val: Ops[2])->getZExtValue(); |
15418 | unsigned NumElts = |
15419 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
15420 | |
15421 | // This takes a very simple approach since there are two lanes and a |
15422 | // shuffle can have 2 inputs. So we reserve the first input for the first |
15423 | // lane and the second input for the second lane. This may result in |
15424 | // duplicate sources, but this can be dealt with in the backend. |
15425 | |
15426 | Value *OutOps[2]; |
15427 | int Indices[8]; |
15428 | for (unsigned l = 0; l != 2; ++l) { |
15429 | // Determine the source for this lane. |
15430 | if (Imm & (1 << ((l * 4) + 3))) |
15431 | OutOps[l] = llvm::ConstantAggregateZero::get(Ty: Ops[0]->getType()); |
15432 | else if (Imm & (1 << ((l * 4) + 1))) |
15433 | OutOps[l] = Ops[1]; |
15434 | else |
15435 | OutOps[l] = Ops[0]; |
15436 | |
15437 | for (unsigned i = 0; i != NumElts/2; ++i) { |
15438 | // Start with ith element of the source for this lane. |
15439 | unsigned Idx = (l * NumElts) + i; |
15440 | // If bit 0 of the immediate half is set, switch to the high half of |
15441 | // the source. |
15442 | if (Imm & (1 << (l * 4))) |
15443 | Idx += NumElts/2; |
15444 | Indices[(l * (NumElts/2)) + i] = Idx; |
15445 | } |
15446 | } |
15447 | |
15448 | return Builder.CreateShuffleVector(V1: OutOps[0], V2: OutOps[1], |
15449 | Mask: ArrayRef(Indices, NumElts), Name: "vperm");
15450 | } |
15451 | |
15452 | case X86::BI__builtin_ia32_pslldqi128_byteshift: |
15453 | case X86::BI__builtin_ia32_pslldqi256_byteshift: |
15454 | case X86::BI__builtin_ia32_pslldqi512_byteshift: { |
15455 | unsigned ShiftVal = cast<llvm::ConstantInt>(Val: Ops[1])->getZExtValue() & 0xff; |
15456 | auto *ResultType = cast<llvm::FixedVectorType>(Val: Ops[0]->getType()); |
15457 | // Builtin type is vXi64 so multiply by 8 to get bytes. |
15458 | unsigned NumElts = ResultType->getNumElements() * 8; |
15459 | |
15460 | // If pslldq is shifting the vector more than 15 bytes, emit zero. |
15461 | if (ShiftVal >= 16) |
15462 | return llvm::Constant::getNullValue(Ty: ResultType); |
15463 | |
15464 | int Indices[64]; |
15465 | // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that |
15466 | for (unsigned l = 0; l != NumElts; l += 16) { |
15467 | for (unsigned i = 0; i != 16; ++i) { |
15468 | unsigned Idx = NumElts + i - ShiftVal; |
15469 | if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand. |
15470 | Indices[l + i] = Idx + l; |
15471 | } |
15472 | } |
15473 | |
15474 | auto *VecTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts); |
15475 | Value *Cast = Builder.CreateBitCast(V: Ops[0], DestTy: VecTy, Name: "cast");
15476 | Value *Zero = llvm::Constant::getNullValue(Ty: VecTy); |
15477 | Value *SV = Builder.CreateShuffleVector( |
15478 | V1: Zero, V2: Cast, Mask: ArrayRef(Indices, NumElts), Name: "pslldq");
15479 | return Builder.CreateBitCast(V: SV, DestTy: Ops[0]->getType(), Name: "cast");
15480 | } |
15481 | case X86::BI__builtin_ia32_psrldqi128_byteshift: |
15482 | case X86::BI__builtin_ia32_psrldqi256_byteshift: |
15483 | case X86::BI__builtin_ia32_psrldqi512_byteshift: { |
15484 | unsigned ShiftVal = cast<llvm::ConstantInt>(Val: Ops[1])->getZExtValue() & 0xff; |
15485 | auto *ResultType = cast<llvm::FixedVectorType>(Val: Ops[0]->getType()); |
15486 | // Builtin type is vXi64 so multiply by 8 to get bytes. |
15487 | unsigned NumElts = ResultType->getNumElements() * 8; |
15488 | |
15489 | // If psrldq is shifting the vector more than 15 bytes, emit zero. |
15490 | if (ShiftVal >= 16) |
15491 | return llvm::Constant::getNullValue(Ty: ResultType); |
15492 | |
15493 | int Indices[64]; |
15494 | // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that |
15495 | for (unsigned l = 0; l != NumElts; l += 16) { |
15496 | for (unsigned i = 0; i != 16; ++i) { |
15497 | unsigned Idx = i + ShiftVal; |
15498 | if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand. |
15499 | Indices[l + i] = Idx + l; |
15500 | } |
15501 | } |
15502 | |
15503 | auto *VecTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts); |
15504 | Value *Cast = Builder.CreateBitCast(V: Ops[0], DestTy: VecTy, Name: "cast");
15505 | Value *Zero = llvm::Constant::getNullValue(Ty: VecTy); |
15506 | Value *SV = Builder.CreateShuffleVector( |
15507 | V1: Cast, V2: Zero, Mask: ArrayRef(Indices, NumElts), Name: "psrldq");
15508 | return Builder.CreateBitCast(V: SV, DestTy: ResultType, Name: "cast");
15509 | } |
15510 | case X86::BI__builtin_ia32_kshiftliqi: |
15511 | case X86::BI__builtin_ia32_kshiftlihi: |
15512 | case X86::BI__builtin_ia32_kshiftlisi: |
15513 | case X86::BI__builtin_ia32_kshiftlidi: { |
15514 | unsigned ShiftVal = cast<llvm::ConstantInt>(Val: Ops[1])->getZExtValue() & 0xff; |
15515 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
15516 | |
15517 | if (ShiftVal >= NumElts) |
15518 | return llvm::Constant::getNullValue(Ty: Ops[0]->getType()); |
15519 | |
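// Model the mask shift as a shuffle with an all-zero vector so that zero
// bits are shifted in.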
15520 | Value *In = getMaskVecValue(CGF&: *this, Mask: Ops[0], NumElts); |
15521 | |
15522 | int Indices[64]; |
15523 | for (unsigned i = 0; i != NumElts; ++i) |
15524 | Indices[i] = NumElts + i - ShiftVal; |
15525 | |
15526 | Value *Zero = llvm::Constant::getNullValue(Ty: In->getType()); |
15527 | Value *SV = Builder.CreateShuffleVector( |
15528 | V1: Zero, V2: In, Mask: ArrayRef(Indices, NumElts), Name: "kshiftl");
15529 | return Builder.CreateBitCast(V: SV, DestTy: Ops[0]->getType()); |
15530 | } |
15531 | case X86::BI__builtin_ia32_kshiftriqi: |
15532 | case X86::BI__builtin_ia32_kshiftrihi: |
15533 | case X86::BI__builtin_ia32_kshiftrisi: |
15534 | case X86::BI__builtin_ia32_kshiftridi: { |
15535 | unsigned ShiftVal = cast<llvm::ConstantInt>(Val: Ops[1])->getZExtValue() & 0xff; |
15536 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
15537 | |
15538 | if (ShiftVal >= NumElts) |
15539 | return llvm::Constant::getNullValue(Ty: Ops[0]->getType()); |
15540 | |
15541 | Value *In = getMaskVecValue(CGF&: *this, Mask: Ops[0], NumElts); |
15542 | |
15543 | int Indices[64]; |
15544 | for (unsigned i = 0; i != NumElts; ++i) |
15545 | Indices[i] = i + ShiftVal; |
15546 | |
15547 | Value *Zero = llvm::Constant::getNullValue(Ty: In->getType()); |
15548 | Value *SV = Builder.CreateShuffleVector( |
15549 | V1: In, V2: Zero, Mask: ArrayRef(Indices, NumElts), Name: "kshiftr");
15550 | return Builder.CreateBitCast(V: SV, DestTy: Ops[0]->getType()); |
15551 | } |
15552 | case X86::BI__builtin_ia32_movnti: |
15553 | case X86::BI__builtin_ia32_movnti64: |
15554 | case X86::BI__builtin_ia32_movntsd: |
15555 | case X86::BI__builtin_ia32_movntss: { |
15556 | llvm::MDNode *Node = llvm::MDNode::get( |
15557 | Context&: getLLVMContext(), MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 1))); |
15558 | |
15559 | Value *Ptr = Ops[0]; |
15560 | Value *Src = Ops[1]; |
15561 | |
15562 | // Extract the 0'th element of the source vector. |
15563 | if (BuiltinID == X86::BI__builtin_ia32_movntsd || |
15564 | BuiltinID == X86::BI__builtin_ia32_movntss) |
15565 | Src = Builder.CreateExtractElement(Vec: Src, Idx: (uint64_t)0, Name: "extract");
15566 | |
15567 | // Unaligned nontemporal store of the scalar value. |
15568 | StoreInst *SI = Builder.CreateDefaultAlignedStore(Val: Src, Addr: Ptr); |
15569 | SI->setMetadata(KindID: llvm::LLVMContext::MD_nontemporal, Node); |
15570 | SI->setAlignment(llvm::Align(1)); |
15571 | return SI; |
15572 | } |
15573 | // Rotate is a special case of funnel shift - 1st 2 args are the same. |
15574 | case X86::BI__builtin_ia32_vprotb: |
15575 | case X86::BI__builtin_ia32_vprotw: |
15576 | case X86::BI__builtin_ia32_vprotd: |
15577 | case X86::BI__builtin_ia32_vprotq: |
15578 | case X86::BI__builtin_ia32_vprotbi: |
15579 | case X86::BI__builtin_ia32_vprotwi: |
15580 | case X86::BI__builtin_ia32_vprotdi: |
15581 | case X86::BI__builtin_ia32_vprotqi: |
15582 | case X86::BI__builtin_ia32_prold128: |
15583 | case X86::BI__builtin_ia32_prold256: |
15584 | case X86::BI__builtin_ia32_prold512: |
15585 | case X86::BI__builtin_ia32_prolq128: |
15586 | case X86::BI__builtin_ia32_prolq256: |
15587 | case X86::BI__builtin_ia32_prolq512: |
15588 | case X86::BI__builtin_ia32_prolvd128: |
15589 | case X86::BI__builtin_ia32_prolvd256: |
15590 | case X86::BI__builtin_ia32_prolvd512: |
15591 | case X86::BI__builtin_ia32_prolvq128: |
15592 | case X86::BI__builtin_ia32_prolvq256: |
15593 | case X86::BI__builtin_ia32_prolvq512: |
15594 | return EmitX86FunnelShift(CGF&: *this, Op0: Ops[0], Op1: Ops[0], Amt: Ops[1], IsRight: false); |
15595 | case X86::BI__builtin_ia32_prord128: |
15596 | case X86::BI__builtin_ia32_prord256: |
15597 | case X86::BI__builtin_ia32_prord512: |
15598 | case X86::BI__builtin_ia32_prorq128: |
15599 | case X86::BI__builtin_ia32_prorq256: |
15600 | case X86::BI__builtin_ia32_prorq512: |
15601 | case X86::BI__builtin_ia32_prorvd128: |
15602 | case X86::BI__builtin_ia32_prorvd256: |
15603 | case X86::BI__builtin_ia32_prorvd512: |
15604 | case X86::BI__builtin_ia32_prorvq128: |
15605 | case X86::BI__builtin_ia32_prorvq256: |
15606 | case X86::BI__builtin_ia32_prorvq512: |
15607 | return EmitX86FunnelShift(CGF&: *this, Op0: Ops[0], Op1: Ops[0], Amt: Ops[1], IsRight: true); |
15608 | case X86::BI__builtin_ia32_selectb_128: |
15609 | case X86::BI__builtin_ia32_selectb_256: |
15610 | case X86::BI__builtin_ia32_selectb_512: |
15611 | case X86::BI__builtin_ia32_selectw_128: |
15612 | case X86::BI__builtin_ia32_selectw_256: |
15613 | case X86::BI__builtin_ia32_selectw_512: |
15614 | case X86::BI__builtin_ia32_selectd_128: |
15615 | case X86::BI__builtin_ia32_selectd_256: |
15616 | case X86::BI__builtin_ia32_selectd_512: |
15617 | case X86::BI__builtin_ia32_selectq_128: |
15618 | case X86::BI__builtin_ia32_selectq_256: |
15619 | case X86::BI__builtin_ia32_selectq_512: |
15620 | case X86::BI__builtin_ia32_selectph_128: |
15621 | case X86::BI__builtin_ia32_selectph_256: |
15622 | case X86::BI__builtin_ia32_selectph_512: |
15623 | case X86::BI__builtin_ia32_selectpbf_128: |
15624 | case X86::BI__builtin_ia32_selectpbf_256: |
15625 | case X86::BI__builtin_ia32_selectpbf_512: |
15626 | case X86::BI__builtin_ia32_selectps_128: |
15627 | case X86::BI__builtin_ia32_selectps_256: |
15628 | case X86::BI__builtin_ia32_selectps_512: |
15629 | case X86::BI__builtin_ia32_selectpd_128: |
15630 | case X86::BI__builtin_ia32_selectpd_256: |
15631 | case X86::BI__builtin_ia32_selectpd_512: |
15632 | return EmitX86Select(CGF&: *this, Mask: Ops[0], Op0: Ops[1], Op1: Ops[2]); |
15633 | case X86::BI__builtin_ia32_selectsh_128: |
15634 | case X86::BI__builtin_ia32_selectsbf_128: |
15635 | case X86::BI__builtin_ia32_selectss_128: |
15636 | case X86::BI__builtin_ia32_selectsd_128: { |
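// Scalar selects operate on element 0 only; the remaining elements of
// Ops[1] pass through unchanged.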
15637 | Value *A = Builder.CreateExtractElement(Vec: Ops[1], Idx: (uint64_t)0); |
15638 | Value *B = Builder.CreateExtractElement(Vec: Ops[2], Idx: (uint64_t)0); |
15639 | A = EmitX86ScalarSelect(CGF&: *this, Mask: Ops[0], Op0: A, Op1: B); |
15640 | return Builder.CreateInsertElement(Vec: Ops[1], NewElt: A, Idx: (uint64_t)0); |
15641 | } |
15642 | case X86::BI__builtin_ia32_cmpb128_mask: |
15643 | case X86::BI__builtin_ia32_cmpb256_mask: |
15644 | case X86::BI__builtin_ia32_cmpb512_mask: |
15645 | case X86::BI__builtin_ia32_cmpw128_mask: |
15646 | case X86::BI__builtin_ia32_cmpw256_mask: |
15647 | case X86::BI__builtin_ia32_cmpw512_mask: |
15648 | case X86::BI__builtin_ia32_cmpd128_mask: |
15649 | case X86::BI__builtin_ia32_cmpd256_mask: |
15650 | case X86::BI__builtin_ia32_cmpd512_mask: |
15651 | case X86::BI__builtin_ia32_cmpq128_mask: |
15652 | case X86::BI__builtin_ia32_cmpq256_mask: |
15653 | case X86::BI__builtin_ia32_cmpq512_mask: { |
15654 | unsigned CC = cast<llvm::ConstantInt>(Val: Ops[2])->getZExtValue() & 0x7; |
15655 | return EmitX86MaskedCompare(CGF&: *this, CC, Signed: true, Ops); |
15656 | } |
15657 | case X86::BI__builtin_ia32_ucmpb128_mask: |
15658 | case X86::BI__builtin_ia32_ucmpb256_mask: |
15659 | case X86::BI__builtin_ia32_ucmpb512_mask: |
15660 | case X86::BI__builtin_ia32_ucmpw128_mask: |
15661 | case X86::BI__builtin_ia32_ucmpw256_mask: |
15662 | case X86::BI__builtin_ia32_ucmpw512_mask: |
15663 | case X86::BI__builtin_ia32_ucmpd128_mask: |
15664 | case X86::BI__builtin_ia32_ucmpd256_mask: |
15665 | case X86::BI__builtin_ia32_ucmpd512_mask: |
15666 | case X86::BI__builtin_ia32_ucmpq128_mask: |
15667 | case X86::BI__builtin_ia32_ucmpq256_mask: |
15668 | case X86::BI__builtin_ia32_ucmpq512_mask: { |
15669 | unsigned CC = cast<llvm::ConstantInt>(Val: Ops[2])->getZExtValue() & 0x7; |
15670 | return EmitX86MaskedCompare(CGF&: *this, CC, Signed: false, Ops); |
15671 | } |
15672 | case X86::BI__builtin_ia32_vpcomb: |
15673 | case X86::BI__builtin_ia32_vpcomw: |
15674 | case X86::BI__builtin_ia32_vpcomd: |
15675 | case X86::BI__builtin_ia32_vpcomq: |
15676 | return EmitX86vpcom(CGF&: *this, Ops, IsSigned: true); |
15677 | case X86::BI__builtin_ia32_vpcomub: |
15678 | case X86::BI__builtin_ia32_vpcomuw: |
15679 | case X86::BI__builtin_ia32_vpcomud: |
15680 | case X86::BI__builtin_ia32_vpcomuq: |
15681 | return EmitX86vpcom(CGF&: *this, Ops, IsSigned: false); |
15682 | |
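// kortestc: OR the two masks and compare against all-ones; the kortestz
// cases below compare the OR against zero instead.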
15683 | case X86::BI__builtin_ia32_kortestcqi: |
15684 | case X86::BI__builtin_ia32_kortestchi: |
15685 | case X86::BI__builtin_ia32_kortestcsi: |
15686 | case X86::BI__builtin_ia32_kortestcdi: { |
15687 | Value *Or = EmitX86MaskLogic(CGF&: *this, Opc: Instruction::Or, Ops); |
15688 | Value *C = llvm::Constant::getAllOnesValue(Ty: Ops[0]->getType()); |
15689 | Value *Cmp = Builder.CreateICmpEQ(LHS: Or, RHS: C); |
15690 | return Builder.CreateZExt(V: Cmp, DestTy: ConvertType(T: E->getType())); |
15691 | } |
15692 | case X86::BI__builtin_ia32_kortestzqi: |
15693 | case X86::BI__builtin_ia32_kortestzhi: |
15694 | case X86::BI__builtin_ia32_kortestzsi: |
15695 | case X86::BI__builtin_ia32_kortestzdi: { |
15696 | Value *Or = EmitX86MaskLogic(CGF&: *this, Opc: Instruction::Or, Ops); |
15697 | Value *C = llvm::Constant::getNullValue(Ty: Ops[0]->getType()); |
15698 | Value *Cmp = Builder.CreateICmpEQ(LHS: Or, RHS: C); |
15699 | return Builder.CreateZExt(V: Cmp, DestTy: ConvertType(T: E->getType())); |
15700 | } |
15701 | |
15702 | case X86::BI__builtin_ia32_ktestcqi: |
15703 | case X86::BI__builtin_ia32_ktestzqi: |
15704 | case X86::BI__builtin_ia32_ktestchi: |
15705 | case X86::BI__builtin_ia32_ktestzhi: |
15706 | case X86::BI__builtin_ia32_ktestcsi: |
15707 | case X86::BI__builtin_ia32_ktestzsi: |
15708 | case X86::BI__builtin_ia32_ktestcdi: |
15709 | case X86::BI__builtin_ia32_ktestzdi: { |
15710 | Intrinsic::ID IID; |
15711 | switch (BuiltinID) { |
15712 | default: llvm_unreachable("Unsupported intrinsic!");
15713 | case X86::BI__builtin_ia32_ktestcqi: |
15714 | IID = Intrinsic::x86_avx512_ktestc_b; |
15715 | break; |
15716 | case X86::BI__builtin_ia32_ktestzqi: |
15717 | IID = Intrinsic::x86_avx512_ktestz_b; |
15718 | break; |
15719 | case X86::BI__builtin_ia32_ktestchi: |
15720 | IID = Intrinsic::x86_avx512_ktestc_w; |
15721 | break; |
15722 | case X86::BI__builtin_ia32_ktestzhi: |
15723 | IID = Intrinsic::x86_avx512_ktestz_w; |
15724 | break; |
15725 | case X86::BI__builtin_ia32_ktestcsi: |
15726 | IID = Intrinsic::x86_avx512_ktestc_d; |
15727 | break; |
15728 | case X86::BI__builtin_ia32_ktestzsi: |
15729 | IID = Intrinsic::x86_avx512_ktestz_d; |
15730 | break; |
15731 | case X86::BI__builtin_ia32_ktestcdi: |
15732 | IID = Intrinsic::x86_avx512_ktestc_q; |
15733 | break; |
15734 | case X86::BI__builtin_ia32_ktestzdi: |
15735 | IID = Intrinsic::x86_avx512_ktestz_q; |
15736 | break; |
15737 | } |
15738 | |
15739 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
15740 | Value *LHS = getMaskVecValue(CGF&: *this, Mask: Ops[0], NumElts); |
15741 | Value *RHS = getMaskVecValue(CGF&: *this, Mask: Ops[1], NumElts); |
15742 | Function *Intr = CGM.getIntrinsic(IID); |
15743 | return Builder.CreateCall(Callee: Intr, Args: {LHS, RHS}); |
15744 | } |
15745 | |
15746 | case X86::BI__builtin_ia32_kaddqi: |
15747 | case X86::BI__builtin_ia32_kaddhi: |
15748 | case X86::BI__builtin_ia32_kaddsi: |
15749 | case X86::BI__builtin_ia32_kadddi: { |
15750 | Intrinsic::ID IID; |
15751 | switch (BuiltinID) { |
15752 | default: llvm_unreachable("Unsupported intrinsic!");
15753 | case X86::BI__builtin_ia32_kaddqi: |
15754 | IID = Intrinsic::x86_avx512_kadd_b; |
15755 | break; |
15756 | case X86::BI__builtin_ia32_kaddhi: |
15757 | IID = Intrinsic::x86_avx512_kadd_w; |
15758 | break; |
15759 | case X86::BI__builtin_ia32_kaddsi: |
15760 | IID = Intrinsic::x86_avx512_kadd_d; |
15761 | break; |
15762 | case X86::BI__builtin_ia32_kadddi: |
15763 | IID = Intrinsic::x86_avx512_kadd_q; |
15764 | break; |
15765 | } |
15766 | |
15767 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
15768 | Value *LHS = getMaskVecValue(CGF&: *this, Mask: Ops[0], NumElts); |
15769 | Value *RHS = getMaskVecValue(CGF&: *this, Mask: Ops[1], NumElts); |
15770 | Function *Intr = CGM.getIntrinsic(IID); |
15771 | Value *Res = Builder.CreateCall(Callee: Intr, Args: {LHS, RHS}); |
15772 | return Builder.CreateBitCast(V: Res, DestTy: Ops[0]->getType()); |
15773 | } |
15774 | case X86::BI__builtin_ia32_kandqi: |
15775 | case X86::BI__builtin_ia32_kandhi: |
15776 | case X86::BI__builtin_ia32_kandsi: |
15777 | case X86::BI__builtin_ia32_kanddi: |
15778 | return EmitX86MaskLogic(CGF&: *this, Opc: Instruction::And, Ops); |
15779 | case X86::BI__builtin_ia32_kandnqi: |
15780 | case X86::BI__builtin_ia32_kandnhi: |
15781 | case X86::BI__builtin_ia32_kandnsi: |
15782 | case X86::BI__builtin_ia32_kandndi: |
15783 | return EmitX86MaskLogic(CGF&: *this, Opc: Instruction::And, Ops, InvertLHS: true); |
15784 | case X86::BI__builtin_ia32_korqi: |
15785 | case X86::BI__builtin_ia32_korhi: |
15786 | case X86::BI__builtin_ia32_korsi: |
15787 | case X86::BI__builtin_ia32_kordi: |
15788 | return EmitX86MaskLogic(CGF&: *this, Opc: Instruction::Or, Ops); |
15789 | case X86::BI__builtin_ia32_kxnorqi: |
15790 | case X86::BI__builtin_ia32_kxnorhi: |
15791 | case X86::BI__builtin_ia32_kxnorsi: |
15792 | case X86::BI__builtin_ia32_kxnordi: |
15793 | return EmitX86MaskLogic(CGF&: *this, Opc: Instruction::Xor, Ops, InvertLHS: true); |
15794 | case X86::BI__builtin_ia32_kxorqi: |
15795 | case X86::BI__builtin_ia32_kxorhi: |
15796 | case X86::BI__builtin_ia32_kxorsi: |
15797 | case X86::BI__builtin_ia32_kxordi: |
15798 | return EmitX86MaskLogic(CGF&: *this, Opc: Instruction::Xor, Ops); |
15799 | case X86::BI__builtin_ia32_knotqi: |
15800 | case X86::BI__builtin_ia32_knothi: |
15801 | case X86::BI__builtin_ia32_knotsi: |
15802 | case X86::BI__builtin_ia32_knotdi: { |
15803 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
15804 | Value *Res = getMaskVecValue(CGF&: *this, Mask: Ops[0], NumElts); |
15805 | return Builder.CreateBitCast(V: Builder.CreateNot(V: Res), |
15806 | DestTy: Ops[0]->getType()); |
15807 | } |
15808 | case X86::BI__builtin_ia32_kmovb: |
15809 | case X86::BI__builtin_ia32_kmovw: |
15810 | case X86::BI__builtin_ia32_kmovd: |
15811 | case X86::BI__builtin_ia32_kmovq: { |
15812 | // Bitcast to vXi1 type and then back to integer. This gets the mask |
15813 | // register type into the IR, but might be optimized out depending on |
15814 | // what's around it. |
15815 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
15816 | Value *Res = getMaskVecValue(CGF&: *this, Mask: Ops[0], NumElts); |
15817 | return Builder.CreateBitCast(V: Res, DestTy: Ops[0]->getType()); |
15818 | } |
15819 | |
15820 | case X86::BI__builtin_ia32_kunpckdi: |
15821 | case X86::BI__builtin_ia32_kunpcksi: |
15822 | case X86::BI__builtin_ia32_kunpckhi: { |
15823 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
15824 | Value *LHS = getMaskVecValue(CGF&: *this, Mask: Ops[0], NumElts); |
15825 | Value *RHS = getMaskVecValue(CGF&: *this, Mask: Ops[1], NumElts); |
15826 | int Indices[64]; |
15827 | for (unsigned i = 0; i != NumElts; ++i) |
15828 | Indices[i] = i; |
15829 | |
15830 | // First extract half of each vector. This gives better codegen than |
15831 | // doing it in a single shuffle. |
15832 | LHS = Builder.CreateShuffleVector(V1: LHS, V2: LHS, Mask: ArrayRef(Indices, NumElts / 2)); |
15833 | RHS = Builder.CreateShuffleVector(V1: RHS, V2: RHS, Mask: ArrayRef(Indices, NumElts / 2)); |
15834 | // Concat the vectors. |
15835 | // NOTE: Operands are swapped to match the intrinsic definition. |
15836 | Value *Res = |
15837 | Builder.CreateShuffleVector(V1: RHS, V2: LHS, Mask: ArrayRef(Indices, NumElts)); |
15838 | return Builder.CreateBitCast(V: Res, DestTy: Ops[0]->getType()); |
15839 | } |
15840 | |
15841 | case X86::BI__builtin_ia32_vplzcntd_128: |
15842 | case X86::BI__builtin_ia32_vplzcntd_256: |
15843 | case X86::BI__builtin_ia32_vplzcntd_512: |
15844 | case X86::BI__builtin_ia32_vplzcntq_128: |
15845 | case X86::BI__builtin_ia32_vplzcntq_256: |
15846 | case X86::BI__builtin_ia32_vplzcntq_512: { |
15847 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: Ops[0]->getType()); |
15848 | return Builder.CreateCall(Callee: F, Args: {Ops[0],Builder.getInt1(V: false)}); |
15849 | } |
15850 | case X86::BI__builtin_ia32_sqrtss: |
15851 | case X86::BI__builtin_ia32_sqrtsd: { |
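// Scalar sqrt: extract element 0, take its square root (via the constrained
// sqrt intrinsic when FP-constrained codegen is in effect), and insert the
// result back into Ops[0]; the upper elements pass through unchanged.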
15852 | Value *A = Builder.CreateExtractElement(Vec: Ops[0], Idx: (uint64_t)0); |
15853 | Function *F; |
15854 | if (Builder.getIsFPConstrained()) { |
15855 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
15856 | F = CGM.getIntrinsic(IID: Intrinsic::experimental_constrained_sqrt, |
15857 | Tys: A->getType()); |
15858 | A = Builder.CreateConstrainedFPCall(Callee: F, Args: {A}); |
15859 | } else { |
15860 | F = CGM.getIntrinsic(IID: Intrinsic::sqrt, Tys: A->getType()); |
15861 | A = Builder.CreateCall(Callee: F, Args: {A}); |
15862 | } |
15863 | return Builder.CreateInsertElement(Vec: Ops[0], NewElt: A, Idx: (uint64_t)0); |
15864 | } |
15865 | case X86::BI__builtin_ia32_sqrtsh_round_mask: |
15866 | case X86::BI__builtin_ia32_sqrtsd_round_mask: |
15867 | case X86::BI__builtin_ia32_sqrtss_round_mask: { |
15868 | unsigned CC = cast<llvm::ConstantInt>(Val: Ops[4])->getZExtValue(); |
// Only lower to a generic sqrt when the rounding mode is 4 (i.e.
// CUR_DIRECTION); otherwise keep the target-specific intrinsic.
15871 | if (CC != 4) { |
15872 | Intrinsic::ID IID; |
15873 | |
15874 | switch (BuiltinID) { |
15875 | default: |
15876 | llvm_unreachable("Unsupported intrinsic!" ); |
15877 | case X86::BI__builtin_ia32_sqrtsh_round_mask: |
15878 | IID = Intrinsic::x86_avx512fp16_mask_sqrt_sh; |
15879 | break; |
15880 | case X86::BI__builtin_ia32_sqrtsd_round_mask: |
15881 | IID = Intrinsic::x86_avx512_mask_sqrt_sd; |
15882 | break; |
15883 | case X86::BI__builtin_ia32_sqrtss_round_mask: |
15884 | IID = Intrinsic::x86_avx512_mask_sqrt_ss; |
15885 | break; |
15886 | } |
15887 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: Ops); |
15888 | } |
15889 | Value *A = Builder.CreateExtractElement(Vec: Ops[1], Idx: (uint64_t)0); |
15890 | Function *F; |
15891 | if (Builder.getIsFPConstrained()) { |
15892 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
15893 | F = CGM.getIntrinsic(IID: Intrinsic::experimental_constrained_sqrt, |
15894 | Tys: A->getType()); |
15895 | A = Builder.CreateConstrainedFPCall(Callee: F, Args: A); |
15896 | } else { |
15897 | F = CGM.getIntrinsic(IID: Intrinsic::sqrt, Tys: A->getType()); |
15898 | A = Builder.CreateCall(Callee: F, Args: A); |
15899 | } |
15900 | Value *Src = Builder.CreateExtractElement(Vec: Ops[2], Idx: (uint64_t)0); |
15901 | A = EmitX86ScalarSelect(CGF&: *this, Mask: Ops[3], Op0: A, Op1: Src); |
15902 | return Builder.CreateInsertElement(Vec: Ops[0], NewElt: A, Idx: (uint64_t)0); |
15903 | } |
15904 | case X86::BI__builtin_ia32_sqrtpd256: |
15905 | case X86::BI__builtin_ia32_sqrtpd: |
15906 | case X86::BI__builtin_ia32_sqrtps256: |
15907 | case X86::BI__builtin_ia32_sqrtps: |
15908 | case X86::BI__builtin_ia32_sqrtph256: |
15909 | case X86::BI__builtin_ia32_sqrtph: |
15910 | case X86::BI__builtin_ia32_sqrtph512: |
15911 | case X86::BI__builtin_ia32_sqrtps512: |
15912 | case X86::BI__builtin_ia32_sqrtpd512: { |
15913 | if (Ops.size() == 2) { |
15914 | unsigned CC = cast<llvm::ConstantInt>(Val: Ops[1])->getZExtValue(); |
// Only lower to a generic sqrt when the rounding mode is 4 (i.e.
// CUR_DIRECTION); otherwise keep the target-specific intrinsic.
15917 | if (CC != 4) { |
15918 | Intrinsic::ID IID; |
15919 | |
15920 | switch (BuiltinID) { |
15921 | default: |
15922 | llvm_unreachable("Unsupported intrinsic!" ); |
15923 | case X86::BI__builtin_ia32_sqrtph512: |
15924 | IID = Intrinsic::x86_avx512fp16_sqrt_ph_512; |
15925 | break; |
15926 | case X86::BI__builtin_ia32_sqrtps512: |
15927 | IID = Intrinsic::x86_avx512_sqrt_ps_512; |
15928 | break; |
15929 | case X86::BI__builtin_ia32_sqrtpd512: |
15930 | IID = Intrinsic::x86_avx512_sqrt_pd_512; |
15931 | break; |
15932 | } |
15933 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: Ops); |
15934 | } |
15935 | } |
15936 | if (Builder.getIsFPConstrained()) { |
15937 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
15938 | Function *F = CGM.getIntrinsic(IID: Intrinsic::experimental_constrained_sqrt, |
15939 | Tys: Ops[0]->getType()); |
15940 | return Builder.CreateConstrainedFPCall(Callee: F, Args: Ops[0]); |
15941 | } else { |
15942 | Function *F = CGM.getIntrinsic(IID: Intrinsic::sqrt, Tys: Ops[0]->getType()); |
15943 | return Builder.CreateCall(Callee: F, Args: Ops[0]); |
15944 | } |
15945 | } |
15946 | |
15947 | case X86::BI__builtin_ia32_pmuludq128: |
15948 | case X86::BI__builtin_ia32_pmuludq256: |
15949 | case X86::BI__builtin_ia32_pmuludq512: |
15950 | return EmitX86Muldq(CGF&: *this, /*IsSigned*/false, Ops); |
15951 | |
15952 | case X86::BI__builtin_ia32_pmuldq128: |
15953 | case X86::BI__builtin_ia32_pmuldq256: |
15954 | case X86::BI__builtin_ia32_pmuldq512: |
15955 | return EmitX86Muldq(CGF&: *this, /*IsSigned*/true, Ops); |
15956 | |
15957 | case X86::BI__builtin_ia32_pternlogd512_mask: |
15958 | case X86::BI__builtin_ia32_pternlogq512_mask: |
15959 | case X86::BI__builtin_ia32_pternlogd128_mask: |
15960 | case X86::BI__builtin_ia32_pternlogd256_mask: |
15961 | case X86::BI__builtin_ia32_pternlogq128_mask: |
15962 | case X86::BI__builtin_ia32_pternlogq256_mask: |
15963 | return EmitX86Ternlog(CGF&: *this, /*ZeroMask*/false, Ops); |
15964 | |
15965 | case X86::BI__builtin_ia32_pternlogd512_maskz: |
15966 | case X86::BI__builtin_ia32_pternlogq512_maskz: |
15967 | case X86::BI__builtin_ia32_pternlogd128_maskz: |
15968 | case X86::BI__builtin_ia32_pternlogd256_maskz: |
15969 | case X86::BI__builtin_ia32_pternlogq128_maskz: |
15970 | case X86::BI__builtin_ia32_pternlogq256_maskz: |
15971 | return EmitX86Ternlog(CGF&: *this, /*ZeroMask*/true, Ops); |
15972 | |
15973 | case X86::BI__builtin_ia32_vpshldd128: |
15974 | case X86::BI__builtin_ia32_vpshldd256: |
15975 | case X86::BI__builtin_ia32_vpshldd512: |
15976 | case X86::BI__builtin_ia32_vpshldq128: |
15977 | case X86::BI__builtin_ia32_vpshldq256: |
15978 | case X86::BI__builtin_ia32_vpshldq512: |
15979 | case X86::BI__builtin_ia32_vpshldw128: |
15980 | case X86::BI__builtin_ia32_vpshldw256: |
15981 | case X86::BI__builtin_ia32_vpshldw512: |
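// VPSHLD concatenates Op0:Op1 and shifts left by the amount, which is exactly
// a left funnel shift: fshl(Op0, Op1, Amt).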
15982 | return EmitX86FunnelShift(CGF&: *this, Op0: Ops[0], Op1: Ops[1], Amt: Ops[2], IsRight: false); |
15983 | |
15984 | case X86::BI__builtin_ia32_vpshrdd128: |
15985 | case X86::BI__builtin_ia32_vpshrdd256: |
15986 | case X86::BI__builtin_ia32_vpshrdd512: |
15987 | case X86::BI__builtin_ia32_vpshrdq128: |
15988 | case X86::BI__builtin_ia32_vpshrdq256: |
15989 | case X86::BI__builtin_ia32_vpshrdq512: |
15990 | case X86::BI__builtin_ia32_vpshrdw128: |
15991 | case X86::BI__builtin_ia32_vpshrdw256: |
15992 | case X86::BI__builtin_ia32_vpshrdw512: |
15993 | // Ops 0 and 1 are swapped. |
15994 | return EmitX86FunnelShift(CGF&: *this, Op0: Ops[1], Op1: Ops[0], Amt: Ops[2], IsRight: true); |
15995 | |
15996 | case X86::BI__builtin_ia32_vpshldvd128: |
15997 | case X86::BI__builtin_ia32_vpshldvd256: |
15998 | case X86::BI__builtin_ia32_vpshldvd512: |
15999 | case X86::BI__builtin_ia32_vpshldvq128: |
16000 | case X86::BI__builtin_ia32_vpshldvq256: |
16001 | case X86::BI__builtin_ia32_vpshldvq512: |
16002 | case X86::BI__builtin_ia32_vpshldvw128: |
16003 | case X86::BI__builtin_ia32_vpshldvw256: |
16004 | case X86::BI__builtin_ia32_vpshldvw512: |
16005 | return EmitX86FunnelShift(CGF&: *this, Op0: Ops[0], Op1: Ops[1], Amt: Ops[2], IsRight: false); |
16006 | |
16007 | case X86::BI__builtin_ia32_vpshrdvd128: |
16008 | case X86::BI__builtin_ia32_vpshrdvd256: |
16009 | case X86::BI__builtin_ia32_vpshrdvd512: |
16010 | case X86::BI__builtin_ia32_vpshrdvq128: |
16011 | case X86::BI__builtin_ia32_vpshrdvq256: |
16012 | case X86::BI__builtin_ia32_vpshrdvq512: |
16013 | case X86::BI__builtin_ia32_vpshrdvw128: |
16014 | case X86::BI__builtin_ia32_vpshrdvw256: |
16015 | case X86::BI__builtin_ia32_vpshrdvw512: |
16016 | // Ops 0 and 1 are swapped. |
16017 | return EmitX86FunnelShift(CGF&: *this, Op0: Ops[1], Op1: Ops[0], Amt: Ops[2], IsRight: true); |
16018 | |
16019 | // Reductions |
16020 | case X86::BI__builtin_ia32_reduce_fadd_pd512: |
16021 | case X86::BI__builtin_ia32_reduce_fadd_ps512: |
16022 | case X86::BI__builtin_ia32_reduce_fadd_ph512: |
16023 | case X86::BI__builtin_ia32_reduce_fadd_ph256: |
16024 | case X86::BI__builtin_ia32_reduce_fadd_ph128: { |
16025 | Function *F = |
16026 | CGM.getIntrinsic(IID: Intrinsic::vector_reduce_fadd, Tys: Ops[1]->getType()); |
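// Allow reassociation so the reduction does not have to be evaluated strictly
// in order.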
16027 | IRBuilder<>::FastMathFlagGuard FMFGuard(Builder); |
16028 | Builder.getFastMathFlags().setAllowReassoc(); |
16029 | return Builder.CreateCall(Callee: F, Args: {Ops[0], Ops[1]}); |
16030 | } |
16031 | case X86::BI__builtin_ia32_reduce_fmul_pd512: |
16032 | case X86::BI__builtin_ia32_reduce_fmul_ps512: |
16033 | case X86::BI__builtin_ia32_reduce_fmul_ph512: |
16034 | case X86::BI__builtin_ia32_reduce_fmul_ph256: |
16035 | case X86::BI__builtin_ia32_reduce_fmul_ph128: { |
16036 | Function *F = |
16037 | CGM.getIntrinsic(IID: Intrinsic::vector_reduce_fmul, Tys: Ops[1]->getType()); |
16038 | IRBuilder<>::FastMathFlagGuard FMFGuard(Builder); |
16039 | Builder.getFastMathFlags().setAllowReassoc(); |
16040 | return Builder.CreateCall(Callee: F, Args: {Ops[0], Ops[1]}); |
16041 | } |
16042 | case X86::BI__builtin_ia32_reduce_fmax_pd512: |
16043 | case X86::BI__builtin_ia32_reduce_fmax_ps512: |
16044 | case X86::BI__builtin_ia32_reduce_fmax_ph512: |
16045 | case X86::BI__builtin_ia32_reduce_fmax_ph256: |
16046 | case X86::BI__builtin_ia32_reduce_fmax_ph128: { |
16047 | Function *F = |
16048 | CGM.getIntrinsic(IID: Intrinsic::vector_reduce_fmax, Tys: Ops[0]->getType()); |
16049 | IRBuilder<>::FastMathFlagGuard FMFGuard(Builder); |
16050 | Builder.getFastMathFlags().setNoNaNs(); |
16051 | return Builder.CreateCall(Callee: F, Args: {Ops[0]}); |
16052 | } |
16053 | case X86::BI__builtin_ia32_reduce_fmin_pd512: |
16054 | case X86::BI__builtin_ia32_reduce_fmin_ps512: |
16055 | case X86::BI__builtin_ia32_reduce_fmin_ph512: |
16056 | case X86::BI__builtin_ia32_reduce_fmin_ph256: |
16057 | case X86::BI__builtin_ia32_reduce_fmin_ph128: { |
16058 | Function *F = |
16059 | CGM.getIntrinsic(IID: Intrinsic::vector_reduce_fmin, Tys: Ops[0]->getType()); |
16060 | IRBuilder<>::FastMathFlagGuard FMFGuard(Builder); |
16061 | Builder.getFastMathFlags().setNoNaNs(); |
16062 | return Builder.CreateCall(Callee: F, Args: {Ops[0]}); |
16063 | } |
16064 | |
16065 | case X86::BI__builtin_ia32_rdrand16_step: |
16066 | case X86::BI__builtin_ia32_rdrand32_step: |
16067 | case X86::BI__builtin_ia32_rdrand64_step: |
16068 | case X86::BI__builtin_ia32_rdseed16_step: |
16069 | case X86::BI__builtin_ia32_rdseed32_step: |
16070 | case X86::BI__builtin_ia32_rdseed64_step: { |
16071 | Intrinsic::ID ID; |
16072 | switch (BuiltinID) { |
16073 | default: llvm_unreachable("Unsupported intrinsic!" ); |
16074 | case X86::BI__builtin_ia32_rdrand16_step: |
16075 | ID = Intrinsic::x86_rdrand_16; |
16076 | break; |
16077 | case X86::BI__builtin_ia32_rdrand32_step: |
16078 | ID = Intrinsic::x86_rdrand_32; |
16079 | break; |
16080 | case X86::BI__builtin_ia32_rdrand64_step: |
16081 | ID = Intrinsic::x86_rdrand_64; |
16082 | break; |
16083 | case X86::BI__builtin_ia32_rdseed16_step: |
16084 | ID = Intrinsic::x86_rdseed_16; |
16085 | break; |
16086 | case X86::BI__builtin_ia32_rdseed32_step: |
16087 | ID = Intrinsic::x86_rdseed_32; |
16088 | break; |
16089 | case X86::BI__builtin_ia32_rdseed64_step: |
16090 | ID = Intrinsic::x86_rdseed_64; |
16091 | break; |
16092 | } |
16093 | |
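// These intrinsics return {random value, i32 success}; store the value
// through the pointer operand and return the success flag.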
16094 | Value *Call = Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID)); |
16095 | Builder.CreateDefaultAlignedStore(Val: Builder.CreateExtractValue(Agg: Call, Idxs: 0), |
16096 | Addr: Ops[0]); |
16097 | return Builder.CreateExtractValue(Agg: Call, Idxs: 1); |
16098 | } |
16099 | case X86::BI__builtin_ia32_addcarryx_u32: |
16100 | case X86::BI__builtin_ia32_addcarryx_u64: |
16101 | case X86::BI__builtin_ia32_subborrow_u32: |
16102 | case X86::BI__builtin_ia32_subborrow_u64: { |
16103 | Intrinsic::ID IID; |
16104 | switch (BuiltinID) { |
16105 | default: llvm_unreachable("Unsupported intrinsic!" ); |
16106 | case X86::BI__builtin_ia32_addcarryx_u32: |
16107 | IID = Intrinsic::x86_addcarry_32; |
16108 | break; |
16109 | case X86::BI__builtin_ia32_addcarryx_u64: |
16110 | IID = Intrinsic::x86_addcarry_64; |
16111 | break; |
16112 | case X86::BI__builtin_ia32_subborrow_u32: |
16113 | IID = Intrinsic::x86_subborrow_32; |
16114 | break; |
16115 | case X86::BI__builtin_ia32_subborrow_u64: |
16116 | IID = Intrinsic::x86_subborrow_64; |
16117 | break; |
16118 | } |
16119 | |
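// The intrinsic returns {carry-out, result}; store the result through the
// out-pointer (Ops[3]) and return the carry flag.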
16120 | Value *Call = Builder.CreateCall(Callee: CGM.getIntrinsic(IID), |
16121 | Args: { Ops[0], Ops[1], Ops[2] }); |
16122 | Builder.CreateDefaultAlignedStore(Val: Builder.CreateExtractValue(Agg: Call, Idxs: 1), |
16123 | Addr: Ops[3]); |
16124 | return Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
16125 | } |
16126 | |
16127 | case X86::BI__builtin_ia32_fpclassps128_mask: |
16128 | case X86::BI__builtin_ia32_fpclassps256_mask: |
16129 | case X86::BI__builtin_ia32_fpclassps512_mask: |
16130 | case X86::BI__builtin_ia32_fpclassph128_mask: |
16131 | case X86::BI__builtin_ia32_fpclassph256_mask: |
16132 | case X86::BI__builtin_ia32_fpclassph512_mask: |
16133 | case X86::BI__builtin_ia32_fpclasspd128_mask: |
16134 | case X86::BI__builtin_ia32_fpclasspd256_mask: |
16135 | case X86::BI__builtin_ia32_fpclasspd512_mask: { |
16136 | unsigned NumElts = |
16137 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
16138 | Value *MaskIn = Ops[2]; |
16139 | Ops.erase(CI: &Ops[2]); |
16140 | |
16141 | Intrinsic::ID ID; |
16142 | switch (BuiltinID) { |
16143 | default: llvm_unreachable("Unsupported intrinsic!" ); |
16144 | case X86::BI__builtin_ia32_fpclassph128_mask: |
16145 | ID = Intrinsic::x86_avx512fp16_fpclass_ph_128; |
16146 | break; |
16147 | case X86::BI__builtin_ia32_fpclassph256_mask: |
16148 | ID = Intrinsic::x86_avx512fp16_fpclass_ph_256; |
16149 | break; |
16150 | case X86::BI__builtin_ia32_fpclassph512_mask: |
16151 | ID = Intrinsic::x86_avx512fp16_fpclass_ph_512; |
16152 | break; |
16153 | case X86::BI__builtin_ia32_fpclassps128_mask: |
16154 | ID = Intrinsic::x86_avx512_fpclass_ps_128; |
16155 | break; |
16156 | case X86::BI__builtin_ia32_fpclassps256_mask: |
16157 | ID = Intrinsic::x86_avx512_fpclass_ps_256; |
16158 | break; |
16159 | case X86::BI__builtin_ia32_fpclassps512_mask: |
16160 | ID = Intrinsic::x86_avx512_fpclass_ps_512; |
16161 | break; |
16162 | case X86::BI__builtin_ia32_fpclasspd128_mask: |
16163 | ID = Intrinsic::x86_avx512_fpclass_pd_128; |
16164 | break; |
16165 | case X86::BI__builtin_ia32_fpclasspd256_mask: |
16166 | ID = Intrinsic::x86_avx512_fpclass_pd_256; |
16167 | break; |
16168 | case X86::BI__builtin_ia32_fpclasspd512_mask: |
16169 | ID = Intrinsic::x86_avx512_fpclass_pd_512; |
16170 | break; |
16171 | } |
16172 | |
16173 | Value *Fpclass = Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), Args: Ops); |
16174 | return EmitX86MaskedCompareResult(CGF&: *this, Cmp: Fpclass, NumElts, MaskIn); |
16175 | } |
16176 | |
16177 | case X86::BI__builtin_ia32_vp2intersect_q_512: |
16178 | case X86::BI__builtin_ia32_vp2intersect_q_256: |
16179 | case X86::BI__builtin_ia32_vp2intersect_q_128: |
16180 | case X86::BI__builtin_ia32_vp2intersect_d_512: |
16181 | case X86::BI__builtin_ia32_vp2intersect_d_256: |
16182 | case X86::BI__builtin_ia32_vp2intersect_d_128: { |
16183 | unsigned NumElts = |
16184 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
16185 | Intrinsic::ID ID; |
16186 | |
16187 | switch (BuiltinID) { |
16188 | default: llvm_unreachable("Unsupported intrinsic!" ); |
16189 | case X86::BI__builtin_ia32_vp2intersect_q_512: |
16190 | ID = Intrinsic::x86_avx512_vp2intersect_q_512; |
16191 | break; |
16192 | case X86::BI__builtin_ia32_vp2intersect_q_256: |
16193 | ID = Intrinsic::x86_avx512_vp2intersect_q_256; |
16194 | break; |
16195 | case X86::BI__builtin_ia32_vp2intersect_q_128: |
16196 | ID = Intrinsic::x86_avx512_vp2intersect_q_128; |
16197 | break; |
16198 | case X86::BI__builtin_ia32_vp2intersect_d_512: |
16199 | ID = Intrinsic::x86_avx512_vp2intersect_d_512; |
16200 | break; |
16201 | case X86::BI__builtin_ia32_vp2intersect_d_256: |
16202 | ID = Intrinsic::x86_avx512_vp2intersect_d_256; |
16203 | break; |
16204 | case X86::BI__builtin_ia32_vp2intersect_d_128: |
16205 | ID = Intrinsic::x86_avx512_vp2intersect_d_128; |
16206 | break; |
16207 | } |
16208 | |
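// The intrinsic returns a pair of mask vectors; widen each to the builtin's
// integer mask type and store them through the two out-pointers.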
16209 | Value *Call = Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), Args: {Ops[0], Ops[1]}); |
16210 | Value *Result = Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
16211 | Result = EmitX86MaskedCompareResult(CGF&: *this, Cmp: Result, NumElts, MaskIn: nullptr); |
16212 | Builder.CreateDefaultAlignedStore(Val: Result, Addr: Ops[2]); |
16213 | |
16214 | Result = Builder.CreateExtractValue(Agg: Call, Idxs: 1); |
16215 | Result = EmitX86MaskedCompareResult(CGF&: *this, Cmp: Result, NumElts, MaskIn: nullptr); |
16216 | return Builder.CreateDefaultAlignedStore(Val: Result, Addr: Ops[3]); |
16217 | } |
16218 | |
16219 | case X86::BI__builtin_ia32_vpmultishiftqb128: |
16220 | case X86::BI__builtin_ia32_vpmultishiftqb256: |
16221 | case X86::BI__builtin_ia32_vpmultishiftqb512: { |
16222 | Intrinsic::ID ID; |
16223 | switch (BuiltinID) { |
16224 | default: llvm_unreachable("Unsupported intrinsic!" ); |
16225 | case X86::BI__builtin_ia32_vpmultishiftqb128: |
16226 | ID = Intrinsic::x86_avx512_pmultishift_qb_128; |
16227 | break; |
16228 | case X86::BI__builtin_ia32_vpmultishiftqb256: |
16229 | ID = Intrinsic::x86_avx512_pmultishift_qb_256; |
16230 | break; |
16231 | case X86::BI__builtin_ia32_vpmultishiftqb512: |
16232 | ID = Intrinsic::x86_avx512_pmultishift_qb_512; |
16233 | break; |
16234 | } |
16235 | |
16236 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), Args: Ops); |
16237 | } |
16238 | |
16239 | case X86::BI__builtin_ia32_vpshufbitqmb128_mask: |
16240 | case X86::BI__builtin_ia32_vpshufbitqmb256_mask: |
16241 | case X86::BI__builtin_ia32_vpshufbitqmb512_mask: { |
16242 | unsigned NumElts = |
16243 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
16244 | Value *MaskIn = Ops[2]; |
16245 | Ops.erase(CI: &Ops[2]); |
16246 | |
16247 | Intrinsic::ID ID; |
16248 | switch (BuiltinID) { |
16249 | default: llvm_unreachable("Unsupported intrinsic!" ); |
16250 | case X86::BI__builtin_ia32_vpshufbitqmb128_mask: |
16251 | ID = Intrinsic::x86_avx512_vpshufbitqmb_128; |
16252 | break; |
16253 | case X86::BI__builtin_ia32_vpshufbitqmb256_mask: |
16254 | ID = Intrinsic::x86_avx512_vpshufbitqmb_256; |
16255 | break; |
16256 | case X86::BI__builtin_ia32_vpshufbitqmb512_mask: |
16257 | ID = Intrinsic::x86_avx512_vpshufbitqmb_512; |
16258 | break; |
16259 | } |
16260 | |
16261 | Value *Shufbit = Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), Args: Ops); |
16262 | return EmitX86MaskedCompareResult(CGF&: *this, Cmp: Shufbit, NumElts, MaskIn); |
16263 | } |
16264 | |
16265 | // packed comparison intrinsics |
16266 | case X86::BI__builtin_ia32_cmpeqps: |
16267 | case X86::BI__builtin_ia32_cmpeqpd: |
16268 | return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false); |
16269 | case X86::BI__builtin_ia32_cmpltps: |
16270 | case X86::BI__builtin_ia32_cmpltpd: |
16271 | return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true); |
16272 | case X86::BI__builtin_ia32_cmpleps: |
16273 | case X86::BI__builtin_ia32_cmplepd: |
16274 | return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true); |
16275 | case X86::BI__builtin_ia32_cmpunordps: |
16276 | case X86::BI__builtin_ia32_cmpunordpd: |
16277 | return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false); |
16278 | case X86::BI__builtin_ia32_cmpneqps: |
16279 | case X86::BI__builtin_ia32_cmpneqpd: |
16280 | return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false); |
16281 | case X86::BI__builtin_ia32_cmpnltps: |
16282 | case X86::BI__builtin_ia32_cmpnltpd: |
16283 | return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true); |
16284 | case X86::BI__builtin_ia32_cmpnleps: |
16285 | case X86::BI__builtin_ia32_cmpnlepd: |
16286 | return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true); |
16287 | case X86::BI__builtin_ia32_cmpordps: |
16288 | case X86::BI__builtin_ia32_cmpordpd: |
16289 | return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false); |
16290 | case X86::BI__builtin_ia32_cmpph128_mask: |
16291 | case X86::BI__builtin_ia32_cmpph256_mask: |
16292 | case X86::BI__builtin_ia32_cmpph512_mask: |
16293 | case X86::BI__builtin_ia32_cmpps128_mask: |
16294 | case X86::BI__builtin_ia32_cmpps256_mask: |
16295 | case X86::BI__builtin_ia32_cmpps512_mask: |
16296 | case X86::BI__builtin_ia32_cmppd128_mask: |
16297 | case X86::BI__builtin_ia32_cmppd256_mask: |
16298 | case X86::BI__builtin_ia32_cmppd512_mask: |
16299 | IsMaskFCmp = true; |
16300 | [[fallthrough]]; |
16301 | case X86::BI__builtin_ia32_cmpps: |
16302 | case X86::BI__builtin_ia32_cmpps256: |
16303 | case X86::BI__builtin_ia32_cmppd: |
16304 | case X86::BI__builtin_ia32_cmppd256: { |
// Lower vector comparisons to fcmp instructions, ignoring the requested
// signalling behaviour and rounding mode. This is only possible if the
// fp-model is not strict and FENV_ACCESS is off.
16309 | |
// The third argument is the comparison condition, an integer in the
// range [0, 31].
16312 | unsigned CC = cast<llvm::ConstantInt>(Val: Ops[2])->getZExtValue() & 0x1f; |
16313 | |
// Lower to an IR fcmp instruction, ignoring the requested signalling
// behaviour; e.g. both _CMP_GT_OS and _CMP_GT_OQ are translated to FCMP_OGT.
16317 | FCmpInst::Predicate Pred; |
16318 | bool IsSignaling; |
16319 | // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling |
16320 | // behavior is inverted. We'll handle that after the switch. |
16321 | switch (CC & 0xf) { |
16322 | case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break; |
16323 | case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break; |
16324 | case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break; |
16325 | case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break; |
16326 | case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break; |
16327 | case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break; |
16328 | case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break; |
16329 | case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break; |
16330 | case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break; |
16331 | case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break; |
16332 | case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break; |
16333 | case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break; |
16334 | case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break; |
16335 | case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break; |
16336 | case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break; |
16337 | case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break; |
16338 | default: llvm_unreachable("Unhandled CC" ); |
16339 | } |
16340 | |
16341 | // Invert the signalling behavior for 16-31. |
16342 | if (CC & 0x10) |
16343 | IsSignaling = !IsSignaling; |
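// For example (illustrative), _CMP_EQ_OS (0x10) uses the same FCMP_OEQ
// predicate as _CMP_EQ_OQ (0x00) but with signalling semantics.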
16344 | |
16345 | // If the predicate is true or false and we're using constrained intrinsics, |
16346 | // we don't have a compare intrinsic we can use. Just use the legacy X86 |
16347 | // specific intrinsic. |
16348 | // If the intrinsic is mask enabled and we're using constrained intrinsics, |
16349 | // use the legacy X86 specific intrinsic. |
16350 | if (Builder.getIsFPConstrained() && |
16351 | (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE || |
16352 | IsMaskFCmp)) { |
16353 | |
16354 | Intrinsic::ID IID; |
16355 | switch (BuiltinID) { |
16356 | default: llvm_unreachable("Unexpected builtin" ); |
16357 | case X86::BI__builtin_ia32_cmpps: |
16358 | IID = Intrinsic::x86_sse_cmp_ps; |
16359 | break; |
16360 | case X86::BI__builtin_ia32_cmpps256: |
16361 | IID = Intrinsic::x86_avx_cmp_ps_256; |
16362 | break; |
16363 | case X86::BI__builtin_ia32_cmppd: |
16364 | IID = Intrinsic::x86_sse2_cmp_pd; |
16365 | break; |
16366 | case X86::BI__builtin_ia32_cmppd256: |
16367 | IID = Intrinsic::x86_avx_cmp_pd_256; |
16368 | break; |
16369 | case X86::BI__builtin_ia32_cmpph128_mask: |
16370 | IID = Intrinsic::x86_avx512fp16_mask_cmp_ph_128; |
16371 | break; |
16372 | case X86::BI__builtin_ia32_cmpph256_mask: |
16373 | IID = Intrinsic::x86_avx512fp16_mask_cmp_ph_256; |
16374 | break; |
16375 | case X86::BI__builtin_ia32_cmpph512_mask: |
16376 | IID = Intrinsic::x86_avx512fp16_mask_cmp_ph_512; |
16377 | break; |
16378 | case X86::BI__builtin_ia32_cmpps512_mask: |
16379 | IID = Intrinsic::x86_avx512_mask_cmp_ps_512; |
16380 | break; |
16381 | case X86::BI__builtin_ia32_cmppd512_mask: |
16382 | IID = Intrinsic::x86_avx512_mask_cmp_pd_512; |
16383 | break; |
16384 | case X86::BI__builtin_ia32_cmpps128_mask: |
16385 | IID = Intrinsic::x86_avx512_mask_cmp_ps_128; |
16386 | break; |
16387 | case X86::BI__builtin_ia32_cmpps256_mask: |
16388 | IID = Intrinsic::x86_avx512_mask_cmp_ps_256; |
16389 | break; |
16390 | case X86::BI__builtin_ia32_cmppd128_mask: |
16391 | IID = Intrinsic::x86_avx512_mask_cmp_pd_128; |
16392 | break; |
16393 | case X86::BI__builtin_ia32_cmppd256_mask: |
16394 | IID = Intrinsic::x86_avx512_mask_cmp_pd_256; |
16395 | break; |
16396 | } |
16397 | |
16398 | Function *Intr = CGM.getIntrinsic(IID); |
16399 | if (IsMaskFCmp) { |
16400 | unsigned NumElts = |
16401 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
16402 | Ops[3] = getMaskVecValue(CGF&: *this, Mask: Ops[3], NumElts); |
16403 | Value *Cmp = Builder.CreateCall(Callee: Intr, Args: Ops); |
16404 | return EmitX86MaskedCompareResult(CGF&: *this, Cmp, NumElts, MaskIn: nullptr); |
16405 | } |
16406 | |
16407 | return Builder.CreateCall(Callee: Intr, Args: Ops); |
16408 | } |
16409 | |
16410 | // Builtins without the _mask suffix return a vector of integers |
16411 | // of the same width as the input vectors |
16412 | if (IsMaskFCmp) { |
16413 | // We ignore SAE if strict FP is disabled. We only keep precise |
16414 | // exception behavior under strict FP. |
// NOTE: If strict FP ever goes through here, a CGFPOptionsRAII object will
// be required.
16417 | unsigned NumElts = |
16418 | cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements(); |
16419 | Value *Cmp; |
16420 | if (IsSignaling) |
16421 | Cmp = Builder.CreateFCmpS(P: Pred, LHS: Ops[0], RHS: Ops[1]); |
16422 | else |
16423 | Cmp = Builder.CreateFCmp(P: Pred, LHS: Ops[0], RHS: Ops[1]); |
16424 | return EmitX86MaskedCompareResult(CGF&: *this, Cmp, NumElts, MaskIn: Ops[3]); |
16425 | } |
16426 | |
16427 | return getVectorFCmpIR(Pred, IsSignaling); |
16428 | } |
16429 | |
16430 | // SSE scalar comparison intrinsics |
16431 | case X86::BI__builtin_ia32_cmpeqss: |
16432 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0); |
16433 | case X86::BI__builtin_ia32_cmpltss: |
16434 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1); |
16435 | case X86::BI__builtin_ia32_cmpless: |
16436 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2); |
16437 | case X86::BI__builtin_ia32_cmpunordss: |
16438 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3); |
16439 | case X86::BI__builtin_ia32_cmpneqss: |
16440 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4); |
16441 | case X86::BI__builtin_ia32_cmpnltss: |
16442 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5); |
16443 | case X86::BI__builtin_ia32_cmpnless: |
16444 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6); |
16445 | case X86::BI__builtin_ia32_cmpordss: |
16446 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7); |
16447 | case X86::BI__builtin_ia32_cmpeqsd: |
16448 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0); |
16449 | case X86::BI__builtin_ia32_cmpltsd: |
16450 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1); |
16451 | case X86::BI__builtin_ia32_cmplesd: |
16452 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2); |
16453 | case X86::BI__builtin_ia32_cmpunordsd: |
16454 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3); |
16455 | case X86::BI__builtin_ia32_cmpneqsd: |
16456 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4); |
16457 | case X86::BI__builtin_ia32_cmpnltsd: |
16458 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5); |
16459 | case X86::BI__builtin_ia32_cmpnlesd: |
16460 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6); |
16461 | case X86::BI__builtin_ia32_cmpordsd: |
16462 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7); |
16463 | |
16464 | // f16c half2float intrinsics |
16465 | case X86::BI__builtin_ia32_vcvtph2ps: |
16466 | case X86::BI__builtin_ia32_vcvtph2ps256: |
16467 | case X86::BI__builtin_ia32_vcvtph2ps_mask: |
16468 | case X86::BI__builtin_ia32_vcvtph2ps256_mask: |
16469 | case X86::BI__builtin_ia32_vcvtph2ps512_mask: { |
16470 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
16471 | return EmitX86CvtF16ToFloatExpr(CGF&: *this, Ops, DstTy: ConvertType(T: E->getType())); |
16472 | } |
16473 | |
16474 | // AVX512 bf16 intrinsics |
16475 | case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: { |
16476 | Ops[2] = getMaskVecValue( |
16477 | CGF&: *this, Mask: Ops[2], |
16478 | NumElts: cast<llvm::FixedVectorType>(Val: Ops[0]->getType())->getNumElements()); |
16479 | Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128; |
16480 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: Ops); |
16481 | } |
16482 | case X86::BI__builtin_ia32_cvtsbf162ss_32: |
16483 | return Builder.CreateFPExt(V: Ops[0], DestTy: Builder.getFloatTy()); |
16484 | |
16485 | case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: |
16486 | case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: { |
16487 | Intrinsic::ID IID; |
16488 | switch (BuiltinID) { |
16489 | default: llvm_unreachable("Unsupported intrinsic!" ); |
16490 | case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: |
16491 | IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256; |
16492 | break; |
16493 | case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: |
16494 | IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512; |
16495 | break; |
16496 | } |
16497 | Value *Res = Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: Ops[0]); |
16498 | return EmitX86Select(CGF&: *this, Mask: Ops[2], Op0: Res, Op1: Ops[1]); |
16499 | } |
16500 | |
16501 | case X86::BI__cpuid: |
16502 | case X86::BI__cpuidex: { |
16503 | Value *FuncId = EmitScalarExpr(E: E->getArg(Arg: 1)); |
16504 | Value *SubFuncId = BuiltinID == X86::BI__cpuidex |
16505 | ? EmitScalarExpr(E: E->getArg(Arg: 2)) |
16506 | : llvm::ConstantInt::get(Ty: Int32Ty, V: 0); |
16507 | |
16508 | llvm::StructType *CpuidRetTy = |
16509 | llvm::StructType::get(elt1: Int32Ty, elts: Int32Ty, elts: Int32Ty, elts: Int32Ty); |
16510 | llvm::FunctionType *FTy = |
16511 | llvm::FunctionType::get(Result: CpuidRetTy, Params: {Int32Ty, Int32Ty}, isVarArg: false); |
16512 | |
16513 | StringRef Asm, Constraints; |
16514 | if (getTarget().getTriple().getArch() == llvm::Triple::x86) { |
16515 | Asm = "cpuid" ; |
16516 | Constraints = "={ax},={bx},={cx},={dx},{ax},{cx}" ; |
16517 | } else { |
16518 | // x86-64 uses %rbx as the base register, so preserve it. |
16519 | Asm = "xchgq %rbx, ${1:q}\n" |
16520 | "cpuid\n" |
16521 | "xchgq %rbx, ${1:q}" ; |
16522 | Constraints = "={ax},=r,={cx},={dx},0,2" ; |
16523 | } |
16524 | |
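// The inline asm returns {eax, ebx, ecx, edx}; each value is stored into the
// four-int buffer passed as the builtin's first argument below.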
16525 | llvm::InlineAsm *IA = llvm::InlineAsm::get(Ty: FTy, AsmString: Asm, Constraints, |
16526 | /*hasSideEffects=*/false); |
16527 | Value *IACall = Builder.CreateCall(Callee: IA, Args: {FuncId, SubFuncId}); |
16528 | Value *BasePtr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
16529 | Value *Store = nullptr; |
16530 | for (unsigned i = 0; i < 4; i++) { |
Value *Extracted = Builder.CreateExtractValue(Agg: IACall, Idxs: i);
16532 | Value *StorePtr = Builder.CreateConstInBoundsGEP1_32(Ty: Int32Ty, Ptr: BasePtr, Idx0: i); |
16533 | Store = Builder.CreateAlignedStore(Val: Extracted, Addr: StorePtr, Align: getIntAlign()); |
16534 | } |
16535 | |
// Return the last store instruction to signal that we have emitted the
// intrinsic.
16538 | return Store; |
16539 | } |
16540 | |
16541 | case X86::BI__emul: |
16542 | case X86::BI__emulu: { |
16543 | llvm::Type *Int64Ty = llvm::IntegerType::get(C&: getLLVMContext(), NumBits: 64); |
16544 | bool isSigned = (BuiltinID == X86::BI__emul); |
16545 | Value *LHS = Builder.CreateIntCast(V: Ops[0], DestTy: Int64Ty, isSigned); |
16546 | Value *RHS = Builder.CreateIntCast(V: Ops[1], DestTy: Int64Ty, isSigned); |
16547 | return Builder.CreateMul(LHS, RHS, Name: "" , HasNUW: !isSigned, HasNSW: isSigned); |
16548 | } |
16549 | case X86::BI__mulh: |
16550 | case X86::BI__umulh: |
16551 | case X86::BI_mul128: |
16552 | case X86::BI_umul128: { |
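// All four builtins compute the full 64x64 -> 128-bit product. __mulh and
// __umulh return only the high 64 bits; _mul128 and _umul128 also store the
// high half through the pointer argument and return the low half.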
16553 | llvm::Type *ResType = ConvertType(T: E->getType()); |
16554 | llvm::Type *Int128Ty = llvm::IntegerType::get(C&: getLLVMContext(), NumBits: 128); |
16555 | |
16556 | bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128); |
16557 | Value *LHS = Builder.CreateIntCast(V: Ops[0], DestTy: Int128Ty, isSigned: IsSigned); |
16558 | Value *RHS = Builder.CreateIntCast(V: Ops[1], DestTy: Int128Ty, isSigned: IsSigned); |
16559 | |
16560 | Value *MulResult, *HigherBits; |
16561 | if (IsSigned) { |
16562 | MulResult = Builder.CreateNSWMul(LHS, RHS); |
16563 | HigherBits = Builder.CreateAShr(LHS: MulResult, RHS: 64); |
16564 | } else { |
16565 | MulResult = Builder.CreateNUWMul(LHS, RHS); |
16566 | HigherBits = Builder.CreateLShr(LHS: MulResult, RHS: 64); |
16567 | } |
16568 | HigherBits = Builder.CreateIntCast(V: HigherBits, DestTy: ResType, isSigned: IsSigned); |
16569 | |
16570 | if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh) |
16571 | return HigherBits; |
16572 | |
16573 | Address HighBitsAddress = EmitPointerWithAlignment(Addr: E->getArg(Arg: 2)); |
16574 | Builder.CreateStore(Val: HigherBits, Addr: HighBitsAddress); |
16575 | return Builder.CreateIntCast(V: MulResult, DestTy: ResType, isSigned: IsSigned); |
16576 | } |
16577 | |
16578 | case X86::BI__faststorefence: { |
16579 | return Builder.CreateFence(Ordering: llvm::AtomicOrdering::SequentiallyConsistent, |
16580 | SSID: llvm::SyncScope::System); |
16581 | } |
16582 | case X86::BI__shiftleft128: |
16583 | case X86::BI__shiftright128: { |
16584 | llvm::Function *F = CGM.getIntrinsic( |
16585 | IID: BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr, |
16586 | Tys: Int64Ty); |
16587 | // Flip low/high ops and zero-extend amount to matching type. |
16588 | // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt) |
16589 | // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt) |
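// For example (illustrative), __shiftleft128(Lo, Hi, N) returns the high 64
// bits of the 128-bit value Hi:Lo shifted left by N, i.e.
// (Hi << N) | (Lo >> (64 - N)) for N in [1, 63].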
16590 | std::swap(a&: Ops[0], b&: Ops[1]); |
16591 | Ops[2] = Builder.CreateZExt(V: Ops[2], DestTy: Int64Ty); |
16592 | return Builder.CreateCall(Callee: F, Args: Ops); |
16593 | } |
16594 | case X86::BI_ReadWriteBarrier: |
16595 | case X86::BI_ReadBarrier: |
16596 | case X86::BI_WriteBarrier: { |
16597 | return Builder.CreateFence(Ordering: llvm::AtomicOrdering::SequentiallyConsistent, |
16598 | SSID: llvm::SyncScope::SingleThread); |
16599 | } |
16600 | |
16601 | case X86::BI_AddressOfReturnAddress: { |
16602 | Function *F = |
16603 | CGM.getIntrinsic(IID: Intrinsic::addressofreturnaddress, Tys: AllocaInt8PtrTy); |
16604 | return Builder.CreateCall(Callee: F); |
16605 | } |
16606 | case X86::BI__stosb: { |
// We treat __stosb as a volatile memset - it may not generate a "rep stosb"
// instruction, but it will create a memset that won't be optimized away.
16609 | return Builder.CreateMemSet(Ptr: Ops[0], Val: Ops[1], Size: Ops[2], Align: Align(1), isVolatile: true); |
16610 | } |
16611 | case X86::BI__ud2: |
16612 | // llvm.trap makes a ud2a instruction on x86. |
16613 | return EmitTrapCall(IntrID: Intrinsic::trap); |
16614 | case X86::BI__int2c: { |
16615 | // This syscall signals a driver assertion failure in x86 NT kernels. |
16616 | llvm::FunctionType *FTy = llvm::FunctionType::get(Result: VoidTy, isVarArg: false); |
16617 | llvm::InlineAsm *IA = |
16618 | llvm::InlineAsm::get(Ty: FTy, AsmString: "int $$0x2c" , Constraints: "" , /*hasSideEffects=*/true); |
16619 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
16620 | C&: getLLVMContext(), Index: llvm::AttributeList::FunctionIndex, |
16621 | Kinds: llvm::Attribute::NoReturn); |
16622 | llvm::CallInst *CI = Builder.CreateCall(Callee: IA); |
16623 | CI->setAttributes(NoReturnAttr); |
16624 | return CI; |
16625 | } |
16626 | case X86::BI__readfsbyte: |
16627 | case X86::BI__readfsword: |
16628 | case X86::BI__readfsdword: |
16629 | case X86::BI__readfsqword: { |
16630 | llvm::Type *IntTy = ConvertType(T: E->getType()); |
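// Address space 257 corresponds to the FS segment register on x86.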
16631 | Value *Ptr = Builder.CreateIntToPtr( |
16632 | V: Ops[0], DestTy: llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: 257)); |
16633 | LoadInst *Load = Builder.CreateAlignedLoad( |
16634 | Ty: IntTy, Addr: Ptr, Align: getContext().getTypeAlignInChars(T: E->getType())); |
16635 | Load->setVolatile(true); |
16636 | return Load; |
16637 | } |
16638 | case X86::BI__readgsbyte: |
16639 | case X86::BI__readgsword: |
16640 | case X86::BI__readgsdword: |
16641 | case X86::BI__readgsqword: { |
16642 | llvm::Type *IntTy = ConvertType(T: E->getType()); |
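// Address space 256 corresponds to the GS segment register on x86.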
16643 | Value *Ptr = Builder.CreateIntToPtr( |
16644 | V: Ops[0], DestTy: llvm::PointerType::get(C&: getLLVMContext(), AddressSpace: 256)); |
16645 | LoadInst *Load = Builder.CreateAlignedLoad( |
16646 | Ty: IntTy, Addr: Ptr, Align: getContext().getTypeAlignInChars(T: E->getType())); |
16647 | Load->setVolatile(true); |
16648 | return Load; |
16649 | } |
16650 | case X86::BI__builtin_ia32_encodekey128_u32: { |
16651 | Intrinsic::ID IID = Intrinsic::x86_encodekey128; |
16652 | |
16653 | Value *Call = Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: {Ops[0], Ops[1]}); |
16654 | |
16655 | for (int i = 0; i < 3; ++i) { |
Value *Extract = Builder.CreateExtractValue(Agg: Call, Idxs: i + 1);
16657 | Value *Ptr = Builder.CreateConstGEP1_32(Ty: Int8Ty, Ptr: Ops[2], Idx0: i * 16); |
16658 | Builder.CreateAlignedStore(Val: Extract, Ptr, Align: Align(1)); |
16659 | } |
16660 | |
16661 | return Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
16662 | } |
16663 | case X86::BI__builtin_ia32_encodekey256_u32: { |
16664 | Intrinsic::ID IID = Intrinsic::x86_encodekey256; |
16665 | |
16666 | Value *Call = |
16667 | Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: {Ops[0], Ops[1], Ops[2]}); |
16668 | |
16669 | for (int i = 0; i < 4; ++i) { |
Value *Extract = Builder.CreateExtractValue(Agg: Call, Idxs: i + 1);
16671 | Value *Ptr = Builder.CreateConstGEP1_32(Ty: Int8Ty, Ptr: Ops[3], Idx0: i * 16); |
16672 | Builder.CreateAlignedStore(Val: Extract, Ptr, Align: Align(1)); |
16673 | } |
16674 | |
16675 | return Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
16676 | } |
16677 | case X86::BI__builtin_ia32_aesenc128kl_u8: |
16678 | case X86::BI__builtin_ia32_aesdec128kl_u8: |
16679 | case X86::BI__builtin_ia32_aesenc256kl_u8: |
16680 | case X86::BI__builtin_ia32_aesdec256kl_u8: { |
16681 | Intrinsic::ID IID; |
16682 | StringRef BlockName; |
16683 | switch (BuiltinID) { |
16684 | default: |
16685 | llvm_unreachable("Unexpected builtin" ); |
16686 | case X86::BI__builtin_ia32_aesenc128kl_u8: |
16687 | IID = Intrinsic::x86_aesenc128kl; |
16688 | BlockName = "aesenc128kl" ; |
16689 | break; |
16690 | case X86::BI__builtin_ia32_aesdec128kl_u8: |
16691 | IID = Intrinsic::x86_aesdec128kl; |
16692 | BlockName = "aesdec128kl" ; |
16693 | break; |
16694 | case X86::BI__builtin_ia32_aesenc256kl_u8: |
16695 | IID = Intrinsic::x86_aesenc256kl; |
16696 | BlockName = "aesenc256kl" ; |
16697 | break; |
16698 | case X86::BI__builtin_ia32_aesdec256kl_u8: |
16699 | IID = Intrinsic::x86_aesdec256kl; |
16700 | BlockName = "aesdec256kl" ; |
16701 | break; |
16702 | } |
16703 | |
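// The intrinsic returns {i8 success, <2 x i64> block}. Branch on the low bit
// of the flag: on success store the processed block through Ops[0], otherwise
// store zero, and return the flag either way.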
16704 | Value *Call = Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: {Ops[1], Ops[2]}); |
16705 | |
16706 | BasicBlock *NoError = |
16707 | createBasicBlock(name: BlockName + "_no_error" , parent: this->CurFn); |
16708 | BasicBlock *Error = createBasicBlock(name: BlockName + "_error" , parent: this->CurFn); |
16709 | BasicBlock *End = createBasicBlock(name: BlockName + "_end" , parent: this->CurFn); |
16710 | |
16711 | Value *Ret = Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
16712 | Value *Succ = Builder.CreateTrunc(V: Ret, DestTy: Builder.getInt1Ty()); |
16713 | Value *Out = Builder.CreateExtractValue(Agg: Call, Idxs: 1); |
16714 | Builder.CreateCondBr(Cond: Succ, True: NoError, False: Error); |
16715 | |
16716 | Builder.SetInsertPoint(NoError); |
16717 | Builder.CreateDefaultAlignedStore(Val: Out, Addr: Ops[0]); |
16718 | Builder.CreateBr(Dest: End); |
16719 | |
16720 | Builder.SetInsertPoint(Error); |
16721 | Constant *Zero = llvm::Constant::getNullValue(Ty: Out->getType()); |
16722 | Builder.CreateDefaultAlignedStore(Val: Zero, Addr: Ops[0]); |
16723 | Builder.CreateBr(Dest: End); |
16724 | |
16725 | Builder.SetInsertPoint(End); |
16726 | return Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
16727 | } |
16728 | case X86::BI__builtin_ia32_aesencwide128kl_u8: |
16729 | case X86::BI__builtin_ia32_aesdecwide128kl_u8: |
16730 | case X86::BI__builtin_ia32_aesencwide256kl_u8: |
16731 | case X86::BI__builtin_ia32_aesdecwide256kl_u8: { |
16732 | Intrinsic::ID IID; |
16733 | StringRef BlockName; |
16734 | switch (BuiltinID) { |
16735 | case X86::BI__builtin_ia32_aesencwide128kl_u8: |
16736 | IID = Intrinsic::x86_aesencwide128kl; |
16737 | BlockName = "aesencwide128kl" ; |
16738 | break; |
16739 | case X86::BI__builtin_ia32_aesdecwide128kl_u8: |
16740 | IID = Intrinsic::x86_aesdecwide128kl; |
16741 | BlockName = "aesdecwide128kl" ; |
16742 | break; |
16743 | case X86::BI__builtin_ia32_aesencwide256kl_u8: |
16744 | IID = Intrinsic::x86_aesencwide256kl; |
16745 | BlockName = "aesencwide256kl" ; |
16746 | break; |
16747 | case X86::BI__builtin_ia32_aesdecwide256kl_u8: |
16748 | IID = Intrinsic::x86_aesdecwide256kl; |
16749 | BlockName = "aesdecwide256kl" ; |
16750 | break; |
16751 | } |
16752 | |
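// The wide variants process eight 128-bit blocks: load the eight inputs, call
// the intrinsic with the handle plus the blocks, then on success store the
// eight results, otherwise store zeros.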
16753 | llvm::Type *Ty = FixedVectorType::get(ElementType: Builder.getInt64Ty(), NumElts: 2); |
16754 | Value *InOps[9]; |
16755 | InOps[0] = Ops[2]; |
16756 | for (int i = 0; i != 8; ++i) { |
16757 | Value *Ptr = Builder.CreateConstGEP1_32(Ty, Ptr: Ops[1], Idx0: i); |
16758 | InOps[i + 1] = Builder.CreateAlignedLoad(Ty, Ptr, Align: Align(16)); |
16759 | } |
16760 | |
16761 | Value *Call = Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: InOps); |
16762 | |
16763 | BasicBlock *NoError = |
16764 | createBasicBlock(name: BlockName + "_no_error" , parent: this->CurFn); |
16765 | BasicBlock *Error = createBasicBlock(name: BlockName + "_error" , parent: this->CurFn); |
16766 | BasicBlock *End = createBasicBlock(name: BlockName + "_end" , parent: this->CurFn); |
16767 | |
16768 | Value *Ret = Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
16769 | Value *Succ = Builder.CreateTrunc(V: Ret, DestTy: Builder.getInt1Ty()); |
16770 | Builder.CreateCondBr(Cond: Succ, True: NoError, False: Error); |
16771 | |
16772 | Builder.SetInsertPoint(NoError); |
16773 | for (int i = 0; i != 8; ++i) { |
Value *Extract = Builder.CreateExtractValue(Agg: Call, Idxs: i + 1);
16775 | Value *Ptr = Builder.CreateConstGEP1_32(Ty: Extract->getType(), Ptr: Ops[0], Idx0: i); |
16776 | Builder.CreateAlignedStore(Val: Extract, Ptr, Align: Align(16)); |
16777 | } |
16778 | Builder.CreateBr(Dest: End); |
16779 | |
16780 | Builder.SetInsertPoint(Error); |
16781 | for (int i = 0; i != 8; ++i) { |
16782 | Value *Out = Builder.CreateExtractValue(Agg: Call, Idxs: i + 1); |
16783 | Constant *Zero = llvm::Constant::getNullValue(Ty: Out->getType()); |
16784 | Value *Ptr = Builder.CreateConstGEP1_32(Ty: Out->getType(), Ptr: Ops[0], Idx0: i); |
16785 | Builder.CreateAlignedStore(Val: Zero, Ptr, Align: Align(16)); |
16786 | } |
16787 | Builder.CreateBr(Dest: End); |
16788 | |
16789 | Builder.SetInsertPoint(End); |
16790 | return Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
16791 | } |
16792 | case X86::BI__builtin_ia32_vfcmaddcph512_mask: |
16793 | IsConjFMA = true; |
16794 | [[fallthrough]]; |
16795 | case X86::BI__builtin_ia32_vfmaddcph512_mask: { |
16796 | Intrinsic::ID IID = IsConjFMA |
16797 | ? Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_512 |
16798 | : Intrinsic::x86_avx512fp16_mask_vfmadd_cph_512; |
16799 | Value *Call = Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: Ops); |
16800 | return EmitX86Select(CGF&: *this, Mask: Ops[3], Op0: Call, Op1: Ops[0]); |
16801 | } |
16802 | case X86::BI__builtin_ia32_vfcmaddcsh_round_mask: |
16803 | IsConjFMA = true; |
16804 | [[fallthrough]]; |
16805 | case X86::BI__builtin_ia32_vfmaddcsh_round_mask: { |
16806 | Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh |
16807 | : Intrinsic::x86_avx512fp16_mask_vfmadd_csh; |
16808 | Value *Call = Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: Ops); |
16809 | Value *And = Builder.CreateAnd(LHS: Ops[3], RHS: llvm::ConstantInt::get(Ty: Int8Ty, V: 1)); |
16810 | return EmitX86Select(CGF&: *this, Mask: And, Op0: Call, Op1: Ops[0]); |
16811 | } |
16812 | case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3: |
16813 | IsConjFMA = true; |
16814 | [[fallthrough]]; |
16815 | case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: { |
16816 | Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh |
16817 | : Intrinsic::x86_avx512fp16_mask_vfmadd_csh; |
16818 | Value *Call = Builder.CreateCall(Callee: CGM.getIntrinsic(IID), Args: Ops); |
16819 | static constexpr int Mask[] = {0, 5, 6, 7}; |
16820 | return Builder.CreateShuffleVector(V1: Call, V2: Ops[2], Mask); |
16821 | } |
16822 | case X86::BI__builtin_ia32_prefetchi: |
16823 | return Builder.CreateCall( |
16824 | Callee: CGM.getIntrinsic(IID: Intrinsic::prefetch, Tys: Ops[0]->getType()), |
16825 | Args: {Ops[0], llvm::ConstantInt::get(Ty: Int32Ty, V: 0), Ops[1], |
16826 | llvm::ConstantInt::get(Ty: Int32Ty, V: 0)}); |
16827 | } |
16828 | } |
16829 | |
16830 | Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, |
16831 | const CallExpr *E) { |
// Do not emit the builtin's arguments directly inside the arguments of a
// function call, because the evaluation order of function arguments is not
// specified in C++. This is important when testing to ensure the arguments
// are emitted in the same order every time. E.g.:
// Instead of:
16837 | // return Builder.CreateFDiv(EmitScalarExpr(E->getArg(0)), |
16838 | // EmitScalarExpr(E->getArg(1)), "swdiv"); |
16839 | // Use: |
16840 | // Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16841 | // Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16842 | // return Builder.CreateFDiv(Op0, Op1, "swdiv") |
16843 | |
16844 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
16845 | |
16846 | #include "llvm/TargetParser/PPCTargetParser.def" |
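// Helper for __builtin_cpu_is/__builtin_cpu_supports on AIX: depending on
// SupportMethod it either folds to a constant, loads a field of the
// _system_configuration structure, or calls getsystemcfg(), then masks the
// value and compares it against OpValue.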
16847 | auto GenAIXPPCBuiltinCpuExpr = [&](unsigned SupportMethod, unsigned FieldIdx, |
16848 | unsigned Mask, CmpInst::Predicate CompOp, |
16849 | unsigned OpValue) -> Value * { |
16850 | if (SupportMethod == BUILTIN_PPC_FALSE) |
16851 | return llvm::ConstantInt::getFalse(Ty: ConvertType(T: E->getType())); |
16852 | |
16853 | if (SupportMethod == BUILTIN_PPC_TRUE) |
16854 | return llvm::ConstantInt::getTrue(Ty: ConvertType(T: E->getType())); |
16855 | |
16856 | assert(SupportMethod <= SYS_CALL && "Invalid value for SupportMethod." ); |
16857 | |
16858 | llvm::Value *FieldValue = nullptr; |
16859 | if (SupportMethod == USE_SYS_CONF) { |
16860 | llvm::Type *STy = llvm::StructType::get(PPC_SYSTEMCONFIG_TYPE); |
16861 | llvm::Constant *SysConf = |
16862 | CGM.CreateRuntimeVariable(Ty: STy, Name: "_system_configuration" ); |
16863 | |
16864 | // Grab the appropriate field from _system_configuration. |
16865 | llvm::Value *Idxs[] = {ConstantInt::get(Ty: Int32Ty, V: 0), |
16866 | ConstantInt::get(Ty: Int32Ty, V: FieldIdx)}; |
16867 | |
16868 | FieldValue = Builder.CreateInBoundsGEP(Ty: STy, Ptr: SysConf, IdxList: Idxs); |
16869 | FieldValue = Builder.CreateAlignedLoad(Ty: Int32Ty, Addr: FieldValue, |
16870 | Align: CharUnits::fromQuantity(Quantity: 4)); |
16871 | } else if (SupportMethod == SYS_CALL) { |
16872 | llvm::FunctionType *FTy = |
16873 | llvm::FunctionType::get(Result: Int64Ty, Params: Int32Ty, isVarArg: false); |
16874 | llvm::FunctionCallee Func = |
16875 | CGM.CreateRuntimeFunction(Ty: FTy, Name: "getsystemcfg" ); |
16876 | |
16877 | FieldValue = |
16878 | Builder.CreateCall(Callee: Func, Args: {ConstantInt::get(Ty: Int32Ty, V: FieldIdx)}); |
16879 | } |
16880 | assert(FieldValue && |
16881 | "SupportMethod value is not defined in PPCTargetParser.def." ); |
16882 | |
16883 | if (Mask) |
16884 | FieldValue = Builder.CreateAnd(LHS: FieldValue, RHS: Mask); |
16885 | |
16886 | llvm::Type *ValueType = FieldValue->getType(); |
16887 | bool IsValueType64Bit = ValueType->isIntegerTy(Bitwidth: 64); |
16888 | assert( |
16889 | (IsValueType64Bit || ValueType->isIntegerTy(32)) && |
16890 | "Only 32/64-bit integers are supported in GenAIXPPCBuiltinCpuExpr()." ); |
16891 | |
16892 | return Builder.CreateICmp( |
16893 | P: CompOp, LHS: FieldValue, |
16894 | RHS: ConstantInt::get(Ty: IsValueType64Bit ? Int64Ty : Int32Ty, V: OpValue)); |
16895 | }; |
16896 | |
16897 | switch (BuiltinID) { |
16898 | default: return nullptr; |
16899 | |
16900 | case Builtin::BI__builtin_cpu_is: { |
16901 | const Expr *CPUExpr = E->getArg(Arg: 0)->IgnoreParenCasts(); |
16902 | StringRef CPUStr = cast<clang::StringLiteral>(Val: CPUExpr)->getString(); |
16903 | llvm::Triple Triple = getTarget().getTriple(); |
16904 | |
16905 | unsigned LinuxSupportMethod, LinuxIDValue, AIXSupportMethod, AIXIDValue; |
16906 | typedef std::tuple<unsigned, unsigned, unsigned, unsigned> CPUInfo; |
16907 | |
16908 | std::tie(args&: LinuxSupportMethod, args&: LinuxIDValue, args&: AIXSupportMethod, args&: AIXIDValue) = |
16909 | static_cast<CPUInfo>(StringSwitch<CPUInfo>(CPUStr) |
16910 | #define PPC_CPU(NAME, Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, \ |
16911 | AIXID) \ |
16912 | .Case(NAME, {Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, AIXID}) |
16913 | #include "llvm/TargetParser/PPCTargetParser.def" |
16914 | .Default(Value: {BUILTIN_PPC_UNSUPPORTED, 0, |
16915 | BUILTIN_PPC_UNSUPPORTED, 0})); |
16916 | |
16917 | if (Triple.isOSAIX()) { |
16918 | assert((AIXSupportMethod != BUILTIN_PPC_UNSUPPORTED) && |
16919 | "Invalid CPU name. Missed by SemaChecking?" ); |
16920 | return GenAIXPPCBuiltinCpuExpr(AIXSupportMethod, AIX_SYSCON_IMPL_IDX, 0, |
16921 | ICmpInst::ICMP_EQ, AIXIDValue); |
16922 | } |
16923 | |
16924 | assert(Triple.isOSLinux() && |
16925 | "__builtin_cpu_is() is only supported for AIX and Linux." ); |
16926 | |
16927 | assert((LinuxSupportMethod != BUILTIN_PPC_UNSUPPORTED) && |
16928 | "Invalid CPU name. Missed by SemaChecking?" ); |
16929 | |
16930 | if (LinuxSupportMethod == BUILTIN_PPC_FALSE) |
16931 | return llvm::ConstantInt::getFalse(Ty: ConvertType(T: E->getType())); |
16932 | |
16933 | Value *Op0 = llvm::ConstantInt::get(Ty: Int32Ty, PPC_FAWORD_CPUID); |
16934 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::ppc_fixed_addr_ld); |
16935 | Value *TheCall = Builder.CreateCall(Callee: F, Args: {Op0}, Name: "cpu_is" ); |
16936 | return Builder.CreateICmpEQ(LHS: TheCall, |
16937 | RHS: llvm::ConstantInt::get(Ty: Int32Ty, V: LinuxIDValue)); |
16938 | } |
16939 | case Builtin::BI__builtin_cpu_supports: { |
16940 | llvm::Triple Triple = getTarget().getTriple(); |
16941 | const Expr *CPUExpr = E->getArg(Arg: 0)->IgnoreParenCasts(); |
16942 | StringRef CPUStr = cast<clang::StringLiteral>(Val: CPUExpr)->getString(); |
16943 | if (Triple.isOSAIX()) { |
16944 | unsigned SupportMethod, FieldIdx, Mask, Value; |
16945 | CmpInst::Predicate CompOp; |
16946 | typedef std::tuple<unsigned, unsigned, unsigned, CmpInst::Predicate, |
16947 | unsigned> |
16948 | CPUSupportType; |
16949 | std::tie(args&: SupportMethod, args&: FieldIdx, args&: Mask, args&: CompOp, args&: Value) = |
16950 | static_cast<CPUSupportType>(StringSwitch<CPUSupportType>(CPUStr) |
16951 | #define PPC_AIX_FEATURE(NAME, DESC, SUPPORT_METHOD, INDEX, MASK, COMP_OP, \ |
16952 | VALUE) \ |
16953 | .Case(NAME, {SUPPORT_METHOD, INDEX, MASK, COMP_OP, VALUE}) |
16954 | #include "llvm/TargetParser/PPCTargetParser.def" |
16955 | .Default(Value: {BUILTIN_PPC_FALSE, 0, 0, |
16956 | CmpInst::Predicate(), 0})); |
16957 | return GenAIXPPCBuiltinCpuExpr(SupportMethod, FieldIdx, Mask, CompOp, |
16958 | Value); |
16959 | } |
16960 | |
16961 | assert(Triple.isOSLinux() && |
16962 | "__builtin_cpu_supports() is only supported for AIX and Linux." ); |
16963 | unsigned FeatureWord; |
16964 | unsigned BitMask; |
16965 | std::tie(args&: FeatureWord, args&: BitMask) = |
16966 | StringSwitch<std::pair<unsigned, unsigned>>(CPUStr) |
16967 | #define PPC_LNX_FEATURE(Name, Description, EnumName, Bitmask, FA_WORD) \ |
16968 | .Case(Name, {FA_WORD, Bitmask}) |
16969 | #include "llvm/TargetParser/PPCTargetParser.def" |
16970 | .Default(Value: {0, 0}); |
16971 | if (!BitMask) |
16972 | return Builder.getFalse(); |
16973 | Value *Op0 = llvm::ConstantInt::get(Ty: Int32Ty, V: FeatureWord); |
16974 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::ppc_fixed_addr_ld); |
16975 | Value *TheCall = Builder.CreateCall(Callee: F, Args: {Op0}, Name: "cpu_supports" ); |
16976 | Value *Mask = |
16977 | Builder.CreateAnd(LHS: TheCall, RHS: llvm::ConstantInt::get(Ty: Int32Ty, V: BitMask)); |
16978 | return Builder.CreateICmpNE(LHS: Mask, RHS: llvm::Constant::getNullValue(Ty: Int32Ty)); |
16979 | #undef PPC_FAWORD_HWCAP |
16980 | #undef PPC_FAWORD_HWCAP2 |
16981 | #undef PPC_FAWORD_CPUID |
16982 | } |
16983 | |
16984 | // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we |
16985 | // call __builtin_readcyclecounter. |
16986 | case PPC::BI__builtin_ppc_get_timebase: |
16987 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::readcyclecounter)); |
16988 | |
16989 | // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr |
16990 | case PPC::BI__builtin_altivec_lvx: |
16991 | case PPC::BI__builtin_altivec_lvxl: |
16992 | case PPC::BI__builtin_altivec_lvebx: |
16993 | case PPC::BI__builtin_altivec_lvehx: |
16994 | case PPC::BI__builtin_altivec_lvewx: |
16995 | case PPC::BI__builtin_altivec_lvsl: |
16996 | case PPC::BI__builtin_altivec_lvsr: |
16997 | case PPC::BI__builtin_vsx_lxvd2x: |
16998 | case PPC::BI__builtin_vsx_lxvw4x: |
16999 | case PPC::BI__builtin_vsx_lxvd2x_be: |
17000 | case PPC::BI__builtin_vsx_lxvw4x_be: |
17001 | case PPC::BI__builtin_vsx_lxvl: |
17002 | case PPC::BI__builtin_vsx_lxvll: |
17003 | { |
17004 | SmallVector<Value *, 2> Ops; |
17005 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
17006 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
17007 | if (!(BuiltinID == PPC::BI__builtin_vsx_lxvl || |
17008 | BuiltinID == PPC::BI__builtin_vsx_lxvll)) { |
17009 | Ops[0] = Builder.CreateGEP(Ty: Int8Ty, Ptr: Ops[1], IdxList: Ops[0]); |
17010 | Ops.pop_back(); |
17011 | } |
17012 | |
17013 | switch (BuiltinID) { |
17014 | default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!" ); |
17015 | case PPC::BI__builtin_altivec_lvx: |
17016 | ID = Intrinsic::ppc_altivec_lvx; |
17017 | break; |
17018 | case PPC::BI__builtin_altivec_lvxl: |
17019 | ID = Intrinsic::ppc_altivec_lvxl; |
17020 | break; |
17021 | case PPC::BI__builtin_altivec_lvebx: |
17022 | ID = Intrinsic::ppc_altivec_lvebx; |
17023 | break; |
17024 | case PPC::BI__builtin_altivec_lvehx: |
17025 | ID = Intrinsic::ppc_altivec_lvehx; |
17026 | break; |
17027 | case PPC::BI__builtin_altivec_lvewx: |
17028 | ID = Intrinsic::ppc_altivec_lvewx; |
17029 | break; |
17030 | case PPC::BI__builtin_altivec_lvsl: |
17031 | ID = Intrinsic::ppc_altivec_lvsl; |
17032 | break; |
17033 | case PPC::BI__builtin_altivec_lvsr: |
17034 | ID = Intrinsic::ppc_altivec_lvsr; |
17035 | break; |
17036 | case PPC::BI__builtin_vsx_lxvd2x: |
17037 | ID = Intrinsic::ppc_vsx_lxvd2x; |
17038 | break; |
17039 | case PPC::BI__builtin_vsx_lxvw4x: |
17040 | ID = Intrinsic::ppc_vsx_lxvw4x; |
17041 | break; |
17042 | case PPC::BI__builtin_vsx_lxvd2x_be: |
17043 | ID = Intrinsic::ppc_vsx_lxvd2x_be; |
17044 | break; |
17045 | case PPC::BI__builtin_vsx_lxvw4x_be: |
17046 | ID = Intrinsic::ppc_vsx_lxvw4x_be; |
17047 | break; |
17048 | case PPC::BI__builtin_vsx_lxvl: |
17049 | ID = Intrinsic::ppc_vsx_lxvl; |
17050 | break; |
17051 | case PPC::BI__builtin_vsx_lxvll: |
17052 | ID = Intrinsic::ppc_vsx_lxvll; |
17053 | break; |
17054 | } |
17055 | llvm::Function *F = CGM.getIntrinsic(IID: ID); |
17056 | return Builder.CreateCall(Callee: F, Args: Ops, Name: "" ); |
17057 | } |
17058 | |
17059 | // vec_st, vec_xst_be |
17060 | case PPC::BI__builtin_altivec_stvx: |
17061 | case PPC::BI__builtin_altivec_stvxl: |
17062 | case PPC::BI__builtin_altivec_stvebx: |
17063 | case PPC::BI__builtin_altivec_stvehx: |
17064 | case PPC::BI__builtin_altivec_stvewx: |
17065 | case PPC::BI__builtin_vsx_stxvd2x: |
17066 | case PPC::BI__builtin_vsx_stxvw4x: |
17067 | case PPC::BI__builtin_vsx_stxvd2x_be: |
17068 | case PPC::BI__builtin_vsx_stxvw4x_be: |
17069 | case PPC::BI__builtin_vsx_stxvl: |
17070 | case PPC::BI__builtin_vsx_stxvll: |
17071 | { |
17072 | SmallVector<Value *, 3> Ops; |
17073 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 0))); |
17074 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 1))); |
17075 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: 2))); |
17076 | if (!(BuiltinID == PPC::BI__builtin_vsx_stxvl || |
17077 | BuiltinID == PPC::BI__builtin_vsx_stxvll)) { |
17078 | Ops[1] = Builder.CreateGEP(Ty: Int8Ty, Ptr: Ops[2], IdxList: Ops[1]); |
17079 | Ops.pop_back(); |
17080 | } |
17081 | |
17082 | switch (BuiltinID) { |
17083 | default: llvm_unreachable("Unsupported st intrinsic!" ); |
17084 | case PPC::BI__builtin_altivec_stvx: |
17085 | ID = Intrinsic::ppc_altivec_stvx; |
17086 | break; |
17087 | case PPC::BI__builtin_altivec_stvxl: |
17088 | ID = Intrinsic::ppc_altivec_stvxl; |
17089 | break; |
17090 | case PPC::BI__builtin_altivec_stvebx: |
17091 | ID = Intrinsic::ppc_altivec_stvebx; |
17092 | break; |
17093 | case PPC::BI__builtin_altivec_stvehx: |
17094 | ID = Intrinsic::ppc_altivec_stvehx; |
17095 | break; |
17096 | case PPC::BI__builtin_altivec_stvewx: |
17097 | ID = Intrinsic::ppc_altivec_stvewx; |
17098 | break; |
17099 | case PPC::BI__builtin_vsx_stxvd2x: |
17100 | ID = Intrinsic::ppc_vsx_stxvd2x; |
17101 | break; |
17102 | case PPC::BI__builtin_vsx_stxvw4x: |
17103 | ID = Intrinsic::ppc_vsx_stxvw4x; |
17104 | break; |
17105 | case PPC::BI__builtin_vsx_stxvd2x_be: |
17106 | ID = Intrinsic::ppc_vsx_stxvd2x_be; |
17107 | break; |
17108 | case PPC::BI__builtin_vsx_stxvw4x_be: |
17109 | ID = Intrinsic::ppc_vsx_stxvw4x_be; |
17110 | break; |
17111 | case PPC::BI__builtin_vsx_stxvl: |
17112 | ID = Intrinsic::ppc_vsx_stxvl; |
17113 | break; |
17114 | case PPC::BI__builtin_vsx_stxvll: |
17115 | ID = Intrinsic::ppc_vsx_stxvll; |
17116 | break; |
17117 | } |
17118 | llvm::Function *F = CGM.getIntrinsic(IID: ID); |
17119 | return Builder.CreateCall(Callee: F, Args: Ops, Name: "" ); |
17120 | } |
17121 | case PPC::BI__builtin_vsx_ldrmb: { |
17122 | // Essentially boils down to performing an unaligned VMX load sequence so |
17123 | // as to avoid crossing a page boundary and then shuffling the elements |
17124 | // into the right side of the vector register. |
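    // For illustration (partial loads, NumBytes < 16): the emitted sequence is
    // roughly two lvx loads covering the first and last requested bytes, an
    // lvsl (BE) or lvsr (LE) to form the permute control, a vperm combining
    // the two loads, and a final shuffle that places the NumBytes bytes in the
    // expected lanes and zeroes the rest.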
17125 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17126 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17127 | int64_t NumBytes = cast<ConstantInt>(Val: Op1)->getZExtValue(); |
17128 | llvm::Type *ResTy = ConvertType(T: E->getType()); |
17129 | bool IsLE = getTarget().isLittleEndian(); |
17130 | |
17131 | // If the user wants the entire vector, just load the entire vector. |
17132 | if (NumBytes == 16) { |
17133 | Value *LD = |
17134 | Builder.CreateLoad(Addr: Address(Op0, ResTy, CharUnits::fromQuantity(Quantity: 1))); |
17135 | if (!IsLE) |
17136 | return LD; |
17137 | |
17138 | // Reverse the bytes on LE. |
17139 | SmallVector<int, 16> RevMask; |
17140 | for (int Idx = 0; Idx < 16; Idx++) |
17141 | RevMask.push_back(Elt: 15 - Idx); |
17142 | return Builder.CreateShuffleVector(V1: LD, V2: LD, Mask: RevMask); |
17143 | } |
17144 | |
17145 | llvm::Function *Lvx = CGM.getIntrinsic(IID: Intrinsic::ppc_altivec_lvx); |
17146 | llvm::Function *Lvs = CGM.getIntrinsic(IID: IsLE ? Intrinsic::ppc_altivec_lvsr |
17147 | : Intrinsic::ppc_altivec_lvsl); |
17148 | llvm::Function *Vperm = CGM.getIntrinsic(IID: Intrinsic::ppc_altivec_vperm); |
17149 | Value *HiMem = Builder.CreateGEP( |
17150 | Ty: Int8Ty, Ptr: Op0, IdxList: ConstantInt::get(Ty: Op1->getType(), V: NumBytes - 1)); |
17151 | Value *LoLd = Builder.CreateCall(Callee: Lvx, Args: Op0, Name: "ld.lo" ); |
17152 | Value *HiLd = Builder.CreateCall(Callee: Lvx, Args: HiMem, Name: "ld.hi" ); |
17153 | Value *Mask1 = Builder.CreateCall(Callee: Lvs, Args: Op0, Name: "mask1" ); |
17154 | |
17155 | Op0 = IsLE ? HiLd : LoLd; |
17156 | Op1 = IsLE ? LoLd : HiLd; |
17157 | Value *AllElts = Builder.CreateCall(Callee: Vperm, Args: {Op0, Op1, Mask1}, Name: "shuffle1" ); |
    Constant *Zero = llvm::Constant::getNullValue(
        Ty: IsLE ? ResTy : AllElts->getType());
17159 | |
17160 | if (IsLE) { |
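      // Build a mask that reverses the first NumBytes bytes and pulls the
      // remaining lanes from the zero vector (indices >= 16 select from the
      // second shuffle operand). For example (illustrative), NumBytes = 3
      // yields {2, 1, 0, 16, 17, ...}.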
17161 | SmallVector<int, 16> Consts; |
17162 | for (int Idx = 0; Idx < 16; Idx++) { |
17163 | int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1) |
17164 | : 16 - (NumBytes - Idx); |
17165 | Consts.push_back(Elt: Val); |
17166 | } |
17167 | return Builder.CreateShuffleVector(V1: Builder.CreateBitCast(V: AllElts, DestTy: ResTy), |
17168 | V2: Zero, Mask: Consts); |
17169 | } |
17170 | SmallVector<Constant *, 16> Consts; |
17171 | for (int Idx = 0; Idx < 16; Idx++) |
17172 | Consts.push_back(Elt: Builder.getInt8(C: NumBytes + Idx)); |
17173 | Value *Mask2 = ConstantVector::get(V: Consts); |
17174 | return Builder.CreateBitCast( |
17175 | V: Builder.CreateCall(Callee: Vperm, Args: {Zero, AllElts, Mask2}, Name: "shuffle2" ), DestTy: ResTy); |
17176 | } |
17177 | case PPC::BI__builtin_vsx_strmb: { |
17178 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17179 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17180 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
17181 | int64_t NumBytes = cast<ConstantInt>(Val: Op1)->getZExtValue(); |
17182 | bool IsLE = getTarget().isLittleEndian(); |
17183 | auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) { |
      // When storing the whole vector, simply store it on BE; on LE, reverse
      // the bytes first and then store.
17186 | if (Width == 16) { |
17187 | Value *StVec = Op2; |
17188 | if (IsLE) { |
17189 | SmallVector<int, 16> RevMask; |
17190 | for (int Idx = 0; Idx < 16; Idx++) |
17191 | RevMask.push_back(Elt: 15 - Idx); |
17192 | StVec = Builder.CreateShuffleVector(V1: Op2, V2: Op2, Mask: RevMask); |
17193 | } |
17194 | return Builder.CreateStore( |
17195 | Val: StVec, Addr: Address(Op0, Op2->getType(), CharUnits::fromQuantity(Quantity: 1))); |
17196 | } |
17197 | auto *ConvTy = Int64Ty; |
17198 | unsigned NumElts = 0; |
17199 | switch (Width) { |
17200 | default: |
17201 | llvm_unreachable("width for stores must be a power of 2" ); |
17202 | case 8: |
17203 | ConvTy = Int64Ty; |
17204 | NumElts = 2; |
17205 | break; |
17206 | case 4: |
17207 | ConvTy = Int32Ty; |
17208 | NumElts = 4; |
17209 | break; |
17210 | case 2: |
17211 | ConvTy = Int16Ty; |
17212 | NumElts = 8; |
17213 | break; |
17214 | case 1: |
17215 | ConvTy = Int8Ty; |
17216 | NumElts = 16; |
17217 | break; |
17218 | } |
17219 | Value *Vec = Builder.CreateBitCast( |
17220 | V: Op2, DestTy: llvm::FixedVectorType::get(ElementType: ConvTy, NumElts)); |
17221 | Value *Ptr = |
17222 | Builder.CreateGEP(Ty: Int8Ty, Ptr: Op0, IdxList: ConstantInt::get(Ty: Int64Ty, V: Offset)); |
17223 | Value *Elt = Builder.CreateExtractElement(Vec, Idx: EltNo); |
17224 | if (IsLE && Width > 1) { |
17225 | Function *F = CGM.getIntrinsic(IID: Intrinsic::bswap, Tys: ConvTy); |
17226 | Elt = Builder.CreateCall(Callee: F, Args: Elt); |
17227 | } |
17228 | return Builder.CreateStore( |
17229 | Val: Elt, Addr: Address(Ptr, ConvTy, CharUnits::fromQuantity(Quantity: 1))); |
17230 | }; |
17231 | unsigned Stored = 0; |
17232 | unsigned RemainingBytes = NumBytes; |
17233 | Value *Result; |
17234 | if (NumBytes == 16) |
17235 | return StoreSubVec(16, 0, 0); |
17236 | if (NumBytes >= 8) { |
17237 | Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1); |
17238 | RemainingBytes -= 8; |
17239 | Stored += 8; |
17240 | } |
17241 | if (RemainingBytes >= 4) { |
17242 | Result = StoreSubVec(4, NumBytes - Stored - 4, |
17243 | IsLE ? (Stored >> 2) : 3 - (Stored >> 2)); |
17244 | RemainingBytes -= 4; |
17245 | Stored += 4; |
17246 | } |
17247 | if (RemainingBytes >= 2) { |
17248 | Result = StoreSubVec(2, NumBytes - Stored - 2, |
17249 | IsLE ? (Stored >> 1) : 7 - (Stored >> 1)); |
17250 | RemainingBytes -= 2; |
17251 | Stored += 2; |
17252 | } |
17253 | if (RemainingBytes) |
17254 | Result = |
17255 | StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored); |
17256 | return Result; |
17257 | } |
17258 | // Square root |
17259 | case PPC::BI__builtin_vsx_xvsqrtsp: |
17260 | case PPC::BI__builtin_vsx_xvsqrtdp: { |
17261 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
17262 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17263 | if (Builder.getIsFPConstrained()) { |
17264 | llvm::Function *F = CGM.getIntrinsic( |
17265 | IID: Intrinsic::experimental_constrained_sqrt, Tys: ResultType); |
17266 | return Builder.CreateConstrainedFPCall(Callee: F, Args: X); |
17267 | } else { |
17268 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::sqrt, Tys: ResultType); |
17269 | return Builder.CreateCall(Callee: F, Args: X); |
17270 | } |
17271 | } |
17272 | // Count leading zeros |
17273 | case PPC::BI__builtin_altivec_vclzb: |
17274 | case PPC::BI__builtin_altivec_vclzh: |
17275 | case PPC::BI__builtin_altivec_vclzw: |
17276 | case PPC::BI__builtin_altivec_vclzd: { |
17277 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
17278 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17279 | Value *Undef = ConstantInt::get(Ty: Builder.getInt1Ty(), V: false); |
17280 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: ResultType); |
17281 | return Builder.CreateCall(Callee: F, Args: {X, Undef}); |
17282 | } |
17283 | case PPC::BI__builtin_altivec_vctzb: |
17284 | case PPC::BI__builtin_altivec_vctzh: |
17285 | case PPC::BI__builtin_altivec_vctzw: |
17286 | case PPC::BI__builtin_altivec_vctzd: { |
17287 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
17288 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17289 | Value *Undef = ConstantInt::get(Ty: Builder.getInt1Ty(), V: false); |
17290 | Function *F = CGM.getIntrinsic(IID: Intrinsic::cttz, Tys: ResultType); |
17291 | return Builder.CreateCall(Callee: F, Args: {X, Undef}); |
17292 | } |
17293 | case PPC::BI__builtin_altivec_vinsd: |
17294 | case PPC::BI__builtin_altivec_vinsw: |
17295 | case PPC::BI__builtin_altivec_vinsd_elt: |
17296 | case PPC::BI__builtin_altivec_vinsw_elt: { |
17297 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
17298 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17299 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17300 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
17301 | |
17302 | bool IsUnaligned = (BuiltinID == PPC::BI__builtin_altivec_vinsw || |
17303 | BuiltinID == PPC::BI__builtin_altivec_vinsd); |
17304 | |
17305 | bool Is32bit = (BuiltinID == PPC::BI__builtin_altivec_vinsw || |
17306 | BuiltinID == PPC::BI__builtin_altivec_vinsw_elt); |
17307 | |
17308 | // The third argument must be a compile time constant. |
17309 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Val: Op2); |
17310 | assert(ArgCI && |
17311 | "Third Arg to vinsw/vinsd intrinsic must be a constant integer!" ); |
17312 | |
    // The valid range for the third argument depends on the input type and
    // the builtin being called.
17315 | int ValidMaxValue = 0; |
17316 | if (IsUnaligned) |
17317 | ValidMaxValue = (Is32bit) ? 12 : 8; |
17318 | else |
17319 | ValidMaxValue = (Is32bit) ? 3 : 1; |
17320 | |
17321 | // Get value of third argument. |
17322 | int64_t ConstArg = ArgCI->getSExtValue(); |
17323 | |
17324 | // Compose range checking error message. |
17325 | std::string RangeErrMsg = IsUnaligned ? "byte" : "element" ; |
17326 | RangeErrMsg += " number " + llvm::to_string(Value: ConstArg); |
17327 | RangeErrMsg += " is outside of the valid range [0, " ; |
17328 | RangeErrMsg += llvm::to_string(Value: ValidMaxValue) + "]" ; |
17329 | |
17330 | // Issue error if third argument is not within the valid range. |
17331 | if (ConstArg < 0 || ConstArg > ValidMaxValue) |
17332 | CGM.Error(loc: E->getExprLoc(), error: RangeErrMsg); |
17333 | |
    // The input to vec_replace_elt is an element index; convert it to a
    // byte index.
17335 | if (!IsUnaligned) { |
17336 | ConstArg *= Is32bit ? 4 : 8; |
      // Fix the constant according to endianness.
17338 | if (getTarget().isLittleEndian()) |
17339 | ConstArg = (Is32bit ? 12 : 8) - ConstArg; |
17340 | } |
17341 | |
17342 | ID = Is32bit ? Intrinsic::ppc_altivec_vinsw : Intrinsic::ppc_altivec_vinsd; |
17343 | Op2 = ConstantInt::getSigned(Ty: Int32Ty, V: ConstArg); |
17344 | // Casting input to vector int as per intrinsic definition. |
17345 | Op0 = |
17346 | Is32bit |
17347 | ? Builder.CreateBitCast(V: Op0, DestTy: llvm::FixedVectorType::get(ElementType: Int32Ty, NumElts: 4)) |
17348 | : Builder.CreateBitCast(V: Op0, |
17349 | DestTy: llvm::FixedVectorType::get(ElementType: Int64Ty, NumElts: 2)); |
17350 | return Builder.CreateBitCast( |
17351 | V: Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), Args: {Op0, Op1, Op2}), DestTy: ResultType); |
17352 | } |
17353 | case PPC::BI__builtin_altivec_vpopcntb: |
17354 | case PPC::BI__builtin_altivec_vpopcnth: |
17355 | case PPC::BI__builtin_altivec_vpopcntw: |
17356 | case PPC::BI__builtin_altivec_vpopcntd: { |
17357 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
17358 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17359 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::ctpop, Tys: ResultType); |
17360 | return Builder.CreateCall(Callee: F, Args: X); |
17361 | } |
17362 | case PPC::BI__builtin_altivec_vadduqm: |
17363 | case PPC::BI__builtin_altivec_vsubuqm: { |
17364 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17365 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17366 | llvm::Type *Int128Ty = llvm::IntegerType::get(C&: getLLVMContext(), NumBits: 128); |
17367 | Op0 = Builder.CreateBitCast(V: Op0, DestTy: llvm::FixedVectorType::get(ElementType: Int128Ty, NumElts: 1)); |
17368 | Op1 = Builder.CreateBitCast(V: Op1, DestTy: llvm::FixedVectorType::get(ElementType: Int128Ty, NumElts: 1)); |
17369 | if (BuiltinID == PPC::BI__builtin_altivec_vadduqm) |
17370 | return Builder.CreateAdd(LHS: Op0, RHS: Op1, Name: "vadduqm" ); |
17371 | else |
17372 | return Builder.CreateSub(LHS: Op0, RHS: Op1, Name: "vsubuqm" ); |
17373 | } |
17374 | case PPC::BI__builtin_altivec_vaddcuq_c: |
17375 | case PPC::BI__builtin_altivec_vsubcuq_c: { |
17376 | SmallVector<Value *, 2> Ops; |
17377 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17378 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17379 | llvm::Type *V1I128Ty = llvm::FixedVectorType::get( |
17380 | ElementType: llvm::IntegerType::get(C&: getLLVMContext(), NumBits: 128), NumElts: 1); |
17381 | Ops.push_back(Elt: Builder.CreateBitCast(V: Op0, DestTy: V1I128Ty)); |
17382 | Ops.push_back(Elt: Builder.CreateBitCast(V: Op1, DestTy: V1I128Ty)); |
17383 | ID = (BuiltinID == PPC::BI__builtin_altivec_vaddcuq_c) |
17384 | ? Intrinsic::ppc_altivec_vaddcuq |
17385 | : Intrinsic::ppc_altivec_vsubcuq; |
17386 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), Args: Ops, Name: "" ); |
17387 | } |
17388 | case PPC::BI__builtin_altivec_vaddeuqm_c: |
17389 | case PPC::BI__builtin_altivec_vaddecuq_c: |
17390 | case PPC::BI__builtin_altivec_vsubeuqm_c: |
17391 | case PPC::BI__builtin_altivec_vsubecuq_c: { |
17392 | SmallVector<Value *, 3> Ops; |
17393 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17394 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17395 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
17396 | llvm::Type *V1I128Ty = llvm::FixedVectorType::get( |
17397 | ElementType: llvm::IntegerType::get(C&: getLLVMContext(), NumBits: 128), NumElts: 1); |
17398 | Ops.push_back(Elt: Builder.CreateBitCast(V: Op0, DestTy: V1I128Ty)); |
17399 | Ops.push_back(Elt: Builder.CreateBitCast(V: Op1, DestTy: V1I128Ty)); |
17400 | Ops.push_back(Elt: Builder.CreateBitCast(V: Op2, DestTy: V1I128Ty)); |
17401 | switch (BuiltinID) { |
17402 | default: |
17403 | llvm_unreachable("Unsupported intrinsic!" ); |
17404 | case PPC::BI__builtin_altivec_vaddeuqm_c: |
17405 | ID = Intrinsic::ppc_altivec_vaddeuqm; |
17406 | break; |
17407 | case PPC::BI__builtin_altivec_vaddecuq_c: |
17408 | ID = Intrinsic::ppc_altivec_vaddecuq; |
17409 | break; |
17410 | case PPC::BI__builtin_altivec_vsubeuqm_c: |
17411 | ID = Intrinsic::ppc_altivec_vsubeuqm; |
17412 | break; |
17413 | case PPC::BI__builtin_altivec_vsubecuq_c: |
17414 | ID = Intrinsic::ppc_altivec_vsubecuq; |
17415 | break; |
17416 | } |
17417 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), Args: Ops, Name: "" ); |
17418 | } |
17419 | case PPC::BI__builtin_ppc_rldimi: |
17420 | case PPC::BI__builtin_ppc_rlwimi: { |
17421 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17422 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17423 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
17424 | Value *Op3 = EmitScalarExpr(E: E->getArg(Arg: 3)); |
    // rldimi is a 64-bit instruction. On 32-bit targets, expand the intrinsic
    // before isel to leverage peepholes and avoid legalization effort.
17427 | if (BuiltinID == PPC::BI__builtin_ppc_rldimi && |
17428 | !getTarget().getTriple().isPPC64()) { |
17429 | Function *F = CGM.getIntrinsic(IID: Intrinsic::fshl, Tys: Op0->getType()); |
17430 | Op2 = Builder.CreateZExt(V: Op2, DestTy: Int64Ty); |
17431 | Value *Shift = Builder.CreateCall(Callee: F, Args: {Op0, Op0, Op2}); |
17432 | return Builder.CreateOr(LHS: Builder.CreateAnd(LHS: Shift, RHS: Op3), |
17433 | RHS: Builder.CreateAnd(LHS: Op1, RHS: Builder.CreateNot(V: Op3))); |
17434 | } |
17435 | return Builder.CreateCall( |
17436 | Callee: CGM.getIntrinsic(IID: BuiltinID == PPC::BI__builtin_ppc_rldimi |
17437 | ? Intrinsic::ppc_rldimi |
17438 | : Intrinsic::ppc_rlwimi), |
17439 | Args: {Op0, Op1, Op2, Op3}); |
17440 | } |
17441 | case PPC::BI__builtin_ppc_rlwnm: { |
17442 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17443 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17444 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
17445 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::ppc_rlwnm), |
17446 | Args: {Op0, Op1, Op2}); |
17447 | } |
17448 | case PPC::BI__builtin_ppc_poppar4: |
17449 | case PPC::BI__builtin_ppc_poppar8: { |
17450 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17451 | llvm::Type *ArgType = Op0->getType(); |
17452 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctpop, Tys: ArgType); |
17453 | Value *Tmp = Builder.CreateCall(Callee: F, Args: Op0); |
17454 | |
17455 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
17456 | Value *Result = Builder.CreateAnd(LHS: Tmp, RHS: llvm::ConstantInt::get(Ty: ArgType, V: 1)); |
17457 | if (Result->getType() != ResultType) |
17458 | Result = Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/true, |
17459 | Name: "cast" ); |
17460 | return Result; |
17461 | } |
17462 | case PPC::BI__builtin_ppc_cmpb: { |
17463 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17464 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17465 | if (getTarget().getTriple().isPPC64()) { |
17466 | Function *F = |
17467 | CGM.getIntrinsic(IID: Intrinsic::ppc_cmpb, Tys: {Int64Ty, Int64Ty, Int64Ty}); |
17468 | return Builder.CreateCall(Callee: F, Args: {Op0, Op1}, Name: "cmpb" ); |
17469 | } |
    // For 32-bit targets, emit code like the following:
17471 | // %conv = trunc i64 %a to i32 |
17472 | // %conv1 = trunc i64 %b to i32 |
17473 | // %shr = lshr i64 %a, 32 |
17474 | // %conv2 = trunc i64 %shr to i32 |
17475 | // %shr3 = lshr i64 %b, 32 |
17476 | // %conv4 = trunc i64 %shr3 to i32 |
17477 | // %0 = tail call i32 @llvm.ppc.cmpb32(i32 %conv, i32 %conv1) |
17478 | // %conv5 = zext i32 %0 to i64 |
17479 | // %1 = tail call i32 @llvm.ppc.cmpb32(i32 %conv2, i32 %conv4) |
17480 | // %conv614 = zext i32 %1 to i64 |
17481 | // %shl = shl nuw i64 %conv614, 32 |
17482 | // %or = or i64 %shl, %conv5 |
17483 | // ret i64 %or |
17484 | Function *F = |
17485 | CGM.getIntrinsic(IID: Intrinsic::ppc_cmpb, Tys: {Int32Ty, Int32Ty, Int32Ty}); |
17486 | Value *ArgOneLo = Builder.CreateTrunc(V: Op0, DestTy: Int32Ty); |
17487 | Value *ArgTwoLo = Builder.CreateTrunc(V: Op1, DestTy: Int32Ty); |
17488 | Constant *ShiftAmt = ConstantInt::get(Ty: Int64Ty, V: 32); |
17489 | Value *ArgOneHi = |
17490 | Builder.CreateTrunc(V: Builder.CreateLShr(LHS: Op0, RHS: ShiftAmt), DestTy: Int32Ty); |
17491 | Value *ArgTwoHi = |
17492 | Builder.CreateTrunc(V: Builder.CreateLShr(LHS: Op1, RHS: ShiftAmt), DestTy: Int32Ty); |
17493 | Value *ResLo = Builder.CreateZExt( |
17494 | V: Builder.CreateCall(Callee: F, Args: {ArgOneLo, ArgTwoLo}, Name: "cmpb" ), DestTy: Int64Ty); |
17495 | Value *ResHiShift = Builder.CreateZExt( |
17496 | V: Builder.CreateCall(Callee: F, Args: {ArgOneHi, ArgTwoHi}, Name: "cmpb" ), DestTy: Int64Ty); |
17497 | Value *ResHi = Builder.CreateShl(LHS: ResHiShift, RHS: ShiftAmt); |
17498 | return Builder.CreateOr(LHS: ResLo, RHS: ResHi); |
17499 | } |
17500 | // Copy sign |
17501 | case PPC::BI__builtin_vsx_xvcpsgnsp: |
17502 | case PPC::BI__builtin_vsx_xvcpsgndp: { |
17503 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
17504 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17505 | Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17506 | ID = Intrinsic::copysign; |
17507 | llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: ResultType); |
17508 | return Builder.CreateCall(Callee: F, Args: {X, Y}); |
17509 | } |
17510 | // Rounding/truncation |
17511 | case PPC::BI__builtin_vsx_xvrspip: |
17512 | case PPC::BI__builtin_vsx_xvrdpip: |
17513 | case PPC::BI__builtin_vsx_xvrdpim: |
17514 | case PPC::BI__builtin_vsx_xvrspim: |
17515 | case PPC::BI__builtin_vsx_xvrdpi: |
17516 | case PPC::BI__builtin_vsx_xvrspi: |
17517 | case PPC::BI__builtin_vsx_xvrdpic: |
17518 | case PPC::BI__builtin_vsx_xvrspic: |
17519 | case PPC::BI__builtin_vsx_xvrdpiz: |
17520 | case PPC::BI__builtin_vsx_xvrspiz: { |
17521 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
17522 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17523 | if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim || |
17524 | BuiltinID == PPC::BI__builtin_vsx_xvrspim) |
17525 | ID = Builder.getIsFPConstrained() |
17526 | ? Intrinsic::experimental_constrained_floor |
17527 | : Intrinsic::floor; |
17528 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi || |
17529 | BuiltinID == PPC::BI__builtin_vsx_xvrspi) |
17530 | ID = Builder.getIsFPConstrained() |
17531 | ? Intrinsic::experimental_constrained_round |
17532 | : Intrinsic::round; |
17533 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic || |
17534 | BuiltinID == PPC::BI__builtin_vsx_xvrspic) |
17535 | ID = Builder.getIsFPConstrained() |
17536 | ? Intrinsic::experimental_constrained_rint |
17537 | : Intrinsic::rint; |
17538 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip || |
17539 | BuiltinID == PPC::BI__builtin_vsx_xvrspip) |
17540 | ID = Builder.getIsFPConstrained() |
17541 | ? Intrinsic::experimental_constrained_ceil |
17542 | : Intrinsic::ceil; |
17543 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz || |
17544 | BuiltinID == PPC::BI__builtin_vsx_xvrspiz) |
17545 | ID = Builder.getIsFPConstrained() |
17546 | ? Intrinsic::experimental_constrained_trunc |
17547 | : Intrinsic::trunc; |
17548 | llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: ResultType); |
17549 | return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(Callee: F, Args: X) |
17550 | : Builder.CreateCall(Callee: F, Args: X); |
17551 | } |
17552 | |
17553 | // Absolute value |
17554 | case PPC::BI__builtin_vsx_xvabsdp: |
17555 | case PPC::BI__builtin_vsx_xvabssp: { |
17556 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
17557 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17558 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::fabs, Tys: ResultType); |
17559 | return Builder.CreateCall(Callee: F, Args: X); |
17560 | } |
17561 | |
17562 | // Fastmath by default |
17563 | case PPC::BI__builtin_ppc_recipdivf: |
17564 | case PPC::BI__builtin_ppc_recipdivd: |
17565 | case PPC::BI__builtin_ppc_rsqrtf: |
17566 | case PPC::BI__builtin_ppc_rsqrtd: { |
17567 | FastMathFlags FMF = Builder.getFastMathFlags(); |
17568 | Builder.getFastMathFlags().setFast(); |
17569 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
17570 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17571 | |
17572 | if (BuiltinID == PPC::BI__builtin_ppc_recipdivf || |
17573 | BuiltinID == PPC::BI__builtin_ppc_recipdivd) { |
17574 | Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17575 | Value *FDiv = Builder.CreateFDiv(L: X, R: Y, Name: "recipdiv" ); |
17576 | Builder.getFastMathFlags() &= (FMF); |
17577 | return FDiv; |
17578 | } |
17579 | auto *One = ConstantFP::get(Ty: ResultType, V: 1.0); |
17580 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::sqrt, Tys: ResultType); |
17581 | Value *FDiv = Builder.CreateFDiv(L: One, R: Builder.CreateCall(Callee: F, Args: X), Name: "rsqrt" ); |
17582 | Builder.getFastMathFlags() &= (FMF); |
17583 | return FDiv; |
17584 | } |
17585 | case PPC::BI__builtin_ppc_alignx: { |
17586 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17587 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17588 | ConstantInt *AlignmentCI = cast<ConstantInt>(Val: Op0); |
17589 | if (AlignmentCI->getValue().ugt(RHS: llvm::Value::MaximumAlignment)) |
17590 | AlignmentCI = ConstantInt::get(Ty: AlignmentCI->getIntegerType(), |
17591 | V: llvm::Value::MaximumAlignment); |
17592 | |
17593 | emitAlignmentAssumption(PtrValue: Op1, E: E->getArg(Arg: 1), |
17594 | /*The expr loc is sufficient.*/ AssumptionLoc: SourceLocation(), |
17595 | Alignment: AlignmentCI, OffsetValue: nullptr); |
17596 | return Op1; |
17597 | } |
17598 | case PPC::BI__builtin_ppc_rdlam: { |
17599 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17600 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17601 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
17602 | llvm::Type *Ty = Op0->getType(); |
17603 | Value *ShiftAmt = Builder.CreateIntCast(V: Op1, DestTy: Ty, isSigned: false); |
17604 | Function *F = CGM.getIntrinsic(IID: Intrinsic::fshl, Tys: Ty); |
17605 | Value *Rotate = Builder.CreateCall(Callee: F, Args: {Op0, Op0, ShiftAmt}); |
17606 | return Builder.CreateAnd(LHS: Rotate, RHS: Op2); |
17607 | } |
17608 | case PPC::BI__builtin_ppc_load2r: { |
17609 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ppc_load2r); |
17610 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17611 | Value *LoadIntrinsic = Builder.CreateCall(Callee: F, Args: {Op0}); |
17612 | return Builder.CreateTrunc(V: LoadIntrinsic, DestTy: Int16Ty); |
17613 | } |
17614 | // FMA variations |
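  // These all map onto llvm.fma (or its constrained variant), possibly with
  // negated operands or results: xvmadd -> fma(x, y, z), xvnmadd ->
  // -fma(x, y, z), xvmsub -> fma(x, y, -z), and fnmsub/xvnmsub ->
  // -fma(x, y, -z) (emitted as ppc_fnmsub in the non-constrained case).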
17615 | case PPC::BI__builtin_ppc_fnmsub: |
17616 | case PPC::BI__builtin_ppc_fnmsubs: |
17617 | case PPC::BI__builtin_vsx_xvmaddadp: |
17618 | case PPC::BI__builtin_vsx_xvmaddasp: |
17619 | case PPC::BI__builtin_vsx_xvnmaddadp: |
17620 | case PPC::BI__builtin_vsx_xvnmaddasp: |
17621 | case PPC::BI__builtin_vsx_xvmsubadp: |
17622 | case PPC::BI__builtin_vsx_xvmsubasp: |
17623 | case PPC::BI__builtin_vsx_xvnmsubadp: |
17624 | case PPC::BI__builtin_vsx_xvnmsubasp: { |
17625 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
17626 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17627 | Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17628 | Value *Z = EmitScalarExpr(E: E->getArg(Arg: 2)); |
17629 | llvm::Function *F; |
17630 | if (Builder.getIsFPConstrained()) |
17631 | F = CGM.getIntrinsic(IID: Intrinsic::experimental_constrained_fma, Tys: ResultType); |
17632 | else |
17633 | F = CGM.getIntrinsic(IID: Intrinsic::fma, Tys: ResultType); |
17634 | switch (BuiltinID) { |
17635 | case PPC::BI__builtin_vsx_xvmaddadp: |
17636 | case PPC::BI__builtin_vsx_xvmaddasp: |
17637 | if (Builder.getIsFPConstrained()) |
17638 | return Builder.CreateConstrainedFPCall(Callee: F, Args: {X, Y, Z}); |
17639 | else |
17640 | return Builder.CreateCall(Callee: F, Args: {X, Y, Z}); |
17641 | case PPC::BI__builtin_vsx_xvnmaddadp: |
17642 | case PPC::BI__builtin_vsx_xvnmaddasp: |
17643 | if (Builder.getIsFPConstrained()) |
17644 | return Builder.CreateFNeg( |
17645 | V: Builder.CreateConstrainedFPCall(Callee: F, Args: {X, Y, Z}), Name: "neg" ); |
17646 | else |
17647 | return Builder.CreateFNeg(V: Builder.CreateCall(Callee: F, Args: {X, Y, Z}), Name: "neg" ); |
17648 | case PPC::BI__builtin_vsx_xvmsubadp: |
17649 | case PPC::BI__builtin_vsx_xvmsubasp: |
17650 | if (Builder.getIsFPConstrained()) |
17651 | return Builder.CreateConstrainedFPCall( |
17652 | Callee: F, Args: {X, Y, Builder.CreateFNeg(V: Z, Name: "neg" )}); |
17653 | else |
17654 | return Builder.CreateCall(Callee: F, Args: {X, Y, Builder.CreateFNeg(V: Z, Name: "neg" )}); |
17655 | case PPC::BI__builtin_ppc_fnmsub: |
17656 | case PPC::BI__builtin_ppc_fnmsubs: |
17657 | case PPC::BI__builtin_vsx_xvnmsubadp: |
17658 | case PPC::BI__builtin_vsx_xvnmsubasp: |
17659 | if (Builder.getIsFPConstrained()) |
17660 | return Builder.CreateFNeg( |
17661 | V: Builder.CreateConstrainedFPCall( |
17662 | Callee: F, Args: {X, Y, Builder.CreateFNeg(V: Z, Name: "neg" )}), |
17663 | Name: "neg" ); |
17664 | else |
17665 | return Builder.CreateCall( |
17666 | Callee: CGM.getIntrinsic(IID: Intrinsic::ppc_fnmsub, Tys: ResultType), Args: {X, Y, Z}); |
17667 | } |
17668 | llvm_unreachable("Unknown FMA operation" ); |
17669 | return nullptr; // Suppress no-return warning |
17670 | } |
17671 | |
17672 | case PPC::BI__builtin_vsx_insertword: { |
17673 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17674 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17675 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
17676 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::ppc_vsx_xxinsertw); |
17677 | |
    // The third argument is a compile-time constant int. It must be clamped
    // to the range [0, 12].
17680 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Val: Op2); |
17681 | assert(ArgCI && |
17682 | "Third arg to xxinsertw intrinsic must be constant integer" ); |
17683 | const int64_t MaxIndex = 12; |
17684 | int64_t Index = std::clamp(val: ArgCI->getSExtValue(), lo: (int64_t)0, hi: MaxIndex); |
17685 | |
    // The builtin semantics don't exactly match the xxinsertw instruction's
    // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
    // word from the first argument and inserts it into the second argument. The
    // instruction extracts the word from its second input register and inserts
    // it into its first input register, so swap the first and second arguments.
17691 | std::swap(a&: Op0, b&: Op1); |
17692 | |
17693 | // Need to cast the second argument from a vector of unsigned int to a |
17694 | // vector of long long. |
17695 | Op1 = Builder.CreateBitCast(V: Op1, DestTy: llvm::FixedVectorType::get(ElementType: Int64Ty, NumElts: 2)); |
17696 | |
17697 | if (getTarget().isLittleEndian()) { |
17698 | // Reverse the double words in the vector we will extract from. |
17699 | Op0 = Builder.CreateBitCast(V: Op0, DestTy: llvm::FixedVectorType::get(ElementType: Int64Ty, NumElts: 2)); |
17700 | Op0 = Builder.CreateShuffleVector(V1: Op0, V2: Op0, Mask: ArrayRef<int>{1, 0}); |
17701 | |
17702 | // Reverse the index. |
17703 | Index = MaxIndex - Index; |
17704 | } |
17705 | |
17706 | // Intrinsic expects the first arg to be a vector of int. |
17707 | Op0 = Builder.CreateBitCast(V: Op0, DestTy: llvm::FixedVectorType::get(ElementType: Int32Ty, NumElts: 4)); |
17708 | Op2 = ConstantInt::getSigned(Ty: Int32Ty, V: Index); |
17709 | return Builder.CreateCall(Callee: F, Args: {Op0, Op1, Op2}); |
17710 | } |
17711 | |
17712 | case PPC::BI__builtin_vsx_extractuword: { |
17713 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17714 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17715 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::ppc_vsx_xxextractuw); |
17716 | |
17717 | // Intrinsic expects the first argument to be a vector of doublewords. |
17718 | Op0 = Builder.CreateBitCast(V: Op0, DestTy: llvm::FixedVectorType::get(ElementType: Int64Ty, NumElts: 2)); |
17719 | |
17720 | // The second argument is a compile time constant int that needs to |
17721 | // be clamped to the range [0, 12]. |
17722 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Val: Op1); |
17723 | assert(ArgCI && |
17724 | "Second Arg to xxextractuw intrinsic must be a constant integer!" ); |
17725 | const int64_t MaxIndex = 12; |
17726 | int64_t Index = std::clamp(val: ArgCI->getSExtValue(), lo: (int64_t)0, hi: MaxIndex); |
17727 | |
17728 | if (getTarget().isLittleEndian()) { |
17729 | // Reverse the index. |
17730 | Index = MaxIndex - Index; |
17731 | Op1 = ConstantInt::getSigned(Ty: Int32Ty, V: Index); |
17732 | |
17733 | // Emit the call, then reverse the double words of the results vector. |
17734 | Value *Call = Builder.CreateCall(Callee: F, Args: {Op0, Op1}); |
17735 | |
17736 | Value *ShuffleCall = |
17737 | Builder.CreateShuffleVector(V1: Call, V2: Call, Mask: ArrayRef<int>{1, 0}); |
17738 | return ShuffleCall; |
17739 | } else { |
17740 | Op1 = ConstantInt::getSigned(Ty: Int32Ty, V: Index); |
17741 | return Builder.CreateCall(Callee: F, Args: {Op0, Op1}); |
17742 | } |
17743 | } |
17744 | |
17745 | case PPC::BI__builtin_vsx_xxpermdi: { |
17746 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17747 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17748 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
17749 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Val: Op2); |
17750 | assert(ArgCI && "Third arg must be constant integer!" ); |
17751 | |
17752 | unsigned Index = ArgCI->getZExtValue(); |
17753 | Op0 = Builder.CreateBitCast(V: Op0, DestTy: llvm::FixedVectorType::get(ElementType: Int64Ty, NumElts: 2)); |
17754 | Op1 = Builder.CreateBitCast(V: Op1, DestTy: llvm::FixedVectorType::get(ElementType: Int64Ty, NumElts: 2)); |
17755 | |
17756 | // Account for endianness by treating this as just a shuffle. So we use the |
17757 | // same indices for both LE and BE in order to produce expected results in |
17758 | // both cases. |
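    // For example (illustrative): Index = 2 (0b10) yields shuffle indices
    // {1, 2}, i.e. doubleword 1 of the first vector followed by doubleword 0
    // of the second.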
17759 | int ElemIdx0 = (Index & 2) >> 1; |
17760 | int ElemIdx1 = 2 + (Index & 1); |
17761 | |
17762 | int ShuffleElts[2] = {ElemIdx0, ElemIdx1}; |
17763 | Value *ShuffleCall = Builder.CreateShuffleVector(V1: Op0, V2: Op1, Mask: ShuffleElts); |
17764 | QualType BIRetType = E->getType(); |
17765 | auto RetTy = ConvertType(T: BIRetType); |
17766 | return Builder.CreateBitCast(V: ShuffleCall, DestTy: RetTy); |
17767 | } |
17768 | |
17769 | case PPC::BI__builtin_vsx_xxsldwi: { |
17770 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17771 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17772 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
17773 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Val: Op2); |
17774 | assert(ArgCI && "Third argument must be a compile time constant" ); |
17775 | unsigned Index = ArgCI->getZExtValue() & 0x3; |
17776 | Op0 = Builder.CreateBitCast(V: Op0, DestTy: llvm::FixedVectorType::get(ElementType: Int32Ty, NumElts: 4)); |
17777 | Op1 = Builder.CreateBitCast(V: Op1, DestTy: llvm::FixedVectorType::get(ElementType: Int32Ty, NumElts: 4)); |
17778 | |
17779 | // Create a shuffle mask |
17780 | int ElemIdx0; |
17781 | int ElemIdx1; |
17782 | int ElemIdx2; |
17783 | int ElemIdx3; |
17784 | if (getTarget().isLittleEndian()) { |
17785 | // Little endian element N comes from element 8+N-Index of the |
17786 | // concatenated wide vector (of course, using modulo arithmetic on |
17787 | // the total number of elements). |
17788 | ElemIdx0 = (8 - Index) % 8; |
17789 | ElemIdx1 = (9 - Index) % 8; |
17790 | ElemIdx2 = (10 - Index) % 8; |
17791 | ElemIdx3 = (11 - Index) % 8; |
17792 | } else { |
17793 | // Big endian ElemIdx<N> = Index + N |
17794 | ElemIdx0 = Index; |
17795 | ElemIdx1 = Index + 1; |
17796 | ElemIdx2 = Index + 2; |
17797 | ElemIdx3 = Index + 3; |
17798 | } |
17799 | |
17800 | int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3}; |
17801 | Value *ShuffleCall = Builder.CreateShuffleVector(V1: Op0, V2: Op1, Mask: ShuffleElts); |
17802 | QualType BIRetType = E->getType(); |
17803 | auto RetTy = ConvertType(T: BIRetType); |
17804 | return Builder.CreateBitCast(V: ShuffleCall, DestTy: RetTy); |
17805 | } |
17806 | |
17807 | case PPC::BI__builtin_pack_vector_int128: { |
17808 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17809 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17810 | bool isLittleEndian = getTarget().isLittleEndian(); |
17811 | Value *PoisonValue = |
17812 | llvm::PoisonValue::get(T: llvm::FixedVectorType::get(ElementType: Op0->getType(), NumElts: 2)); |
17813 | Value *Res = Builder.CreateInsertElement( |
17814 | Vec: PoisonValue, NewElt: Op0, Idx: (uint64_t)(isLittleEndian ? 1 : 0)); |
17815 | Res = Builder.CreateInsertElement(Vec: Res, NewElt: Op1, |
17816 | Idx: (uint64_t)(isLittleEndian ? 0 : 1)); |
17817 | return Builder.CreateBitCast(V: Res, DestTy: ConvertType(T: E->getType())); |
17818 | } |
17819 | |
17820 | case PPC::BI__builtin_unpack_vector_int128: { |
17821 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17822 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17823 | ConstantInt *Index = cast<ConstantInt>(Val: Op1); |
17824 | Value *Unpacked = Builder.CreateBitCast( |
17825 | V: Op0, DestTy: llvm::FixedVectorType::get(ElementType: ConvertType(T: E->getType()), NumElts: 2)); |
17826 | |
17827 | if (getTarget().isLittleEndian()) |
17828 | Index = |
17829 | ConstantInt::get(Ty: Index->getIntegerType(), V: 1 - Index->getZExtValue()); |
17830 | |
17831 | return Builder.CreateExtractElement(Vec: Unpacked, Idx: Index); |
17832 | } |
17833 | |
17834 | case PPC::BI__builtin_ppc_sthcx: { |
17835 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::ppc_sthcx); |
17836 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17837 | Value *Op1 = Builder.CreateSExt(V: EmitScalarExpr(E: E->getArg(Arg: 1)), DestTy: Int32Ty); |
17838 | return Builder.CreateCall(Callee: F, Args: {Op0, Op1}); |
17839 | } |
17840 | |
17841 | // The PPC MMA builtins take a pointer to a __vector_quad as an argument. |
17842 | // Some of the MMA instructions accumulate their result into an existing |
17843 | // accumulator whereas the others generate a new accumulator. So we need to |
17844 | // use custom code generation to expand a builtin call with a pointer to a |
17845 | // load (if the corresponding instruction accumulates its result) followed by |
17846 | // the call to the intrinsic and a store of the result. |
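  // For example (illustrative): a call to an accumulating builtin such as
  // __builtin_mma_xvf64gerpp(&acc, p, v) becomes, roughly, a load of the
  // accumulator through &acc, a call to the corresponding ppc_mma_* intrinsic
  // with the loaded accumulator prepended to the remaining operands, and a
  // store of the intrinsic's result back through &acc.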
17847 | #define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate, Feature) \ |
17848 | case PPC::BI__builtin_##Name: |
17849 | #include "clang/Basic/BuiltinsPPC.def" |
17850 | { |
17851 | SmallVector<Value *, 4> Ops; |
17852 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) |
17853 | if (E->getArg(Arg: i)->getType()->isArrayType()) |
17854 | Ops.push_back( |
17855 | Elt: EmitArrayToPointerDecay(Array: E->getArg(Arg: i)).emitRawPointer(CGF&: *this)); |
17856 | else |
17857 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: i))); |
    // The first argument of these builtins is a pointer used to store their
    // result. However, the LLVM intrinsics return their result in multiple
    // return values, so here we emit code extracting these values from the
    // intrinsic results and storing them through that pointer.
17862 | if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc || |
17863 | BuiltinID == PPC::BI__builtin_vsx_disassemble_pair || |
17864 | BuiltinID == PPC::BI__builtin_mma_disassemble_pair) { |
17865 | unsigned NumVecs = 2; |
17866 | auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair; |
17867 | if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) { |
17868 | NumVecs = 4; |
17869 | Intrinsic = Intrinsic::ppc_mma_disassemble_acc; |
17870 | } |
17871 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic); |
17872 | Address Addr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
17873 | Value *Vec = Builder.CreateLoad(Addr); |
17874 | Value *Call = Builder.CreateCall(Callee: F, Args: {Vec}); |
17875 | llvm::Type *VTy = llvm::FixedVectorType::get(ElementType: Int8Ty, NumElts: 16); |
17876 | Value *Ptr = Ops[0]; |
17877 | for (unsigned i=0; i<NumVecs; i++) { |
17878 | Value *Vec = Builder.CreateExtractValue(Agg: Call, Idxs: i); |
17879 | llvm::ConstantInt* Index = llvm::ConstantInt::get(Ty: IntTy, V: i); |
17880 | Value *GEP = Builder.CreateInBoundsGEP(Ty: VTy, Ptr, IdxList: Index); |
17881 | Builder.CreateAlignedStore(Val: Vec, Ptr: GEP, Align: MaybeAlign(16)); |
17882 | } |
17883 | return Call; |
17884 | } |
17885 | if (BuiltinID == PPC::BI__builtin_vsx_build_pair || |
17886 | BuiltinID == PPC::BI__builtin_mma_build_acc) { |
      // Reverse the order of the operands for LE, so the same builtin call can
      // be used on both LE and BE without the programmer needing to swap
      // operands. The operands are reversed starting from the second argument;
      // the first operand is the pointer to the pair/accumulator being built.
17893 | if (getTarget().isLittleEndian()) |
17894 | std::reverse(first: Ops.begin() + 1, last: Ops.end()); |
17895 | } |
17896 | bool Accumulate; |
17897 | switch (BuiltinID) { |
17898 | #define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \ |
17899 | case PPC::BI__builtin_##Name: \ |
17900 | ID = Intrinsic::ppc_##Intr; \ |
17901 | Accumulate = Acc; \ |
17902 | break; |
17903 | #include "clang/Basic/BuiltinsPPC.def" |
17904 | } |
17905 | if (BuiltinID == PPC::BI__builtin_vsx_lxvp || |
17906 | BuiltinID == PPC::BI__builtin_vsx_stxvp || |
17907 | BuiltinID == PPC::BI__builtin_mma_lxvp || |
17908 | BuiltinID == PPC::BI__builtin_mma_stxvp) { |
17909 | if (BuiltinID == PPC::BI__builtin_vsx_lxvp || |
17910 | BuiltinID == PPC::BI__builtin_mma_lxvp) { |
17911 | Ops[0] = Builder.CreateGEP(Ty: Int8Ty, Ptr: Ops[1], IdxList: Ops[0]); |
17912 | } else { |
17913 | Ops[1] = Builder.CreateGEP(Ty: Int8Ty, Ptr: Ops[2], IdxList: Ops[1]); |
17914 | } |
17915 | Ops.pop_back(); |
17916 | llvm::Function *F = CGM.getIntrinsic(IID: ID); |
17917 | return Builder.CreateCall(Callee: F, Args: Ops, Name: "" ); |
17918 | } |
17919 | SmallVector<Value*, 4> CallOps; |
17920 | if (Accumulate) { |
17921 | Address Addr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
17922 | Value *Acc = Builder.CreateLoad(Addr); |
17923 | CallOps.push_back(Elt: Acc); |
17924 | } |
17925 | for (unsigned i=1; i<Ops.size(); i++) |
17926 | CallOps.push_back(Elt: Ops[i]); |
17927 | llvm::Function *F = CGM.getIntrinsic(IID: ID); |
17928 | Value *Call = Builder.CreateCall(Callee: F, Args: CallOps); |
17929 | return Builder.CreateAlignedStore(Val: Call, Ptr: Ops[0], Align: MaybeAlign(64)); |
17930 | } |
17931 | |
17932 | case PPC::BI__builtin_ppc_compare_and_swap: |
17933 | case PPC::BI__builtin_ppc_compare_and_swaplp: { |
17934 | Address Addr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
17935 | Address OldValAddr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
17936 | Value *OldVal = Builder.CreateLoad(Addr: OldValAddr); |
17937 | QualType AtomicTy = E->getArg(Arg: 0)->getType()->getPointeeType(); |
17938 | LValue LV = MakeAddrLValue(Addr, T: AtomicTy); |
17939 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
17940 | auto Pair = EmitAtomicCompareExchange( |
17941 | Obj: LV, Expected: RValue::get(V: OldVal), Desired: RValue::get(V: Op2), Loc: E->getExprLoc(), |
17942 | Success: llvm::AtomicOrdering::Monotonic, Failure: llvm::AtomicOrdering::Monotonic, IsWeak: true); |
    // Unlike C11's atomic_compare_exchange, according to
    // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
    // > In either case, the contents of the memory location specified by addr
    // > are copied into the memory location specified by old_val_addr.
    // But it does not specify whether the store to OldValAddr is atomic or
    // which ordering to use. Following XL's codegen, treat it as a normal
    // store.
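    // Illustrative usage: __builtin_ppc_compare_and_swap(&mem, &old, val)
    // returns nonzero iff the exchange succeeded, and `old` is always updated
    // with the value read from mem.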
17950 | Value *LoadedVal = Pair.first.getScalarVal(); |
17951 | Builder.CreateStore(Val: LoadedVal, Addr: OldValAddr); |
17952 | return Builder.CreateZExt(V: Pair.second, DestTy: Builder.getInt32Ty()); |
17953 | } |
17954 | case PPC::BI__builtin_ppc_fetch_and_add: |
17955 | case PPC::BI__builtin_ppc_fetch_and_addlp: { |
17956 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Add, E, |
17957 | Ordering: llvm::AtomicOrdering::Monotonic); |
17958 | } |
17959 | case PPC::BI__builtin_ppc_fetch_and_and: |
17960 | case PPC::BI__builtin_ppc_fetch_and_andlp: { |
17961 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::And, E, |
17962 | Ordering: llvm::AtomicOrdering::Monotonic); |
17963 | } |
17964 | |
17965 | case PPC::BI__builtin_ppc_fetch_and_or: |
17966 | case PPC::BI__builtin_ppc_fetch_and_orlp: { |
17967 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Or, E, |
17968 | Ordering: llvm::AtomicOrdering::Monotonic); |
17969 | } |
17970 | case PPC::BI__builtin_ppc_fetch_and_swap: |
17971 | case PPC::BI__builtin_ppc_fetch_and_swaplp: { |
17972 | return MakeBinaryAtomicValue(CGF&: *this, Kind: AtomicRMWInst::Xchg, E, |
17973 | Ordering: llvm::AtomicOrdering::Monotonic); |
17974 | } |
17975 | case PPC::BI__builtin_ppc_ldarx: |
17976 | case PPC::BI__builtin_ppc_lwarx: |
17977 | case PPC::BI__builtin_ppc_lharx: |
17978 | case PPC::BI__builtin_ppc_lbarx: |
17979 | return emitPPCLoadReserveIntrinsic(CGF&: *this, BuiltinID, E); |
17980 | case PPC::BI__builtin_ppc_mfspr: { |
17981 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17982 | llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(Ty: VoidPtrTy) == 32 |
17983 | ? Int32Ty |
17984 | : Int64Ty; |
17985 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ppc_mfspr, Tys: RetType); |
17986 | return Builder.CreateCall(Callee: F, Args: {Op0}); |
17987 | } |
17988 | case PPC::BI__builtin_ppc_mtspr: { |
17989 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17990 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
17991 | llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(Ty: VoidPtrTy) == 32 |
17992 | ? Int32Ty |
17993 | : Int64Ty; |
17994 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ppc_mtspr, Tys: RetType); |
17995 | return Builder.CreateCall(Callee: F, Args: {Op0, Op1}); |
17996 | } |
17997 | case PPC::BI__builtin_ppc_popcntb: { |
17998 | Value *ArgValue = EmitScalarExpr(E: E->getArg(Arg: 0)); |
17999 | llvm::Type *ArgType = ArgValue->getType(); |
18000 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ppc_popcntb, Tys: {ArgType, ArgType}); |
18001 | return Builder.CreateCall(Callee: F, Args: {ArgValue}, Name: "popcntb" ); |
18002 | } |
18003 | case PPC::BI__builtin_ppc_mtfsf: { |
    // The builtin takes a uint32 that needs to be converted to an
    // f64 before being passed to the intrinsic.
18006 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18007 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18008 | Value *Cast = Builder.CreateUIToFP(V: Op1, DestTy: DoubleTy); |
18009 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::ppc_mtfsf); |
18010 | return Builder.CreateCall(Callee: F, Args: {Op0, Cast}, Name: "" ); |
18011 | } |
18012 | |
18013 | case PPC::BI__builtin_ppc_swdiv_nochk: |
18014 | case PPC::BI__builtin_ppc_swdivs_nochk: { |
18015 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18016 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18017 | FastMathFlags FMF = Builder.getFastMathFlags(); |
18018 | Builder.getFastMathFlags().setFast(); |
18019 | Value *FDiv = Builder.CreateFDiv(L: Op0, R: Op1, Name: "swdiv_nochk" ); |
18020 | Builder.getFastMathFlags() &= (FMF); |
18021 | return FDiv; |
18022 | } |
18023 | case PPC::BI__builtin_ppc_fric: |
18024 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
18025 | CGF&: *this, E, IntrinsicID: Intrinsic::rint, |
18026 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_rint)) |
18027 | .getScalarVal(); |
18028 | case PPC::BI__builtin_ppc_frim: |
18029 | case PPC::BI__builtin_ppc_frims: |
18030 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
18031 | CGF&: *this, E, IntrinsicID: Intrinsic::floor, |
18032 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_floor)) |
18033 | .getScalarVal(); |
18034 | case PPC::BI__builtin_ppc_frin: |
18035 | case PPC::BI__builtin_ppc_frins: |
18036 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
18037 | CGF&: *this, E, IntrinsicID: Intrinsic::round, |
18038 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_round)) |
18039 | .getScalarVal(); |
18040 | case PPC::BI__builtin_ppc_frip: |
18041 | case PPC::BI__builtin_ppc_frips: |
18042 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
18043 | CGF&: *this, E, IntrinsicID: Intrinsic::ceil, |
18044 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_ceil)) |
18045 | .getScalarVal(); |
18046 | case PPC::BI__builtin_ppc_friz: |
18047 | case PPC::BI__builtin_ppc_frizs: |
18048 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
18049 | CGF&: *this, E, IntrinsicID: Intrinsic::trunc, |
18050 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_trunc)) |
18051 | .getScalarVal(); |
18052 | case PPC::BI__builtin_ppc_fsqrt: |
18053 | case PPC::BI__builtin_ppc_fsqrts: |
18054 | return RValue::get(V: emitUnaryMaybeConstrainedFPBuiltin( |
18055 | CGF&: *this, E, IntrinsicID: Intrinsic::sqrt, |
18056 | ConstrainedIntrinsicID: Intrinsic::experimental_constrained_sqrt)) |
18057 | .getScalarVal(); |
18058 | case PPC::BI__builtin_ppc_test_data_class: { |
18059 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18060 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18061 | return Builder.CreateCall( |
18062 | Callee: CGM.getIntrinsic(IID: Intrinsic::ppc_test_data_class, Tys: Op0->getType()), |
18063 | Args: {Op0, Op1}, Name: "test_data_class" ); |
18064 | } |
18065 | case PPC::BI__builtin_ppc_maxfe: { |
18066 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18067 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18068 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18069 | Value *Op3 = EmitScalarExpr(E: E->getArg(Arg: 3)); |
18070 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::ppc_maxfe), |
18071 | Args: {Op0, Op1, Op2, Op3}); |
18072 | } |
18073 | case PPC::BI__builtin_ppc_maxfl: { |
18074 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18075 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18076 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18077 | Value *Op3 = EmitScalarExpr(E: E->getArg(Arg: 3)); |
18078 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::ppc_maxfl), |
18079 | Args: {Op0, Op1, Op2, Op3}); |
18080 | } |
18081 | case PPC::BI__builtin_ppc_maxfs: { |
18082 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18083 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18084 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18085 | Value *Op3 = EmitScalarExpr(E: E->getArg(Arg: 3)); |
18086 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::ppc_maxfs), |
18087 | Args: {Op0, Op1, Op2, Op3}); |
18088 | } |
18089 | case PPC::BI__builtin_ppc_minfe: { |
18090 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18091 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18092 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18093 | Value *Op3 = EmitScalarExpr(E: E->getArg(Arg: 3)); |
18094 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::ppc_minfe), |
18095 | Args: {Op0, Op1, Op2, Op3}); |
18096 | } |
18097 | case PPC::BI__builtin_ppc_minfl: { |
18098 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18099 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18100 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18101 | Value *Op3 = EmitScalarExpr(E: E->getArg(Arg: 3)); |
18102 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::ppc_minfl), |
18103 | Args: {Op0, Op1, Op2, Op3}); |
18104 | } |
18105 | case PPC::BI__builtin_ppc_minfs: { |
18106 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18107 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18108 | Value *Op2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18109 | Value *Op3 = EmitScalarExpr(E: E->getArg(Arg: 3)); |
18110 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::ppc_minfs), |
18111 | Args: {Op0, Op1, Op2, Op3}); |
18112 | } |
18113 | case PPC::BI__builtin_ppc_swdiv: |
18114 | case PPC::BI__builtin_ppc_swdivs: { |
18115 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18116 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18117 | return Builder.CreateFDiv(L: Op0, R: Op1, Name: "swdiv" ); |
18118 | } |
18119 | case PPC::BI__builtin_ppc_set_fpscr_rn: |
18120 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::ppc_setrnd), |
18121 | Args: {EmitScalarExpr(E: E->getArg(Arg: 0))}); |
18122 | case PPC::BI__builtin_ppc_mffs: |
18123 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: Intrinsic::ppc_readflm)); |
18124 | } |
18125 | } |
18126 | |
18127 | namespace { |
// If \p E is not a null pointer, insert an address space cast if necessary to
// match the return type of \p E.
18130 | Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF, |
18131 | const CallExpr *E = nullptr) { |
18132 | auto *F = CGF.CGM.getIntrinsic(IID: Intrinsic::amdgcn_dispatch_ptr); |
18133 | auto *Call = CGF.Builder.CreateCall(Callee: F); |
18134 | Call->addRetAttr( |
18135 | Attr: Attribute::getWithDereferenceableBytes(Context&: Call->getContext(), Bytes: 64)); |
18136 | Call->addRetAttr(Attr: Attribute::getWithAlignment(Context&: Call->getContext(), Alignment: Align(4))); |
18137 | if (!E) |
18138 | return Call; |
18139 | QualType BuiltinRetType = E->getType(); |
18140 | auto *RetTy = cast<llvm::PointerType>(Val: CGF.ConvertType(T: BuiltinRetType)); |
18141 | if (RetTy == Call->getType()) |
18142 | return Call; |
18143 | return CGF.Builder.CreateAddrSpaceCast(V: Call, DestTy: RetTy); |
18144 | } |
18145 | |
18146 | Value *EmitAMDGPUImplicitArgPtr(CodeGenFunction &CGF) { |
18147 | auto *F = CGF.CGM.getIntrinsic(IID: Intrinsic::amdgcn_implicitarg_ptr); |
18148 | auto *Call = CGF.Builder.CreateCall(Callee: F); |
18149 | Call->addRetAttr( |
18150 | Attr: Attribute::getWithDereferenceableBytes(Context&: Call->getContext(), Bytes: 256)); |
18151 | Call->addRetAttr(Attr: Attribute::getWithAlignment(Context&: Call->getContext(), Alignment: Align(8))); |
18152 | return Call; |
18153 | } |
18154 | |
// \p Index is 0, 1, or 2 for the x, y, and z dimensions, respectively.
18156 | /// Emit code based on Code Object ABI version. |
18157 | /// COV_4 : Emit code to use dispatch ptr |
18158 | /// COV_5+ : Emit code to use implicitarg ptr |
18159 | /// COV_NONE : Emit code to load a global variable "__oclc_ABI_version" |
18160 | /// and use its value for COV_4 or COV_5+ approach. It is used for |
18161 | /// compiling device libraries in an ABI-agnostic way. |
18162 | /// |
18163 | /// Note: "__oclc_ABI_version" is supposed to be emitted and intialized by |
18164 | /// clang during compilation of user code. |
18165 | Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) { |
18166 | llvm::LoadInst *LD; |
18167 | |
18168 | auto Cov = CGF.getTarget().getTargetOpts().CodeObjectVersion; |
18169 | |
18170 | if (Cov == CodeObjectVersionKind::COV_None) { |
18171 | StringRef Name = "__oclc_ABI_version" ; |
18172 | auto *ABIVersionC = CGF.CGM.getModule().getNamedGlobal(Name); |
18173 | if (!ABIVersionC) |
18174 | ABIVersionC = new llvm::GlobalVariable( |
18175 | CGF.CGM.getModule(), CGF.Int32Ty, false, |
18176 | llvm::GlobalValue::ExternalLinkage, nullptr, Name, nullptr, |
18177 | llvm::GlobalVariable::NotThreadLocal, |
18178 | CGF.CGM.getContext().getTargetAddressSpace(AS: LangAS::opencl_constant)); |
18179 | |
// This load will be eliminated by the IPSCCP pass because the variable is a
// constant weak_odr definition without externally_initialized. Either
// changing it to weak or adding externally_initialized will keep the load.
18183 | Value *ABIVersion = CGF.Builder.CreateAlignedLoad(Ty: CGF.Int32Ty, Addr: ABIVersionC, |
18184 | Align: CGF.CGM.getIntAlign()); |
18185 | |
18186 | Value *IsCOV5 = CGF.Builder.CreateICmpSGE( |
18187 | LHS: ABIVersion, |
18188 | RHS: llvm::ConstantInt::get(Ty: CGF.Int32Ty, V: CodeObjectVersionKind::COV_5)); |
18189 | |
18190 | // Indexing the implicit kernarg segment. |
18191 | Value *ImplicitGEP = CGF.Builder.CreateConstGEP1_32( |
18192 | Ty: CGF.Int8Ty, Ptr: EmitAMDGPUImplicitArgPtr(CGF), Idx0: 12 + Index * 2); |
18193 | |
18194 | // Indexing the HSA kernel_dispatch_packet struct. |
18195 | Value *DispatchGEP = CGF.Builder.CreateConstGEP1_32( |
18196 | Ty: CGF.Int8Ty, Ptr: EmitAMDGPUDispatchPtr(CGF), Idx0: 4 + Index * 2); |
18197 | |
18198 | auto Result = CGF.Builder.CreateSelect(C: IsCOV5, True: ImplicitGEP, False: DispatchGEP); |
18199 | LD = CGF.Builder.CreateLoad( |
18200 | Addr: Address(Result, CGF.Int16Ty, CharUnits::fromQuantity(Quantity: 2))); |
18201 | } else { |
18202 | Value *GEP = nullptr; |
18203 | if (Cov >= CodeObjectVersionKind::COV_5) { |
18204 | // Indexing the implicit kernarg segment. |
18205 | GEP = CGF.Builder.CreateConstGEP1_32( |
18206 | Ty: CGF.Int8Ty, Ptr: EmitAMDGPUImplicitArgPtr(CGF), Idx0: 12 + Index * 2); |
18207 | } else { |
18208 | // Indexing the HSA kernel_dispatch_packet struct. |
18209 | GEP = CGF.Builder.CreateConstGEP1_32( |
18210 | Ty: CGF.Int8Ty, Ptr: EmitAMDGPUDispatchPtr(CGF), Idx0: 4 + Index * 2); |
18211 | } |
18212 | LD = CGF.Builder.CreateLoad( |
18213 | Addr: Address(GEP, CGF.Int16Ty, CharUnits::fromQuantity(Quantity: 2))); |
18214 | } |
18215 | |
18216 | llvm::MDBuilder MDHelper(CGF.getLLVMContext()); |
18217 | llvm::MDNode *RNode = MDHelper.createRange(Lo: APInt(16, 1), |
18218 | Hi: APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1)); |
18219 | LD->setMetadata(KindID: llvm::LLVMContext::MD_range, Node: RNode); |
18220 | LD->setMetadata(KindID: llvm::LLVMContext::MD_noundef, |
18221 | Node: llvm::MDNode::get(Context&: CGF.getLLVMContext(), MDs: std::nullopt)); |
18222 | LD->setMetadata(KindID: llvm::LLVMContext::MD_invariant_load, |
18223 | Node: llvm::MDNode::get(Context&: CGF.getLLVMContext(), MDs: std::nullopt)); |
18224 | return LD; |
18225 | } |
18226 | |
// \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
18228 | Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) { |
18229 | const unsigned XOffset = 12; |
18230 | auto *DP = EmitAMDGPUDispatchPtr(CGF); |
18231 | // Indexing the HSA kernel_dispatch_packet struct. |
18232 | auto *Offset = llvm::ConstantInt::get(Ty: CGF.Int32Ty, V: XOffset + Index * 4); |
18233 | auto *GEP = CGF.Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: DP, IdxList: Offset); |
18234 | auto *LD = CGF.Builder.CreateLoad( |
18235 | Addr: Address(GEP, CGF.Int32Ty, CharUnits::fromQuantity(Quantity: 4))); |
18236 | LD->setMetadata(KindID: llvm::LLVMContext::MD_invariant_load, |
18237 | Node: llvm::MDNode::get(Context&: CGF.getLLVMContext(), MDs: std::nullopt)); |
18238 | return LD; |
18239 | } |
18240 | } // namespace |
18241 | |
// For processing memory ordering and memory scope arguments of various
// amdgcn builtins.
// \p Order takes a C++11-compatible memory-ordering specifier and converts
// it into LLVM's memory ordering specifier using the atomic C ABI, writing
// the result to \p AO. \p Scope takes either a string literal naming the
// scope or one of the __MEMORY_SCOPE_* enumerators and converts it into an
// AMDGCN-specific SyncScope::ID, writing the result to \p SSID.
18248 | void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope, |
18249 | llvm::AtomicOrdering &AO, |
18250 | llvm::SyncScope::ID &SSID) { |
18251 | int ord = cast<llvm::ConstantInt>(Val: Order)->getZExtValue(); |
18252 | |
18253 | // Map C11/C++11 memory ordering to LLVM memory ordering |
18254 | assert(llvm::isValidAtomicOrderingCABI(ord)); |
18255 | switch (static_cast<llvm::AtomicOrderingCABI>(ord)) { |
18256 | case llvm::AtomicOrderingCABI::acquire: |
18257 | case llvm::AtomicOrderingCABI::consume: |
18258 | AO = llvm::AtomicOrdering::Acquire; |
18259 | break; |
18260 | case llvm::AtomicOrderingCABI::release: |
18261 | AO = llvm::AtomicOrdering::Release; |
18262 | break; |
18263 | case llvm::AtomicOrderingCABI::acq_rel: |
18264 | AO = llvm::AtomicOrdering::AcquireRelease; |
18265 | break; |
18266 | case llvm::AtomicOrderingCABI::seq_cst: |
18267 | AO = llvm::AtomicOrdering::SequentiallyConsistent; |
18268 | break; |
18269 | case llvm::AtomicOrderingCABI::relaxed: |
18270 | AO = llvm::AtomicOrdering::Monotonic; |
18271 | break; |
18272 | } |
18273 | |
18274 | // Some of the atomic builtins take the scope as a string name. |
18275 | StringRef scp; |
18276 | if (llvm::getConstantStringInfo(V: Scope, Str&: scp)) { |
18277 | SSID = getLLVMContext().getOrInsertSyncScopeID(SSN: scp); |
18278 | return; |
18279 | } |
18280 | |
18281 | // Older builtins had an enum argument for the memory scope. |
18282 | int scope = cast<llvm::ConstantInt>(Val: Scope)->getZExtValue(); |
18283 | switch (scope) { |
18284 | case 0: // __MEMORY_SCOPE_SYSTEM |
18285 | SSID = llvm::SyncScope::System; |
18286 | break; |
18287 | case 1: // __MEMORY_SCOPE_DEVICE |
18288 | SSID = getLLVMContext().getOrInsertSyncScopeID(SSN: "agent" ); |
18289 | break; |
18290 | case 2: // __MEMORY_SCOPE_WRKGRP |
18291 | SSID = getLLVMContext().getOrInsertSyncScopeID(SSN: "workgroup" ); |
18292 | break; |
18293 | case 3: // __MEMORY_SCOPE_WVFRNT |
18294 | SSID = getLLVMContext().getOrInsertSyncScopeID(SSN: "wavefront" ); |
18295 | break; |
18296 | case 4: // __MEMORY_SCOPE_SINGLE |
18297 | SSID = llvm::SyncScope::SingleThread; |
18298 | break; |
18299 | default: |
18300 | SSID = llvm::SyncScope::System; |
18301 | break; |
18302 | } |
18303 | } |
18304 | |
18305 | llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments, |
18306 | unsigned Idx, |
18307 | const CallExpr *E) { |
18308 | llvm::Value *Arg = nullptr; |
18309 | if ((ICEArguments & (1 << Idx)) == 0) { |
18310 | Arg = EmitScalarExpr(E: E->getArg(Arg: Idx)); |
18311 | } else { |
18312 | // If this is required to be a constant, constant fold it so that we |
18313 | // know that the generated intrinsic gets a ConstantInt. |
18314 | std::optional<llvm::APSInt> Result = |
18315 | E->getArg(Arg: Idx)->getIntegerConstantExpr(Ctx: getContext()); |
18316 | assert(Result && "Expected argument to be a constant" ); |
18317 | Arg = llvm::ConstantInt::get(Context&: getLLVMContext(), V: *Result); |
18318 | } |
18319 | return Arg; |
18320 | } |
18321 | |
18322 | Intrinsic::ID getDotProductIntrinsic(QualType QT, int elementCount) { |
18323 | if (QT->hasFloatingRepresentation()) { |
18324 | switch (elementCount) { |
18325 | case 2: |
18326 | return Intrinsic::dx_dot2; |
18327 | case 3: |
18328 | return Intrinsic::dx_dot3; |
18329 | case 4: |
18330 | return Intrinsic::dx_dot4; |
18331 | } |
18332 | } |
18333 | if (QT->hasSignedIntegerRepresentation()) |
18334 | return Intrinsic::dx_sdot; |
18335 | |
18336 | assert(QT->hasUnsignedIntegerRepresentation()); |
18337 | return Intrinsic::dx_udot; |
18338 | } |
18339 | |
18340 | Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID, |
18341 | const CallExpr *E) { |
18342 | if (!getLangOpts().HLSL) |
18343 | return nullptr; |
18344 | |
18345 | switch (BuiltinID) { |
18346 | case Builtin::BI__builtin_hlsl_elementwise_all: { |
18347 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18348 | return Builder.CreateIntrinsic( |
18349 | /*ReturnType=*/RetTy: llvm::Type::getInt1Ty(C&: getLLVMContext()), |
18350 | ID: CGM.getHLSLRuntime().getAllIntrinsic(), Args: ArrayRef<Value *>{Op0}, FMFSource: nullptr, |
18351 | Name: "hlsl.all" ); |
18352 | } |
18353 | case Builtin::BI__builtin_hlsl_elementwise_any: { |
18354 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18355 | return Builder.CreateIntrinsic( |
18356 | /*ReturnType=*/RetTy: llvm::Type::getInt1Ty(C&: getLLVMContext()), |
18357 | ID: CGM.getHLSLRuntime().getAnyIntrinsic(), Args: ArrayRef<Value *>{Op0}, FMFSource: nullptr, |
18358 | Name: "hlsl.any" ); |
18359 | } |
18360 | case Builtin::BI__builtin_hlsl_elementwise_clamp: { |
18361 | Value *OpX = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18362 | Value *OpMin = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18363 | Value *OpMax = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18364 | |
18365 | QualType Ty = E->getArg(Arg: 0)->getType(); |
18366 | bool IsUnsigned = false; |
18367 | if (auto *VecTy = Ty->getAs<VectorType>()) |
18368 | Ty = VecTy->getElementType(); |
18369 | IsUnsigned = Ty->isUnsignedIntegerType(); |
18370 | return Builder.CreateIntrinsic( |
18371 | /*ReturnType=*/RetTy: OpX->getType(), |
18372 | ID: IsUnsigned ? Intrinsic::dx_uclamp : Intrinsic::dx_clamp, |
18373 | Args: ArrayRef<Value *>{OpX, OpMin, OpMax}, FMFSource: nullptr, Name: "dx.clamp" ); |
18374 | } |
18375 | case Builtin::BI__builtin_hlsl_dot: { |
18376 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18377 | Value *Op1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18378 | llvm::Type *T0 = Op0->getType(); |
18379 | llvm::Type *T1 = Op1->getType(); |
18380 | if (!T0->isVectorTy() && !T1->isVectorTy()) { |
18381 | if (T0->isFloatingPointTy()) |
18382 | return Builder.CreateFMul(L: Op0, R: Op1, Name: "dx.dot" ); |
18383 | |
18384 | if (T0->isIntegerTy()) |
18385 | return Builder.CreateMul(LHS: Op0, RHS: Op1, Name: "dx.dot" ); |
18386 | |
18387 | // Bools should have been promoted |
18388 | llvm_unreachable( |
18389 | "Scalar dot product is only supported on ints and floats." ); |
18390 | } |
18391 | // A VectorSplat should have happened |
18392 | assert(T0->isVectorTy() && T1->isVectorTy() && |
18393 | "Dot product of vector and scalar is not supported." ); |
18394 | |
18395 | // A vector sext or sitofp should have happened |
18396 | assert(T0->getScalarType() == T1->getScalarType() && |
18397 | "Dot product of vectors need the same element types." ); |
18398 | |
18399 | auto *VecTy0 = E->getArg(Arg: 0)->getType()->getAs<VectorType>(); |
18400 | [[maybe_unused]] auto *VecTy1 = |
18401 | E->getArg(Arg: 1)->getType()->getAs<VectorType>(); |
// An HLSLVectorTruncation should have happened
18403 | assert(VecTy0->getNumElements() == VecTy1->getNumElements() && |
18404 | "Dot product requires vectors to be of the same size." ); |
18405 | |
18406 | return Builder.CreateIntrinsic( |
18407 | /*ReturnType=*/RetTy: T0->getScalarType(), |
18408 | ID: getDotProductIntrinsic(QT: E->getArg(Arg: 0)->getType(), |
18409 | elementCount: VecTy0->getNumElements()), |
18410 | Args: ArrayRef<Value *>{Op0, Op1}, FMFSource: nullptr, Name: "dx.dot" ); |
18411 | } break; |
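  // Illustrative lowerings for the case above: dot(float3, float3) becomes a
  // call to the dx.dot3 intrinsic returning the scalar element type, while
  // scalar operands lower to a plain fmul (floating point) or mul (integer).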
18412 | case Builtin::BI__builtin_hlsl_lerp: { |
18413 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18414 | Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18415 | Value *S = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18416 | if (!E->getArg(Arg: 0)->getType()->hasFloatingRepresentation()) |
18417 | llvm_unreachable("lerp operand must have a float representation" ); |
18418 | return Builder.CreateIntrinsic( |
18419 | /*ReturnType=*/RetTy: X->getType(), ID: CGM.getHLSLRuntime().getLerpIntrinsic(), |
18420 | Args: ArrayRef<Value *>{X, Y, S}, FMFSource: nullptr, Name: "hlsl.lerp" ); |
18421 | } |
18422 | case Builtin::BI__builtin_hlsl_elementwise_frac: { |
18423 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18424 | if (!E->getArg(Arg: 0)->getType()->hasFloatingRepresentation()) |
18425 | llvm_unreachable("frac operand must have a float representation" ); |
18426 | return Builder.CreateIntrinsic( |
18427 | /*ReturnType=*/RetTy: Op0->getType(), ID: Intrinsic::dx_frac, |
18428 | Args: ArrayRef<Value *>{Op0}, FMFSource: nullptr, Name: "dx.frac" ); |
18429 | } |
18430 | case Builtin::BI__builtin_hlsl_elementwise_isinf: { |
18431 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18432 | llvm::Type *Xty = Op0->getType(); |
18433 | llvm::Type *retType = llvm::Type::getInt1Ty(C&: this->getLLVMContext()); |
18434 | if (Xty->isVectorTy()) { |
18435 | auto *XVecTy = E->getArg(Arg: 0)->getType()->getAs<VectorType>(); |
18436 | retType = llvm::VectorType::get( |
18437 | ElementType: retType, EC: ElementCount::getFixed(MinVal: XVecTy->getNumElements())); |
18438 | } |
18439 | if (!E->getArg(Arg: 0)->getType()->hasFloatingRepresentation()) |
18440 | llvm_unreachable("isinf operand must have a float representation" ); |
18441 | return Builder.CreateIntrinsic(RetTy: retType, ID: Intrinsic::dx_isinf, |
18442 | Args: ArrayRef<Value *>{Op0}, FMFSource: nullptr, Name: "dx.isinf" ); |
18443 | } |
18444 | case Builtin::BI__builtin_hlsl_mad: { |
18445 | Value *M = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18446 | Value *A = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18447 | Value *B = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18448 | if (E->getArg(Arg: 0)->getType()->hasFloatingRepresentation()) |
18449 | return Builder.CreateIntrinsic( |
18450 | /*ReturnType*/ RetTy: M->getType(), ID: Intrinsic::fmuladd, |
18451 | Args: ArrayRef<Value *>{M, A, B}, FMFSource: nullptr, Name: "hlsl.fmad" ); |
18452 | |
18453 | if (E->getArg(Arg: 0)->getType()->hasSignedIntegerRepresentation()) { |
18454 | if (CGM.getTarget().getTriple().getArch() == llvm::Triple::dxil) |
18455 | return Builder.CreateIntrinsic( |
18456 | /*ReturnType*/ RetTy: M->getType(), ID: Intrinsic::dx_imad, |
18457 | Args: ArrayRef<Value *>{M, A, B}, FMFSource: nullptr, Name: "dx.imad" ); |
18458 | |
18459 | Value *Mul = Builder.CreateNSWMul(LHS: M, RHS: A); |
18460 | return Builder.CreateNSWAdd(LHS: Mul, RHS: B); |
18461 | } |
18462 | assert(E->getArg(0)->getType()->hasUnsignedIntegerRepresentation()); |
18463 | if (CGM.getTarget().getTriple().getArch() == llvm::Triple::dxil) |
18464 | return Builder.CreateIntrinsic( |
18465 | /*ReturnType=*/RetTy: M->getType(), ID: Intrinsic::dx_umad, |
18466 | Args: ArrayRef<Value *>{M, A, B}, FMFSource: nullptr, Name: "dx.umad" ); |
18467 | |
18468 | Value *Mul = Builder.CreateNUWMul(LHS: M, RHS: A); |
18469 | return Builder.CreateNUWAdd(LHS: Mul, RHS: B); |
18470 | } |
18471 | case Builtin::BI__builtin_hlsl_elementwise_rcp: { |
18472 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18473 | if (!E->getArg(Arg: 0)->getType()->hasFloatingRepresentation()) |
18474 | llvm_unreachable("rcp operand must have a float representation" ); |
18475 | llvm::Type *Ty = Op0->getType(); |
18476 | llvm::Type *EltTy = Ty->getScalarType(); |
18477 | Constant *One = Ty->isVectorTy() |
18478 | ? ConstantVector::getSplat( |
18479 | EC: ElementCount::getFixed( |
18480 | MinVal: cast<FixedVectorType>(Val: Ty)->getNumElements()), |
18481 | Elt: ConstantFP::get(Ty: EltTy, V: 1.0)) |
18482 | : ConstantFP::get(Ty: EltTy, V: 1.0); |
18483 | return Builder.CreateFDiv(L: One, R: Op0, Name: "hlsl.rcp" ); |
18484 | } |
18485 | case Builtin::BI__builtin_hlsl_elementwise_rsqrt: { |
18486 | Value *Op0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18487 | if (!E->getArg(Arg: 0)->getType()->hasFloatingRepresentation()) |
18488 | llvm_unreachable("rsqrt operand must have a float representation" ); |
18489 | return Builder.CreateIntrinsic( |
18490 | /*ReturnType=*/RetTy: Op0->getType(), ID: CGM.getHLSLRuntime().getRsqrtIntrinsic(), |
18491 | Args: ArrayRef<Value *>{Op0}, FMFSource: nullptr, Name: "hlsl.rsqrt" ); |
18492 | } |
18493 | case Builtin::BI__builtin_hlsl_wave_get_lane_index: { |
18494 | return EmitRuntimeCall(callee: CGM.CreateRuntimeFunction( |
18495 | Ty: llvm::FunctionType::get(Result: IntTy, Params: {}, isVarArg: false), Name: "__hlsl_wave_get_lane_index" , |
18496 | ExtraAttrs: {}, Local: false, AssumeConvergent: true)); |
18497 | } |
18498 | } |
18499 | return nullptr; |
18500 | } |
18501 | |
18502 | void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst, |
18503 | const CallExpr *E) { |
18504 | constexpr const char *Tag = "amdgpu-as" ; |
18505 | |
18506 | LLVMContext &Ctx = Inst->getContext(); |
18507 | SmallVector<MMRAMetadata::TagT, 3> MMRAs; |
18508 | for (unsigned K = 2; K < E->getNumArgs(); ++K) { |
18509 | llvm::Value *V = EmitScalarExpr(E: E->getArg(Arg: K)); |
18510 | StringRef AS; |
18511 | if (llvm::getConstantStringInfo(V, Str&: AS)) { |
18512 | MMRAs.push_back(Elt: {Tag, AS}); |
18513 | // TODO: Delete the resulting unused constant? |
18514 | continue; |
18515 | } |
18516 | CGM.Error(loc: E->getExprLoc(), |
18517 | error: "expected an address space name as a string literal" ); |
18518 | } |
18519 | |
18520 | llvm::sort(C&: MMRAs); |
18521 | MMRAs.erase(CS: llvm::unique(R&: MMRAs), CE: MMRAs.end()); |
18522 | Inst->setMetadata(KindID: LLVMContext::MD_mmra, Node: MMRAMetadata::getMD(Ctx, Tags: MMRAs)); |
18523 | } |
18524 | |
18525 | Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID, |
18526 | const CallExpr *E) { |
18527 | llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent; |
18528 | llvm::SyncScope::ID SSID; |
18529 | switch (BuiltinID) { |
18530 | case AMDGPU::BI__builtin_amdgcn_div_scale: |
18531 | case AMDGPU::BI__builtin_amdgcn_div_scalef: { |
// Translate from the intrinsic's struct return to the builtin's out
// argument.
18534 | |
18535 | Address FlagOutPtr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 3)); |
18536 | |
18537 | llvm::Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18538 | llvm::Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18539 | llvm::Value *Z = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18540 | |
18541 | llvm::Function *Callee = CGM.getIntrinsic(IID: Intrinsic::amdgcn_div_scale, |
18542 | Tys: X->getType()); |
18543 | |
18544 | llvm::Value *Tmp = Builder.CreateCall(Callee, Args: {X, Y, Z}); |
18545 | |
18546 | llvm::Value *Result = Builder.CreateExtractValue(Agg: Tmp, Idxs: 0); |
18547 | llvm::Value *Flag = Builder.CreateExtractValue(Agg: Tmp, Idxs: 1); |
18548 | |
18549 | llvm::Type *RealFlagType = FlagOutPtr.getElementType(); |
18550 | |
18551 | llvm::Value *FlagExt = Builder.CreateZExt(V: Flag, DestTy: RealFlagType); |
18552 | Builder.CreateStore(Val: FlagExt, Addr: FlagOutPtr); |
18553 | return Result; |
18554 | } |
18555 | case AMDGPU::BI__builtin_amdgcn_div_fmas: |
18556 | case AMDGPU::BI__builtin_amdgcn_div_fmasf: { |
18557 | llvm::Value *Src0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18558 | llvm::Value *Src1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18559 | llvm::Value *Src2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18560 | llvm::Value *Src3 = EmitScalarExpr(E: E->getArg(Arg: 3)); |
18561 | |
18562 | llvm::Function *F = CGM.getIntrinsic(IID: Intrinsic::amdgcn_div_fmas, |
18563 | Tys: Src0->getType()); |
18564 | llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Arg: Src3); |
18565 | return Builder.CreateCall(Callee: F, Args: {Src0, Src1, Src2, Src3ToBool}); |
18566 | } |
18567 | |
18568 | case AMDGPU::BI__builtin_amdgcn_ds_swizzle: |
18569 | return emitBuiltinWithOneOverloadedType<2>(CGF&: *this, E, |
18570 | IntrinsicID: Intrinsic::amdgcn_ds_swizzle); |
18571 | case AMDGPU::BI__builtin_amdgcn_mov_dpp8: |
18572 | return emitBuiltinWithOneOverloadedType<2>(CGF&: *this, E, |
18573 | IntrinsicID: Intrinsic::amdgcn_mov_dpp8); |
18574 | case AMDGPU::BI__builtin_amdgcn_mov_dpp: |
18575 | case AMDGPU::BI__builtin_amdgcn_update_dpp: { |
18576 | llvm::SmallVector<llvm::Value *, 6> Args; |
18577 | // Find out if any arguments are required to be integer constant |
18578 | // expressions. |
18579 | unsigned ICEArguments = 0; |
18580 | ASTContext::GetBuiltinTypeError Error; |
18581 | getContext().GetBuiltinType(ID: BuiltinID, Error, IntegerConstantArgs: &ICEArguments); |
18582 | assert(Error == ASTContext::GE_None && "Should not codegen an error" ); |
18583 | for (unsigned I = 0; I != E->getNumArgs(); ++I) { |
18584 | Args.push_back(Elt: EmitScalarOrConstFoldImmArg(ICEArguments, Idx: I, E)); |
18585 | } |
18586 | assert(Args.size() == 5 || Args.size() == 6); |
18587 | if (Args.size() == 5) |
18588 | Args.insert(I: Args.begin(), Elt: llvm::PoisonValue::get(T: Args[0]->getType())); |
18589 | Function *F = |
18590 | CGM.getIntrinsic(IID: Intrinsic::amdgcn_update_dpp, Tys: Args[0]->getType()); |
18591 | return Builder.CreateCall(Callee: F, Args); |
18592 | } |
18593 | case AMDGPU::BI__builtin_amdgcn_permlane16: |
18594 | case AMDGPU::BI__builtin_amdgcn_permlanex16: |
18595 | return emitBuiltinWithOneOverloadedType<6>( |
18596 | CGF&: *this, E, |
18597 | IntrinsicID: BuiltinID == AMDGPU::BI__builtin_amdgcn_permlane16 |
18598 | ? Intrinsic::amdgcn_permlane16 |
18599 | : Intrinsic::amdgcn_permlanex16); |
18600 | case AMDGPU::BI__builtin_amdgcn_permlane64: |
18601 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, |
18602 | IntrinsicID: Intrinsic::amdgcn_permlane64); |
18603 | case AMDGPU::BI__builtin_amdgcn_readlane: |
18604 | return emitBuiltinWithOneOverloadedType<2>(CGF&: *this, E, |
18605 | IntrinsicID: Intrinsic::amdgcn_readlane); |
18606 | case AMDGPU::BI__builtin_amdgcn_readfirstlane: |
18607 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, |
18608 | IntrinsicID: Intrinsic::amdgcn_readfirstlane); |
18609 | case AMDGPU::BI__builtin_amdgcn_div_fixup: |
18610 | case AMDGPU::BI__builtin_amdgcn_div_fixupf: |
18611 | case AMDGPU::BI__builtin_amdgcn_div_fixuph: |
18612 | return emitBuiltinWithOneOverloadedType<3>(CGF&: *this, E, |
18613 | IntrinsicID: Intrinsic::amdgcn_div_fixup); |
18614 | case AMDGPU::BI__builtin_amdgcn_trig_preop: |
18615 | case AMDGPU::BI__builtin_amdgcn_trig_preopf: |
18616 | return emitFPIntBuiltin(CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_trig_preop); |
18617 | case AMDGPU::BI__builtin_amdgcn_rcp: |
18618 | case AMDGPU::BI__builtin_amdgcn_rcpf: |
18619 | case AMDGPU::BI__builtin_amdgcn_rcph: |
18620 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_rcp); |
18621 | case AMDGPU::BI__builtin_amdgcn_sqrt: |
18622 | case AMDGPU::BI__builtin_amdgcn_sqrtf: |
18623 | case AMDGPU::BI__builtin_amdgcn_sqrth: |
18624 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, |
18625 | IntrinsicID: Intrinsic::amdgcn_sqrt); |
18626 | case AMDGPU::BI__builtin_amdgcn_rsq: |
18627 | case AMDGPU::BI__builtin_amdgcn_rsqf: |
18628 | case AMDGPU::BI__builtin_amdgcn_rsqh: |
18629 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_rsq); |
18630 | case AMDGPU::BI__builtin_amdgcn_rsq_clamp: |
18631 | case AMDGPU::BI__builtin_amdgcn_rsq_clampf: |
18632 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, |
18633 | IntrinsicID: Intrinsic::amdgcn_rsq_clamp); |
18634 | case AMDGPU::BI__builtin_amdgcn_sinf: |
18635 | case AMDGPU::BI__builtin_amdgcn_sinh: |
18636 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_sin); |
18637 | case AMDGPU::BI__builtin_amdgcn_cosf: |
18638 | case AMDGPU::BI__builtin_amdgcn_cosh: |
18639 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_cos); |
18640 | case AMDGPU::BI__builtin_amdgcn_dispatch_ptr: |
18641 | return EmitAMDGPUDispatchPtr(CGF&: *this, E); |
18642 | case AMDGPU::BI__builtin_amdgcn_logf: |
18643 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_log); |
18644 | case AMDGPU::BI__builtin_amdgcn_exp2f: |
18645 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, |
18646 | IntrinsicID: Intrinsic::amdgcn_exp2); |
18647 | case AMDGPU::BI__builtin_amdgcn_log_clampf: |
18648 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, |
18649 | IntrinsicID: Intrinsic::amdgcn_log_clamp); |
18650 | case AMDGPU::BI__builtin_amdgcn_ldexp: |
18651 | case AMDGPU::BI__builtin_amdgcn_ldexpf: { |
18652 | llvm::Value *Src0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18653 | llvm::Value *Src1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18654 | llvm::Function *F = |
18655 | CGM.getIntrinsic(IID: Intrinsic::ldexp, Tys: {Src0->getType(), Src1->getType()}); |
18656 | return Builder.CreateCall(Callee: F, Args: {Src0, Src1}); |
18657 | } |
18658 | case AMDGPU::BI__builtin_amdgcn_ldexph: { |
// The raw instruction has different behavior for out-of-bounds exponent
// values (implicit truncation instead of saturation to short_min/short_max).
18661 | llvm::Value *Src0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18662 | llvm::Value *Src1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18663 | llvm::Function *F = |
18664 | CGM.getIntrinsic(IID: Intrinsic::ldexp, Tys: {Src0->getType(), Int16Ty}); |
18665 | return Builder.CreateCall(Callee: F, Args: {Src0, Builder.CreateTrunc(V: Src1, DestTy: Int16Ty)}); |
18666 | } |
18667 | case AMDGPU::BI__builtin_amdgcn_frexp_mant: |
18668 | case AMDGPU::BI__builtin_amdgcn_frexp_mantf: |
18669 | case AMDGPU::BI__builtin_amdgcn_frexp_manth: |
18670 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, |
18671 | IntrinsicID: Intrinsic::amdgcn_frexp_mant); |
18672 | case AMDGPU::BI__builtin_amdgcn_frexp_exp: |
18673 | case AMDGPU::BI__builtin_amdgcn_frexp_expf: { |
18674 | Value *Src0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18675 | Function *F = CGM.getIntrinsic(IID: Intrinsic::amdgcn_frexp_exp, |
18676 | Tys: { Builder.getInt32Ty(), Src0->getType() }); |
18677 | return Builder.CreateCall(Callee: F, Args: Src0); |
18678 | } |
18679 | case AMDGPU::BI__builtin_amdgcn_frexp_exph: { |
18680 | Value *Src0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18681 | Function *F = CGM.getIntrinsic(IID: Intrinsic::amdgcn_frexp_exp, |
18682 | Tys: { Builder.getInt16Ty(), Src0->getType() }); |
18683 | return Builder.CreateCall(Callee: F, Args: Src0); |
18684 | } |
18685 | case AMDGPU::BI__builtin_amdgcn_fract: |
18686 | case AMDGPU::BI__builtin_amdgcn_fractf: |
18687 | case AMDGPU::BI__builtin_amdgcn_fracth: |
18688 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, |
18689 | IntrinsicID: Intrinsic::amdgcn_fract); |
18690 | case AMDGPU::BI__builtin_amdgcn_lerp: |
18691 | return emitBuiltinWithOneOverloadedType<3>(CGF&: *this, E, |
18692 | IntrinsicID: Intrinsic::amdgcn_lerp); |
18693 | case AMDGPU::BI__builtin_amdgcn_ubfe: |
18694 | return emitBuiltinWithOneOverloadedType<3>(CGF&: *this, E, |
18695 | IntrinsicID: Intrinsic::amdgcn_ubfe); |
18696 | case AMDGPU::BI__builtin_amdgcn_sbfe: |
18697 | return emitBuiltinWithOneOverloadedType<3>(CGF&: *this, E, |
18698 | IntrinsicID: Intrinsic::amdgcn_sbfe); |
18699 | case AMDGPU::BI__builtin_amdgcn_ballot_w32: |
18700 | case AMDGPU::BI__builtin_amdgcn_ballot_w64: { |
18701 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
18702 | llvm::Value *Src = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18703 | Function *F = CGM.getIntrinsic(IID: Intrinsic::amdgcn_ballot, Tys: { ResultType }); |
18704 | return Builder.CreateCall(Callee: F, Args: { Src }); |
18705 | } |
18706 | case AMDGPU::BI__builtin_amdgcn_uicmp: |
18707 | case AMDGPU::BI__builtin_amdgcn_uicmpl: |
18708 | case AMDGPU::BI__builtin_amdgcn_sicmp: |
18709 | case AMDGPU::BI__builtin_amdgcn_sicmpl: { |
18710 | llvm::Value *Src0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18711 | llvm::Value *Src1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18712 | llvm::Value *Src2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18713 | |
// FIXME-GFX10: How should the 32-bit mask be handled?
18715 | Function *F = CGM.getIntrinsic(IID: Intrinsic::amdgcn_icmp, |
18716 | Tys: { Builder.getInt64Ty(), Src0->getType() }); |
18717 | return Builder.CreateCall(Callee: F, Args: { Src0, Src1, Src2 }); |
18718 | } |
18719 | case AMDGPU::BI__builtin_amdgcn_fcmp: |
18720 | case AMDGPU::BI__builtin_amdgcn_fcmpf: { |
18721 | llvm::Value *Src0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18722 | llvm::Value *Src1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18723 | llvm::Value *Src2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18724 | |
// FIXME-GFX10: How should the 32-bit mask be handled?
18726 | Function *F = CGM.getIntrinsic(IID: Intrinsic::amdgcn_fcmp, |
18727 | Tys: { Builder.getInt64Ty(), Src0->getType() }); |
18728 | return Builder.CreateCall(Callee: F, Args: { Src0, Src1, Src2 }); |
18729 | } |
18730 | case AMDGPU::BI__builtin_amdgcn_class: |
18731 | case AMDGPU::BI__builtin_amdgcn_classf: |
18732 | case AMDGPU::BI__builtin_amdgcn_classh: |
18733 | return emitFPIntBuiltin(CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_class); |
18734 | case AMDGPU::BI__builtin_amdgcn_fmed3f: |
18735 | case AMDGPU::BI__builtin_amdgcn_fmed3h: |
18736 | return emitBuiltinWithOneOverloadedType<3>(CGF&: *this, E, |
18737 | IntrinsicID: Intrinsic::amdgcn_fmed3); |
18738 | case AMDGPU::BI__builtin_amdgcn_ds_append: |
18739 | case AMDGPU::BI__builtin_amdgcn_ds_consume: { |
18740 | Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ? |
18741 | Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume; |
18742 | Value *Src0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18743 | Function *F = CGM.getIntrinsic(IID: Intrin, Tys: { Src0->getType() }); |
18744 | return Builder.CreateCall(Callee: F, Args: { Src0, Builder.getFalse() }); |
18745 | } |
18746 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64: |
18747 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32: |
18748 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16: |
18749 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64: |
18750 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64: |
18751 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64: |
18752 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64: |
18753 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: |
18754 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32: |
18755 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16: { |
18756 | Intrinsic::ID IID; |
18757 | llvm::Type *ArgTy = llvm::Type::getDoubleTy(C&: getLLVMContext()); |
18758 | switch (BuiltinID) { |
18759 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32: |
18760 | ArgTy = llvm::Type::getFloatTy(C&: getLLVMContext()); |
18761 | IID = Intrinsic::amdgcn_global_atomic_fadd; |
18762 | break; |
18763 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16: |
18764 | ArgTy = llvm::FixedVectorType::get( |
18765 | ElementType: llvm::Type::getHalfTy(C&: getLLVMContext()), NumElts: 2); |
18766 | IID = Intrinsic::amdgcn_global_atomic_fadd; |
18767 | break; |
18768 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64: |
18769 | IID = Intrinsic::amdgcn_global_atomic_fadd; |
18770 | break; |
18771 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64: |
18772 | IID = Intrinsic::amdgcn_global_atomic_fmin; |
18773 | break; |
18774 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64: |
18775 | IID = Intrinsic::amdgcn_global_atomic_fmax; |
18776 | break; |
18777 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64: |
18778 | IID = Intrinsic::amdgcn_flat_atomic_fadd; |
18779 | break; |
18780 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64: |
18781 | IID = Intrinsic::amdgcn_flat_atomic_fmin; |
18782 | break; |
18783 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: |
18784 | IID = Intrinsic::amdgcn_flat_atomic_fmax; |
18785 | break; |
18786 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32: |
18787 | ArgTy = llvm::Type::getFloatTy(C&: getLLVMContext()); |
18788 | IID = Intrinsic::amdgcn_flat_atomic_fadd; |
18789 | break; |
18790 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16: |
18791 | ArgTy = llvm::FixedVectorType::get( |
18792 | ElementType: llvm::Type::getHalfTy(C&: getLLVMContext()), NumElts: 2); |
18793 | IID = Intrinsic::amdgcn_flat_atomic_fadd; |
18794 | break; |
18795 | } |
18796 | llvm::Value *Addr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18797 | llvm::Value *Val = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18798 | llvm::Function *F = |
18799 | CGM.getIntrinsic(IID, Tys: {ArgTy, Addr->getType(), Val->getType()}); |
18800 | return Builder.CreateCall(Callee: F, Args: {Addr, Val}); |
18801 | } |
18802 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16: |
18803 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16: { |
18804 | Intrinsic::ID IID; |
18805 | switch (BuiltinID) { |
18806 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16: |
18807 | IID = Intrinsic::amdgcn_global_atomic_fadd_v2bf16; |
18808 | break; |
18809 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16: |
18810 | IID = Intrinsic::amdgcn_flat_atomic_fadd_v2bf16; |
18811 | break; |
18812 | } |
18813 | llvm::Value *Addr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18814 | llvm::Value *Val = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18815 | llvm::Function *F = CGM.getIntrinsic(IID, Tys: {Addr->getType()}); |
18816 | return Builder.CreateCall(Callee: F, Args: {Addr, Val}); |
18817 | } |
18818 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_i32: |
18819 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_v2i32: |
18820 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4i16: |
18821 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4f16: |
18822 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4bf16: |
18823 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8i16: |
18824 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8f16: |
18825 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8bf16: { |
18826 | |
18827 | Intrinsic::ID IID; |
18828 | switch (BuiltinID) { |
18829 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_i32: |
18830 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_v2i32: |
18831 | IID = Intrinsic::amdgcn_global_load_tr_b64; |
18832 | break; |
18833 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4i16: |
18834 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4f16: |
18835 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4bf16: |
18836 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8i16: |
18837 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8f16: |
18838 | case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8bf16: |
18839 | IID = Intrinsic::amdgcn_global_load_tr_b128; |
18840 | break; |
18841 | } |
18842 | llvm::Type *LoadTy = ConvertType(T: E->getType()); |
18843 | llvm::Value *Addr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18844 | llvm::Function *F = CGM.getIntrinsic(IID, Tys: {LoadTy}); |
18845 | return Builder.CreateCall(Callee: F, Args: {Addr}); |
18846 | } |
18847 | case AMDGPU::BI__builtin_amdgcn_get_fpenv: { |
18848 | Function *F = CGM.getIntrinsic(IID: Intrinsic::get_fpenv, |
18849 | Tys: {llvm::Type::getInt64Ty(C&: getLLVMContext())}); |
18850 | return Builder.CreateCall(Callee: F); |
18851 | } |
18852 | case AMDGPU::BI__builtin_amdgcn_set_fpenv: { |
18853 | Function *F = CGM.getIntrinsic(IID: Intrinsic::set_fpenv, |
18854 | Tys: {llvm::Type::getInt64Ty(C&: getLLVMContext())}); |
18855 | llvm::Value *Env = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18856 | return Builder.CreateCall(Callee: F, Args: {Env}); |
18857 | } |
18858 | case AMDGPU::BI__builtin_amdgcn_read_exec: |
18859 | return EmitAMDGCNBallotForExec(CGF&: *this, E, RegisterType: Int64Ty, ValueType: Int64Ty, isExecHi: false); |
18860 | case AMDGPU::BI__builtin_amdgcn_read_exec_lo: |
18861 | return EmitAMDGCNBallotForExec(CGF&: *this, E, RegisterType: Int32Ty, ValueType: Int32Ty, isExecHi: false); |
18862 | case AMDGPU::BI__builtin_amdgcn_read_exec_hi: |
18863 | return EmitAMDGCNBallotForExec(CGF&: *this, E, RegisterType: Int64Ty, ValueType: Int64Ty, isExecHi: true); |
18864 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray: |
18865 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h: |
18866 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l: |
18867 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: { |
18868 | llvm::Value *NodePtr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
18869 | llvm::Value *RayExtent = EmitScalarExpr(E: E->getArg(Arg: 1)); |
18870 | llvm::Value *RayOrigin = EmitScalarExpr(E: E->getArg(Arg: 2)); |
18871 | llvm::Value *RayDir = EmitScalarExpr(E: E->getArg(Arg: 3)); |
18872 | llvm::Value *RayInverseDir = EmitScalarExpr(E: E->getArg(Arg: 4)); |
18873 | llvm::Value *TextureDescr = EmitScalarExpr(E: E->getArg(Arg: 5)); |
18874 | |
18875 | // The builtins take these arguments as vec4 where the last element is |
18876 | // ignored. The intrinsic takes them as vec3. |
18877 | RayOrigin = Builder.CreateShuffleVector(V1: RayOrigin, V2: RayOrigin, |
18878 | Mask: ArrayRef<int>{0, 1, 2}); |
18879 | RayDir = |
18880 | Builder.CreateShuffleVector(V1: RayDir, V2: RayDir, Mask: ArrayRef<int>{0, 1, 2}); |
18881 | RayInverseDir = Builder.CreateShuffleVector(V1: RayInverseDir, V2: RayInverseDir, |
18882 | Mask: ArrayRef<int>{0, 1, 2}); |
18883 | |
18884 | Function *F = CGM.getIntrinsic(IID: Intrinsic::amdgcn_image_bvh_intersect_ray, |
18885 | Tys: {NodePtr->getType(), RayDir->getType()}); |
18886 | return Builder.CreateCall(Callee: F, Args: {NodePtr, RayExtent, RayOrigin, RayDir, |
18887 | RayInverseDir, TextureDescr}); |
18888 | } |
18889 | |
18890 | case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_rtn: { |
18891 | SmallVector<Value *, 4> Args; |
18892 | for (int i = 0, e = E->getNumArgs(); i != e; ++i) |
18893 | Args.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: i))); |
18894 | |
18895 | Function *F = CGM.getIntrinsic(IID: Intrinsic::amdgcn_ds_bvh_stack_rtn); |
18896 | Value *Call = Builder.CreateCall(Callee: F, Args); |
18897 | Value *Rtn = Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
18898 | Value *A = Builder.CreateExtractValue(Agg: Call, Idxs: 1); |
18899 | llvm::Type *RetTy = ConvertType(T: E->getType()); |
18900 | Value *I0 = Builder.CreateInsertElement(Vec: PoisonValue::get(T: RetTy), NewElt: Rtn, |
18901 | Idx: (uint64_t)0); |
18902 | return Builder.CreateInsertElement(Vec: I0, NewElt: A, Idx: 1); |
18903 | } |
18904 | |
18905 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32: |
18906 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32: |
18907 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64: |
18908 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64: |
18909 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32: |
18910 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32: |
18911 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64: |
18912 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64: |
18913 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32: |
18914 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64: |
18915 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32: |
18916 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64: |
18917 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32: |
18918 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64: |
18919 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32: |
18920 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64: |
18921 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32_gfx12: |
18922 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64_gfx12: |
18923 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32_gfx12: |
18924 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64_gfx12: |
18925 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32_gfx12: |
18926 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64_gfx12: |
18927 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32_gfx12: |
18928 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64_gfx12: |
18929 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32_gfx12: |
18930 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64_gfx12: |
18931 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32_gfx12: |
18932 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64_gfx12: |
18933 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w32_gfx12: |
18934 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w64_gfx12: |
18935 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w32_gfx12: |
18936 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w64_gfx12: |
18937 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w32_gfx12: |
18938 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w64_gfx12: |
18939 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w32_gfx12: |
18940 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w64_gfx12: |
18941 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w32_gfx12: |
18942 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w64_gfx12: |
18943 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w32: |
18944 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w64: |
18945 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w32: |
18946 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w64: |
18947 | case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w32: |
18948 | case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w64: |
18949 | case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w32: |
18950 | case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w64: |
18951 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w32: |
18952 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w64: |
18953 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w32: |
18954 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w64: |
18955 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w32: |
18956 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w64: |
18957 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w32: |
18958 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w64: |
18959 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w32: |
18960 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w64: |
18961 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w32: |
18962 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w64: |
18963 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w32: |
18964 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w64: { |
18965 | |
// These operations perform a matrix multiplication and accumulation of
// the form:
//     D = A * B + C
// We need to specify one type for the AB matrices and one for the CD
// matrices. Sparse matrix operations can have different types for A and B as
// well as an additional type for the sparsity index.
// The destination type must be listed before the types used for the source
// operands.
18973 | SmallVector<unsigned, 2> ArgsForMatchingMatrixTypes; |
// On GFX12, the intrinsics with a 16-bit accumulator use a packed layout.
// There is no need for the variable opsel argument, so always set it to
// "false".
18977 | bool AppendFalseForOpselArg = false; |
18978 | unsigned BuiltinWMMAOp; |
18979 | |
18980 | switch (BuiltinID) { |
18981 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32: |
18982 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64: |
18983 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32_gfx12: |
18984 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64_gfx12: |
18985 | ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB |
18986 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_f16; |
18987 | break; |
18988 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32: |
18989 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64: |
18990 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32_gfx12: |
18991 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64_gfx12: |
18992 | ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB |
18993 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf16; |
18994 | break; |
18995 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32_gfx12: |
18996 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64_gfx12: |
18997 | AppendFalseForOpselArg = true; |
18998 | [[fallthrough]]; |
18999 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32: |
19000 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64: |
19001 | ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB |
19002 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x16_f16; |
19003 | break; |
19004 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32_gfx12: |
19005 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64_gfx12: |
19006 | AppendFalseForOpselArg = true; |
19007 | [[fallthrough]]; |
19008 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32: |
19009 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64: |
19010 | ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB |
19011 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16_16x16x16_bf16; |
19012 | break; |
19013 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32: |
19014 | case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64: |
19015 | ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB |
19016 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x16_f16_tied; |
19017 | break; |
19018 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32: |
19019 | case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64: |
19020 | ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB |
19021 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16_16x16x16_bf16_tied; |
19022 | break; |
19023 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32: |
19024 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64: |
19025 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32_gfx12: |
19026 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64_gfx12: |
19027 | ArgsForMatchingMatrixTypes = {4, 1}; // CD, AB |
19028 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu8; |
19029 | break; |
19030 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32: |
19031 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64: |
19032 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32_gfx12: |
19033 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64_gfx12: |
19034 | ArgsForMatchingMatrixTypes = {4, 1}; // CD, AB |
19035 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu4; |
19036 | break; |
19037 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w32_gfx12: |
19038 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w64_gfx12: |
19039 | ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB |
19040 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_fp8_fp8; |
19041 | break; |
19042 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w32_gfx12: |
19043 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w64_gfx12: |
19044 | ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB |
19045 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_fp8_bf8; |
19046 | break; |
19047 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w32_gfx12: |
19048 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w64_gfx12: |
19049 | ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB |
19050 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf8_fp8; |
19051 | break; |
19052 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w32_gfx12: |
19053 | case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w64_gfx12: |
19054 | ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB |
19055 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf8_bf8; |
19056 | break; |
19057 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w32_gfx12: |
19058 | case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w64_gfx12: |
19059 | ArgsForMatchingMatrixTypes = {4, 1}; // CD, AB |
19060 | BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x32_iu4; |
19061 | break; |
19062 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w32: |
19063 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w64: |
19064 | ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index |
19065 | BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_f16; |
19066 | break; |
19067 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w32: |
19068 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w64: |
19069 | ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index |
19070 | BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_bf16; |
19071 | break; |
19072 | case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w32: |
19073 | case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w64: |
19074 | ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index |
19075 | BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f16_16x16x32_f16; |
19076 | break; |
19077 | case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w32: |
19078 | case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w64: |
19079 | ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index |
19080 | BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_bf16_16x16x32_bf16; |
19081 | break; |
19082 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w32: |
19083 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w64: |
19084 | ArgsForMatchingMatrixTypes = {4, 1, 3, 5}; // CD, A, B, Index |
19085 | BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_i32_16x16x32_iu8; |
19086 | break; |
19087 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w32: |
19088 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w64: |
19089 | ArgsForMatchingMatrixTypes = {4, 1, 3, 5}; // CD, A, B, Index |
19090 | BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_i32_16x16x32_iu4; |
19091 | break; |
19092 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w32: |
19093 | case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w64: |
19094 | ArgsForMatchingMatrixTypes = {4, 1, 3, 5}; // CD, A, B, Index |
19095 | BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_i32_16x16x64_iu4; |
19096 | break; |
19097 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w32: |
19098 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w64: |
19099 | ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index |
19100 | BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_fp8_fp8; |
19101 | break; |
19102 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w32: |
19103 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w64: |
19104 | ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index |
19105 | BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_fp8_bf8; |
19106 | break; |
19107 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w32: |
19108 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w64: |
19109 | ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index |
19110 | BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_bf8_fp8; |
19111 | break; |
19112 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w32: |
19113 | case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w64: |
19114 | ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index |
19115 | BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_bf8_bf8; |
19116 | break; |
19117 | } |
19118 | |
19119 | SmallVector<Value *, 6> Args; |
19120 | for (int i = 0, e = E->getNumArgs(); i != e; ++i) |
19121 | Args.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: i))); |
19122 | if (AppendFalseForOpselArg) |
19123 | Args.push_back(Elt: Builder.getFalse()); |
19124 | |
19125 | SmallVector<llvm::Type *, 6> ArgTypes; |
19126 | for (auto ArgIdx : ArgsForMatchingMatrixTypes) |
19127 | ArgTypes.push_back(Elt: Args[ArgIdx]->getType()); |
19128 | |
19129 | Function *F = CGM.getIntrinsic(IID: BuiltinWMMAOp, Tys: ArgTypes); |
19130 | return Builder.CreateCall(Callee: F, Args); |
19131 | } |
19132 | |
19133 | // amdgcn workitem |
19134 | case AMDGPU::BI__builtin_amdgcn_workitem_id_x: |
19135 | return emitRangedBuiltin(CGF&: *this, IntrinsicID: Intrinsic::amdgcn_workitem_id_x, low: 0, high: 1024); |
19136 | case AMDGPU::BI__builtin_amdgcn_workitem_id_y: |
19137 | return emitRangedBuiltin(CGF&: *this, IntrinsicID: Intrinsic::amdgcn_workitem_id_y, low: 0, high: 1024); |
19138 | case AMDGPU::BI__builtin_amdgcn_workitem_id_z: |
19139 | return emitRangedBuiltin(CGF&: *this, IntrinsicID: Intrinsic::amdgcn_workitem_id_z, low: 0, high: 1024); |
19140 | |
19141 | // amdgcn workgroup size |
19142 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_x: |
19143 | return EmitAMDGPUWorkGroupSize(CGF&: *this, Index: 0); |
19144 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_y: |
19145 | return EmitAMDGPUWorkGroupSize(CGF&: *this, Index: 1); |
19146 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_z: |
19147 | return EmitAMDGPUWorkGroupSize(CGF&: *this, Index: 2); |
19148 | |
19149 | // amdgcn grid size |
19150 | case AMDGPU::BI__builtin_amdgcn_grid_size_x: |
19151 | return EmitAMDGPUGridSize(CGF&: *this, Index: 0); |
19152 | case AMDGPU::BI__builtin_amdgcn_grid_size_y: |
19153 | return EmitAMDGPUGridSize(CGF&: *this, Index: 1); |
19154 | case AMDGPU::BI__builtin_amdgcn_grid_size_z: |
19155 | return EmitAMDGPUGridSize(CGF&: *this, Index: 2); |
19156 | |
19157 | // r600 intrinsics |
19158 | case AMDGPU::BI__builtin_r600_recipsqrt_ieee: |
19159 | case AMDGPU::BI__builtin_r600_recipsqrt_ieeef: |
19160 | return emitBuiltinWithOneOverloadedType<1>(CGF&: *this, E, |
19161 | IntrinsicID: Intrinsic::r600_recipsqrt_ieee); |
19162 | case AMDGPU::BI__builtin_r600_read_tidig_x: |
19163 | return emitRangedBuiltin(CGF&: *this, IntrinsicID: Intrinsic::r600_read_tidig_x, low: 0, high: 1024); |
19164 | case AMDGPU::BI__builtin_r600_read_tidig_y: |
19165 | return emitRangedBuiltin(CGF&: *this, IntrinsicID: Intrinsic::r600_read_tidig_y, low: 0, high: 1024); |
19166 | case AMDGPU::BI__builtin_r600_read_tidig_z: |
19167 | return emitRangedBuiltin(CGF&: *this, IntrinsicID: Intrinsic::r600_read_tidig_z, low: 0, high: 1024); |
19168 | case AMDGPU::BI__builtin_amdgcn_alignbit: { |
19169 | llvm::Value *Src0 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19170 | llvm::Value *Src1 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
19171 | llvm::Value *Src2 = EmitScalarExpr(E: E->getArg(Arg: 2)); |
19172 | Function *F = CGM.getIntrinsic(IID: Intrinsic::fshr, Tys: Src0->getType()); |
19173 | return Builder.CreateCall(Callee: F, Args: { Src0, Src1, Src2 }); |
19174 | } |
19175 | case AMDGPU::BI__builtin_amdgcn_fence: { |
19176 | ProcessOrderScopeAMDGCN(Order: EmitScalarExpr(E: E->getArg(Arg: 0)), |
19177 | Scope: EmitScalarExpr(E: E->getArg(Arg: 1)), AO, SSID); |
19178 | FenceInst *Fence = Builder.CreateFence(Ordering: AO, SSID); |
19179 | if (E->getNumArgs() > 2) |
19180 | AddAMDGPUFenceAddressSpaceMMRA(Inst: Fence, E); |
19181 | return Fence; |
19182 | } |
19183 | case AMDGPU::BI__builtin_amdgcn_atomic_inc32: |
19184 | case AMDGPU::BI__builtin_amdgcn_atomic_inc64: |
19185 | case AMDGPU::BI__builtin_amdgcn_atomic_dec32: |
19186 | case AMDGPU::BI__builtin_amdgcn_atomic_dec64: |
19187 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64: |
19188 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: |
19189 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16: |
19190 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2bf16: |
19191 | case AMDGPU::BI__builtin_amdgcn_ds_faddf: |
19192 | case AMDGPU::BI__builtin_amdgcn_ds_fminf: |
19193 | case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: { |
19194 | llvm::AtomicRMWInst::BinOp BinOp; |
19195 | switch (BuiltinID) { |
19196 | case AMDGPU::BI__builtin_amdgcn_atomic_inc32: |
19197 | case AMDGPU::BI__builtin_amdgcn_atomic_inc64: |
19198 | BinOp = llvm::AtomicRMWInst::UIncWrap; |
19199 | break; |
19200 | case AMDGPU::BI__builtin_amdgcn_atomic_dec32: |
19201 | case AMDGPU::BI__builtin_amdgcn_atomic_dec64: |
19202 | BinOp = llvm::AtomicRMWInst::UDecWrap; |
19203 | break; |
19204 | case AMDGPU::BI__builtin_amdgcn_ds_faddf: |
19205 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64: |
19206 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: |
19207 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16: |
19208 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2bf16: |
19209 | BinOp = llvm::AtomicRMWInst::FAdd; |
19210 | break; |
19211 | case AMDGPU::BI__builtin_amdgcn_ds_fminf: |
19212 | BinOp = llvm::AtomicRMWInst::FMin; |
19213 | break; |
19214 | case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: |
19215 | BinOp = llvm::AtomicRMWInst::FMax; |
19216 | break; |
19217 | } |
19218 | |
19219 | Address Ptr = CheckAtomicAlignment(CGF&: *this, E); |
19220 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 1)); |
19221 | llvm::Type *OrigTy = Val->getType(); |
19222 | QualType PtrTy = E->getArg(Arg: 0)->IgnoreImpCasts()->getType(); |
19223 | |
19224 | bool Volatile; |
19225 | |
19226 | if (BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_faddf || |
19227 | BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_fminf || |
19228 | BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_fmaxf) { |
      // __builtin_amdgcn_ds_faddf/fminf/fmaxf have an explicit volatile argument
19230 | Volatile = |
19231 | cast<ConstantInt>(Val: EmitScalarExpr(E: E->getArg(Arg: 4)))->getZExtValue(); |
19232 | } else { |
19233 | // Infer volatile from the passed type. |
19234 | Volatile = |
19235 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
19236 | } |
19237 | |
19238 | if (E->getNumArgs() >= 4) { |
19239 | // Some of the builtins have explicit ordering and scope arguments. |
19240 | ProcessOrderScopeAMDGCN(Order: EmitScalarExpr(E: E->getArg(Arg: 2)), |
19241 | Scope: EmitScalarExpr(E: E->getArg(Arg: 3)), AO, SSID); |
19242 | } else { |
19243 | // The ds_atomic_fadd_* builtins do not have syncscope/order arguments. |
19244 | SSID = llvm::SyncScope::System; |
19245 | AO = AtomicOrdering::SequentiallyConsistent; |
19246 | |
19247 | // The v2bf16 builtin uses i16 instead of a natural bfloat type. |
19248 | if (BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2bf16) { |
19249 | llvm::Type *V2BF16Ty = FixedVectorType::get( |
19250 | ElementType: llvm::Type::getBFloatTy(C&: Builder.getContext()), NumElts: 2); |
19251 | Val = Builder.CreateBitCast(V: Val, DestTy: V2BF16Ty); |
19252 | } |
19253 | } |
19254 | |
19255 | llvm::AtomicRMWInst *RMW = |
19256 | Builder.CreateAtomicRMW(Op: BinOp, Addr: Ptr, Val, Ordering: AO, SSID); |
19257 | if (Volatile) |
19258 | RMW->setVolatile(true); |
19259 | return Builder.CreateBitCast(V: RMW, DestTy: OrigTy); |
19260 | } |
19261 | case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtn: |
19262 | case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtnl: { |
19263 | llvm::Value *Arg = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19264 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19265 | // s_sendmsg_rtn is mangled using return type only. |
19266 | Function *F = |
19267 | CGM.getIntrinsic(IID: Intrinsic::amdgcn_s_sendmsg_rtn, Tys: {ResultType}); |
19268 | return Builder.CreateCall(Callee: F, Args: {Arg}); |
19269 | } |
19270 | case AMDGPU::BI__builtin_amdgcn_make_buffer_rsrc: |
19271 | return emitBuiltinWithOneOverloadedType<4>( |
19272 | CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_make_buffer_rsrc); |
19273 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b8: |
19274 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b16: |
19275 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b32: |
19276 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b64: |
19277 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b96: |
19278 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b128: |
19279 | return emitBuiltinWithOneOverloadedType<5>( |
19280 | CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_raw_ptr_buffer_store); |
19281 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b8: |
19282 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b16: |
19283 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b32: |
19284 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b64: |
19285 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b96: |
19286 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b128: { |
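    // Pick the IR result type matching the builtin's width: b8/b16/b32 load
    // integers, while b64/b96/b128 load <2 x i32>, <3 x i32> and <4 x i32>.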
19287 | llvm::Type *RetTy = nullptr; |
19288 | switch (BuiltinID) { |
19289 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b8: |
19290 | RetTy = Int8Ty; |
19291 | break; |
19292 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b16: |
19293 | RetTy = Int16Ty; |
19294 | break; |
19295 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b32: |
19296 | RetTy = Int32Ty; |
19297 | break; |
19298 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b64: |
19299 | RetTy = llvm::FixedVectorType::get(ElementType: Int32Ty, /*NumElements=*/NumElts: 2); |
19300 | break; |
19301 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b96: |
19302 | RetTy = llvm::FixedVectorType::get(ElementType: Int32Ty, /*NumElements=*/NumElts: 3); |
19303 | break; |
19304 | case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b128: |
19305 | RetTy = llvm::FixedVectorType::get(ElementType: Int32Ty, /*NumElements=*/NumElts: 4); |
19306 | break; |
19307 | } |
19308 | Function *F = |
19309 | CGM.getIntrinsic(IID: Intrinsic::amdgcn_raw_ptr_buffer_load, Tys: RetTy); |
19310 | return Builder.CreateCall( |
19311 | Callee: F, Args: {EmitScalarExpr(E: E->getArg(Arg: 0)), EmitScalarExpr(E: E->getArg(Arg: 1)), |
19312 | EmitScalarExpr(E: E->getArg(Arg: 2)), EmitScalarExpr(E: E->getArg(Arg: 3))}); |
19313 | } |
19314 | default: |
19315 | return nullptr; |
19316 | } |
19317 | } |
19318 | |
19319 | /// Handle a SystemZ function in which the final argument is a pointer |
19320 | /// to an int that receives the post-instruction CC value. At the LLVM level |
19321 | /// this is represented as a function that returns a {result, cc} pair. |
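/// For example (illustrative), __builtin_s390_vceqbs(a, b, &cc) becomes a call
/// to llvm.s390.vceqbs returning a {<16 x i8>, i32} pair; element 1 is stored
/// through the cc pointer and element 0 becomes the builtin's result.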
19322 | static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF, |
19323 | unsigned IntrinsicID, |
19324 | const CallExpr *E) { |
19325 | unsigned NumArgs = E->getNumArgs() - 1; |
19326 | SmallVector<Value *, 8> Args(NumArgs); |
19327 | for (unsigned I = 0; I < NumArgs; ++I) |
19328 | Args[I] = CGF.EmitScalarExpr(E: E->getArg(Arg: I)); |
19329 | Address CCPtr = CGF.EmitPointerWithAlignment(Addr: E->getArg(Arg: NumArgs)); |
19330 | Function *F = CGF.CGM.getIntrinsic(IID: IntrinsicID); |
19331 | Value *Call = CGF.Builder.CreateCall(Callee: F, Args); |
19332 | Value *CC = CGF.Builder.CreateExtractValue(Agg: Call, Idxs: 1); |
19333 | CGF.Builder.CreateStore(Val: CC, Addr: CCPtr); |
19334 | return CGF.Builder.CreateExtractValue(Agg: Call, Idxs: 0); |
19335 | } |
19336 | |
19337 | Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID, |
19338 | const CallExpr *E) { |
19339 | switch (BuiltinID) { |
19340 | case SystemZ::BI__builtin_tbegin: { |
19341 | Value *TDB = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19342 | Value *Control = llvm::ConstantInt::get(Ty: Int32Ty, V: 0xff0c); |
19343 | Function *F = CGM.getIntrinsic(IID: Intrinsic::s390_tbegin); |
19344 | return Builder.CreateCall(Callee: F, Args: {TDB, Control}); |
19345 | } |
19346 | case SystemZ::BI__builtin_tbegin_nofloat: { |
19347 | Value *TDB = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19348 | Value *Control = llvm::ConstantInt::get(Ty: Int32Ty, V: 0xff0c); |
19349 | Function *F = CGM.getIntrinsic(IID: Intrinsic::s390_tbegin_nofloat); |
19350 | return Builder.CreateCall(Callee: F, Args: {TDB, Control}); |
19351 | } |
19352 | case SystemZ::BI__builtin_tbeginc: { |
19353 | Value *TDB = llvm::ConstantPointerNull::get(T: Int8PtrTy); |
19354 | Value *Control = llvm::ConstantInt::get(Ty: Int32Ty, V: 0xff08); |
19355 | Function *F = CGM.getIntrinsic(IID: Intrinsic::s390_tbeginc); |
19356 | return Builder.CreateCall(Callee: F, Args: {TDB, Control}); |
19357 | } |
19358 | case SystemZ::BI__builtin_tabort: { |
19359 | Value *Data = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19360 | Function *F = CGM.getIntrinsic(IID: Intrinsic::s390_tabort); |
19361 | return Builder.CreateCall(Callee: F, Args: Builder.CreateSExt(V: Data, DestTy: Int64Ty, Name: "tabort" )); |
19362 | } |
19363 | case SystemZ::BI__builtin_non_tx_store: { |
19364 | Value *Address = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19365 | Value *Data = EmitScalarExpr(E: E->getArg(Arg: 1)); |
19366 | Function *F = CGM.getIntrinsic(IID: Intrinsic::s390_ntstg); |
19367 | return Builder.CreateCall(Callee: F, Args: {Data, Address}); |
19368 | } |
19369 | |
19370 | // Vector builtins. Note that most vector builtins are mapped automatically |
19371 | // to target-specific LLVM intrinsics. The ones handled specially here can |
19372 | // be represented via standard LLVM IR, which is preferable to enable common |
19373 | // LLVM optimizations. |
19374 | |
19375 | case SystemZ::BI__builtin_s390_vpopctb: |
19376 | case SystemZ::BI__builtin_s390_vpopcth: |
19377 | case SystemZ::BI__builtin_s390_vpopctf: |
19378 | case SystemZ::BI__builtin_s390_vpopctg: { |
19379 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19380 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19381 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctpop, Tys: ResultType); |
19382 | return Builder.CreateCall(Callee: F, Args: X); |
19383 | } |
19384 | |
19385 | case SystemZ::BI__builtin_s390_vclzb: |
19386 | case SystemZ::BI__builtin_s390_vclzh: |
19387 | case SystemZ::BI__builtin_s390_vclzf: |
19388 | case SystemZ::BI__builtin_s390_vclzg: { |
19389 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19390 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19391 | Value *Undef = ConstantInt::get(Ty: Builder.getInt1Ty(), V: false); |
19392 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: ResultType); |
19393 | return Builder.CreateCall(Callee: F, Args: {X, Undef}); |
19394 | } |
19395 | |
19396 | case SystemZ::BI__builtin_s390_vctzb: |
19397 | case SystemZ::BI__builtin_s390_vctzh: |
19398 | case SystemZ::BI__builtin_s390_vctzf: |
19399 | case SystemZ::BI__builtin_s390_vctzg: { |
19400 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19401 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19402 | Value *Undef = ConstantInt::get(Ty: Builder.getInt1Ty(), V: false); |
19403 | Function *F = CGM.getIntrinsic(IID: Intrinsic::cttz, Tys: ResultType); |
19404 | return Builder.CreateCall(Callee: F, Args: {X, Undef}); |
19405 | } |
19406 | |
19407 | case SystemZ::BI__builtin_s390_verllb: |
19408 | case SystemZ::BI__builtin_s390_verllh: |
19409 | case SystemZ::BI__builtin_s390_verllf: |
19410 | case SystemZ::BI__builtin_s390_verllg: { |
19411 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19412 | llvm::Value *Src = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19413 | llvm::Value *Amt = EmitScalarExpr(E: E->getArg(Arg: 1)); |
19414 | // Splat scalar rotate amount to vector type. |
19415 | unsigned NumElts = cast<llvm::FixedVectorType>(Val: ResultType)->getNumElements(); |
19416 | Amt = Builder.CreateIntCast(V: Amt, DestTy: ResultType->getScalarType(), isSigned: false); |
19417 | Amt = Builder.CreateVectorSplat(NumElts, V: Amt); |
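    // A rotate left is a funnel shift left with both data operands equal to
    // Src.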
19418 | Function *F = CGM.getIntrinsic(IID: Intrinsic::fshl, Tys: ResultType); |
19419 | return Builder.CreateCall(Callee: F, Args: { Src, Src, Amt }); |
19420 | } |
19421 | |
19422 | case SystemZ::BI__builtin_s390_verllvb: |
19423 | case SystemZ::BI__builtin_s390_verllvh: |
19424 | case SystemZ::BI__builtin_s390_verllvf: |
19425 | case SystemZ::BI__builtin_s390_verllvg: { |
19426 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19427 | llvm::Value *Src = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19428 | llvm::Value *Amt = EmitScalarExpr(E: E->getArg(Arg: 1)); |
19429 | Function *F = CGM.getIntrinsic(IID: Intrinsic::fshl, Tys: ResultType); |
19430 | return Builder.CreateCall(Callee: F, Args: { Src, Src, Amt }); |
19431 | } |
19432 | |
19433 | case SystemZ::BI__builtin_s390_vfsqsb: |
19434 | case SystemZ::BI__builtin_s390_vfsqdb: { |
19435 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19436 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19437 | if (Builder.getIsFPConstrained()) { |
19438 | Function *F = CGM.getIntrinsic(IID: Intrinsic::experimental_constrained_sqrt, Tys: ResultType); |
19439 | return Builder.CreateConstrainedFPCall(Callee: F, Args: { X }); |
19440 | } else { |
19441 | Function *F = CGM.getIntrinsic(IID: Intrinsic::sqrt, Tys: ResultType); |
19442 | return Builder.CreateCall(Callee: F, Args: X); |
19443 | } |
19444 | } |
19445 | case SystemZ::BI__builtin_s390_vfmasb: |
19446 | case SystemZ::BI__builtin_s390_vfmadb: { |
19447 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19448 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19449 | Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
19450 | Value *Z = EmitScalarExpr(E: E->getArg(Arg: 2)); |
19451 | if (Builder.getIsFPConstrained()) { |
19452 | Function *F = CGM.getIntrinsic(IID: Intrinsic::experimental_constrained_fma, Tys: ResultType); |
19453 | return Builder.CreateConstrainedFPCall(Callee: F, Args: {X, Y, Z}); |
19454 | } else { |
19455 | Function *F = CGM.getIntrinsic(IID: Intrinsic::fma, Tys: ResultType); |
19456 | return Builder.CreateCall(Callee: F, Args: {X, Y, Z}); |
19457 | } |
19458 | } |
19459 | case SystemZ::BI__builtin_s390_vfmssb: |
19460 | case SystemZ::BI__builtin_s390_vfmsdb: { |
19461 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19462 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19463 | Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
19464 | Value *Z = EmitScalarExpr(E: E->getArg(Arg: 2)); |
19465 | if (Builder.getIsFPConstrained()) { |
19466 | Function *F = CGM.getIntrinsic(IID: Intrinsic::experimental_constrained_fma, Tys: ResultType); |
19467 | return Builder.CreateConstrainedFPCall(Callee: F, Args: {X, Y, Builder.CreateFNeg(V: Z, Name: "neg" )}); |
19468 | } else { |
19469 | Function *F = CGM.getIntrinsic(IID: Intrinsic::fma, Tys: ResultType); |
19470 | return Builder.CreateCall(Callee: F, Args: {X, Y, Builder.CreateFNeg(V: Z, Name: "neg" )}); |
19471 | } |
19472 | } |
19473 | case SystemZ::BI__builtin_s390_vfnmasb: |
19474 | case SystemZ::BI__builtin_s390_vfnmadb: { |
19475 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19476 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19477 | Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
19478 | Value *Z = EmitScalarExpr(E: E->getArg(Arg: 2)); |
19479 | if (Builder.getIsFPConstrained()) { |
19480 | Function *F = CGM.getIntrinsic(IID: Intrinsic::experimental_constrained_fma, Tys: ResultType); |
19481 | return Builder.CreateFNeg(V: Builder.CreateConstrainedFPCall(Callee: F, Args: {X, Y, Z}), Name: "neg" ); |
19482 | } else { |
19483 | Function *F = CGM.getIntrinsic(IID: Intrinsic::fma, Tys: ResultType); |
19484 | return Builder.CreateFNeg(V: Builder.CreateCall(Callee: F, Args: {X, Y, Z}), Name: "neg" ); |
19485 | } |
19486 | } |
19487 | case SystemZ::BI__builtin_s390_vfnmssb: |
19488 | case SystemZ::BI__builtin_s390_vfnmsdb: { |
19489 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19490 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19491 | Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
19492 | Value *Z = EmitScalarExpr(E: E->getArg(Arg: 2)); |
19493 | if (Builder.getIsFPConstrained()) { |
19494 | Function *F = CGM.getIntrinsic(IID: Intrinsic::experimental_constrained_fma, Tys: ResultType); |
19495 | Value *NegZ = Builder.CreateFNeg(V: Z, Name: "sub" ); |
19496 | return Builder.CreateFNeg(V: Builder.CreateConstrainedFPCall(Callee: F, Args: {X, Y, NegZ})); |
19497 | } else { |
19498 | Function *F = CGM.getIntrinsic(IID: Intrinsic::fma, Tys: ResultType); |
19499 | Value *NegZ = Builder.CreateFNeg(V: Z, Name: "neg" ); |
19500 | return Builder.CreateFNeg(V: Builder.CreateCall(Callee: F, Args: {X, Y, NegZ})); |
19501 | } |
19502 | } |
19503 | case SystemZ::BI__builtin_s390_vflpsb: |
19504 | case SystemZ::BI__builtin_s390_vflpdb: { |
19505 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19506 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19507 | Function *F = CGM.getIntrinsic(IID: Intrinsic::fabs, Tys: ResultType); |
19508 | return Builder.CreateCall(Callee: F, Args: X); |
19509 | } |
19510 | case SystemZ::BI__builtin_s390_vflnsb: |
19511 | case SystemZ::BI__builtin_s390_vflndb: { |
19512 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19513 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19514 | Function *F = CGM.getIntrinsic(IID: Intrinsic::fabs, Tys: ResultType); |
19515 | return Builder.CreateFNeg(V: Builder.CreateCall(Callee: F, Args: X), Name: "neg" ); |
19516 | } |
19517 | case SystemZ::BI__builtin_s390_vfisb: |
19518 | case SystemZ::BI__builtin_s390_vfidb: { |
19519 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19520 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19521 | // Constant-fold the M4 and M5 mask arguments. |
19522 | llvm::APSInt M4 = *E->getArg(Arg: 1)->getIntegerConstantExpr(Ctx: getContext()); |
19523 | llvm::APSInt M5 = *E->getArg(Arg: 2)->getIntegerConstantExpr(Ctx: getContext()); |
    // Check whether this instance can be represented via an LLVM standard
19525 | // intrinsic. We only support some combinations of M4 and M5. |
19526 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
19527 | Intrinsic::ID CI; |
19528 | switch (M4.getZExtValue()) { |
19529 | default: break; |
19530 | case 0: // IEEE-inexact exception allowed |
19531 | switch (M5.getZExtValue()) { |
19532 | default: break; |
19533 | case 0: ID = Intrinsic::rint; |
19534 | CI = Intrinsic::experimental_constrained_rint; break; |
19535 | } |
19536 | break; |
19537 | case 4: // IEEE-inexact exception suppressed |
19538 | switch (M5.getZExtValue()) { |
19539 | default: break; |
19540 | case 0: ID = Intrinsic::nearbyint; |
19541 | CI = Intrinsic::experimental_constrained_nearbyint; break; |
19542 | case 1: ID = Intrinsic::round; |
19543 | CI = Intrinsic::experimental_constrained_round; break; |
19544 | case 5: ID = Intrinsic::trunc; |
19545 | CI = Intrinsic::experimental_constrained_trunc; break; |
19546 | case 6: ID = Intrinsic::ceil; |
19547 | CI = Intrinsic::experimental_constrained_ceil; break; |
19548 | case 7: ID = Intrinsic::floor; |
19549 | CI = Intrinsic::experimental_constrained_floor; break; |
19550 | } |
19551 | break; |
19552 | } |
19553 | if (ID != Intrinsic::not_intrinsic) { |
19554 | if (Builder.getIsFPConstrained()) { |
19555 | Function *F = CGM.getIntrinsic(IID: CI, Tys: ResultType); |
19556 | return Builder.CreateConstrainedFPCall(Callee: F, Args: X); |
19557 | } else { |
19558 | Function *F = CGM.getIntrinsic(IID: ID, Tys: ResultType); |
19559 | return Builder.CreateCall(Callee: F, Args: X); |
19560 | } |
19561 | } |
19562 | switch (BuiltinID) { // FIXME: constrained version? |
19563 | case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break; |
19564 | case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break; |
19565 | default: llvm_unreachable("Unknown BuiltinID" ); |
19566 | } |
19567 | Function *F = CGM.getIntrinsic(IID: ID); |
19568 | Value *M4Value = llvm::ConstantInt::get(Context&: getLLVMContext(), V: M4); |
19569 | Value *M5Value = llvm::ConstantInt::get(Context&: getLLVMContext(), V: M5); |
19570 | return Builder.CreateCall(Callee: F, Args: {X, M4Value, M5Value}); |
19571 | } |
19572 | case SystemZ::BI__builtin_s390_vfmaxsb: |
19573 | case SystemZ::BI__builtin_s390_vfmaxdb: { |
19574 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19575 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19576 | Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
19577 | // Constant-fold the M4 mask argument. |
19578 | llvm::APSInt M4 = *E->getArg(Arg: 2)->getIntegerConstantExpr(Ctx: getContext()); |
    // Check whether this instance can be represented via an LLVM standard
19580 | // intrinsic. We only support some values of M4. |
19581 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
19582 | Intrinsic::ID CI; |
19583 | switch (M4.getZExtValue()) { |
19584 | default: break; |
19585 | case 4: ID = Intrinsic::maxnum; |
19586 | CI = Intrinsic::experimental_constrained_maxnum; break; |
19587 | } |
19588 | if (ID != Intrinsic::not_intrinsic) { |
19589 | if (Builder.getIsFPConstrained()) { |
19590 | Function *F = CGM.getIntrinsic(IID: CI, Tys: ResultType); |
19591 | return Builder.CreateConstrainedFPCall(Callee: F, Args: {X, Y}); |
19592 | } else { |
19593 | Function *F = CGM.getIntrinsic(IID: ID, Tys: ResultType); |
19594 | return Builder.CreateCall(Callee: F, Args: {X, Y}); |
19595 | } |
19596 | } |
19597 | switch (BuiltinID) { |
19598 | case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break; |
19599 | case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break; |
19600 | default: llvm_unreachable("Unknown BuiltinID" ); |
19601 | } |
19602 | Function *F = CGM.getIntrinsic(IID: ID); |
19603 | Value *M4Value = llvm::ConstantInt::get(Context&: getLLVMContext(), V: M4); |
19604 | return Builder.CreateCall(Callee: F, Args: {X, Y, M4Value}); |
19605 | } |
19606 | case SystemZ::BI__builtin_s390_vfminsb: |
19607 | case SystemZ::BI__builtin_s390_vfmindb: { |
19608 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19609 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19610 | Value *Y = EmitScalarExpr(E: E->getArg(Arg: 1)); |
19611 | // Constant-fold the M4 mask argument. |
19612 | llvm::APSInt M4 = *E->getArg(Arg: 2)->getIntegerConstantExpr(Ctx: getContext()); |
    // Check whether this instance can be represented via an LLVM standard
19614 | // intrinsic. We only support some values of M4. |
19615 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
19616 | Intrinsic::ID CI; |
19617 | switch (M4.getZExtValue()) { |
19618 | default: break; |
19619 | case 4: ID = Intrinsic::minnum; |
19620 | CI = Intrinsic::experimental_constrained_minnum; break; |
19621 | } |
19622 | if (ID != Intrinsic::not_intrinsic) { |
19623 | if (Builder.getIsFPConstrained()) { |
19624 | Function *F = CGM.getIntrinsic(IID: CI, Tys: ResultType); |
19625 | return Builder.CreateConstrainedFPCall(Callee: F, Args: {X, Y}); |
19626 | } else { |
19627 | Function *F = CGM.getIntrinsic(IID: ID, Tys: ResultType); |
19628 | return Builder.CreateCall(Callee: F, Args: {X, Y}); |
19629 | } |
19630 | } |
19631 | switch (BuiltinID) { |
19632 | case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break; |
19633 | case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break; |
19634 | default: llvm_unreachable("Unknown BuiltinID" ); |
19635 | } |
19636 | Function *F = CGM.getIntrinsic(IID: ID); |
19637 | Value *M4Value = llvm::ConstantInt::get(Context&: getLLVMContext(), V: M4); |
19638 | return Builder.CreateCall(Callee: F, Args: {X, Y, M4Value}); |
19639 | } |
19640 | |
19641 | case SystemZ::BI__builtin_s390_vlbrh: |
19642 | case SystemZ::BI__builtin_s390_vlbrf: |
19643 | case SystemZ::BI__builtin_s390_vlbrg: { |
19644 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
19645 | Value *X = EmitScalarExpr(E: E->getArg(Arg: 0)); |
19646 | Function *F = CGM.getIntrinsic(IID: Intrinsic::bswap, Tys: ResultType); |
19647 | return Builder.CreateCall(Callee: F, Args: X); |
19648 | } |
19649 | |
19650 | // Vector intrinsics that output the post-instruction CC value. |
19651 | |
19652 | #define INTRINSIC_WITH_CC(NAME) \ |
19653 | case SystemZ::BI__builtin_##NAME: \ |
19654 | return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E) |
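// For instance, INTRINSIC_WITH_CC(s390_vceqbs) expands to
//   case SystemZ::BI__builtin_s390_vceqbs:
//     return EmitSystemZIntrinsicWithCC(*this, Intrinsic::s390_vceqbs, E);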
19655 | |
19656 | INTRINSIC_WITH_CC(s390_vpkshs); |
19657 | INTRINSIC_WITH_CC(s390_vpksfs); |
19658 | INTRINSIC_WITH_CC(s390_vpksgs); |
19659 | |
19660 | INTRINSIC_WITH_CC(s390_vpklshs); |
19661 | INTRINSIC_WITH_CC(s390_vpklsfs); |
19662 | INTRINSIC_WITH_CC(s390_vpklsgs); |
19663 | |
19664 | INTRINSIC_WITH_CC(s390_vceqbs); |
19665 | INTRINSIC_WITH_CC(s390_vceqhs); |
19666 | INTRINSIC_WITH_CC(s390_vceqfs); |
19667 | INTRINSIC_WITH_CC(s390_vceqgs); |
19668 | |
19669 | INTRINSIC_WITH_CC(s390_vchbs); |
19670 | INTRINSIC_WITH_CC(s390_vchhs); |
19671 | INTRINSIC_WITH_CC(s390_vchfs); |
19672 | INTRINSIC_WITH_CC(s390_vchgs); |
19673 | |
19674 | INTRINSIC_WITH_CC(s390_vchlbs); |
19675 | INTRINSIC_WITH_CC(s390_vchlhs); |
19676 | INTRINSIC_WITH_CC(s390_vchlfs); |
19677 | INTRINSIC_WITH_CC(s390_vchlgs); |
19678 | |
19679 | INTRINSIC_WITH_CC(s390_vfaebs); |
19680 | INTRINSIC_WITH_CC(s390_vfaehs); |
19681 | INTRINSIC_WITH_CC(s390_vfaefs); |
19682 | |
19683 | INTRINSIC_WITH_CC(s390_vfaezbs); |
19684 | INTRINSIC_WITH_CC(s390_vfaezhs); |
19685 | INTRINSIC_WITH_CC(s390_vfaezfs); |
19686 | |
19687 | INTRINSIC_WITH_CC(s390_vfeebs); |
19688 | INTRINSIC_WITH_CC(s390_vfeehs); |
19689 | INTRINSIC_WITH_CC(s390_vfeefs); |
19690 | |
19691 | INTRINSIC_WITH_CC(s390_vfeezbs); |
19692 | INTRINSIC_WITH_CC(s390_vfeezhs); |
19693 | INTRINSIC_WITH_CC(s390_vfeezfs); |
19694 | |
19695 | INTRINSIC_WITH_CC(s390_vfenebs); |
19696 | INTRINSIC_WITH_CC(s390_vfenehs); |
19697 | INTRINSIC_WITH_CC(s390_vfenefs); |
19698 | |
19699 | INTRINSIC_WITH_CC(s390_vfenezbs); |
19700 | INTRINSIC_WITH_CC(s390_vfenezhs); |
19701 | INTRINSIC_WITH_CC(s390_vfenezfs); |
19702 | |
19703 | INTRINSIC_WITH_CC(s390_vistrbs); |
19704 | INTRINSIC_WITH_CC(s390_vistrhs); |
19705 | INTRINSIC_WITH_CC(s390_vistrfs); |
19706 | |
19707 | INTRINSIC_WITH_CC(s390_vstrcbs); |
19708 | INTRINSIC_WITH_CC(s390_vstrchs); |
19709 | INTRINSIC_WITH_CC(s390_vstrcfs); |
19710 | |
19711 | INTRINSIC_WITH_CC(s390_vstrczbs); |
19712 | INTRINSIC_WITH_CC(s390_vstrczhs); |
19713 | INTRINSIC_WITH_CC(s390_vstrczfs); |
19714 | |
19715 | INTRINSIC_WITH_CC(s390_vfcesbs); |
19716 | INTRINSIC_WITH_CC(s390_vfcedbs); |
19717 | INTRINSIC_WITH_CC(s390_vfchsbs); |
19718 | INTRINSIC_WITH_CC(s390_vfchdbs); |
19719 | INTRINSIC_WITH_CC(s390_vfchesbs); |
19720 | INTRINSIC_WITH_CC(s390_vfchedbs); |
19721 | |
19722 | INTRINSIC_WITH_CC(s390_vftcisb); |
19723 | INTRINSIC_WITH_CC(s390_vftcidb); |
19724 | |
19725 | INTRINSIC_WITH_CC(s390_vstrsb); |
19726 | INTRINSIC_WITH_CC(s390_vstrsh); |
19727 | INTRINSIC_WITH_CC(s390_vstrsf); |
19728 | |
19729 | INTRINSIC_WITH_CC(s390_vstrszb); |
19730 | INTRINSIC_WITH_CC(s390_vstrszh); |
19731 | INTRINSIC_WITH_CC(s390_vstrszf); |
19732 | |
19733 | #undef INTRINSIC_WITH_CC |
19734 | |
19735 | default: |
19736 | return nullptr; |
19737 | } |
19738 | } |
19739 | |
19740 | namespace { |
// Helper classes for mapping MMA builtins to particular LLVM intrinsic
// variants.
19742 | struct NVPTXMmaLdstInfo { |
19743 | unsigned NumResults; // Number of elements to load/store |
  // Intrinsic IDs for row/col variants; 0 if a layout is unsupported.
19745 | unsigned IID_col; |
19746 | unsigned IID_row; |
19747 | }; |
19748 | |
19749 | #define MMA_INTR(geom_op_type, layout) \ |
19750 | Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride |
19751 | #define MMA_LDST(n, geom_op_type) \ |
19752 | { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) } |
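// For example, MMA_LDST(8, m16n16k16_load_a_f16) produces
//   { 8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//        Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride }.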
19753 | |
19754 | static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) { |
19755 | switch (BuiltinID) { |
19756 | // FP MMA loads |
19757 | case NVPTX::BI__hmma_m16n16k16_ld_a: |
19758 | return MMA_LDST(8, m16n16k16_load_a_f16); |
19759 | case NVPTX::BI__hmma_m16n16k16_ld_b: |
19760 | return MMA_LDST(8, m16n16k16_load_b_f16); |
19761 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: |
19762 | return MMA_LDST(4, m16n16k16_load_c_f16); |
19763 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: |
19764 | return MMA_LDST(8, m16n16k16_load_c_f32); |
19765 | case NVPTX::BI__hmma_m32n8k16_ld_a: |
19766 | return MMA_LDST(8, m32n8k16_load_a_f16); |
19767 | case NVPTX::BI__hmma_m32n8k16_ld_b: |
19768 | return MMA_LDST(8, m32n8k16_load_b_f16); |
19769 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: |
19770 | return MMA_LDST(4, m32n8k16_load_c_f16); |
19771 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: |
19772 | return MMA_LDST(8, m32n8k16_load_c_f32); |
19773 | case NVPTX::BI__hmma_m8n32k16_ld_a: |
19774 | return MMA_LDST(8, m8n32k16_load_a_f16); |
19775 | case NVPTX::BI__hmma_m8n32k16_ld_b: |
19776 | return MMA_LDST(8, m8n32k16_load_b_f16); |
19777 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: |
19778 | return MMA_LDST(4, m8n32k16_load_c_f16); |
19779 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: |
19780 | return MMA_LDST(8, m8n32k16_load_c_f32); |
19781 | |
19782 | // Integer MMA loads |
19783 | case NVPTX::BI__imma_m16n16k16_ld_a_s8: |
19784 | return MMA_LDST(2, m16n16k16_load_a_s8); |
19785 | case NVPTX::BI__imma_m16n16k16_ld_a_u8: |
19786 | return MMA_LDST(2, m16n16k16_load_a_u8); |
19787 | case NVPTX::BI__imma_m16n16k16_ld_b_s8: |
19788 | return MMA_LDST(2, m16n16k16_load_b_s8); |
19789 | case NVPTX::BI__imma_m16n16k16_ld_b_u8: |
19790 | return MMA_LDST(2, m16n16k16_load_b_u8); |
19791 | case NVPTX::BI__imma_m16n16k16_ld_c: |
19792 | return MMA_LDST(8, m16n16k16_load_c_s32); |
19793 | case NVPTX::BI__imma_m32n8k16_ld_a_s8: |
19794 | return MMA_LDST(4, m32n8k16_load_a_s8); |
19795 | case NVPTX::BI__imma_m32n8k16_ld_a_u8: |
19796 | return MMA_LDST(4, m32n8k16_load_a_u8); |
19797 | case NVPTX::BI__imma_m32n8k16_ld_b_s8: |
19798 | return MMA_LDST(1, m32n8k16_load_b_s8); |
19799 | case NVPTX::BI__imma_m32n8k16_ld_b_u8: |
19800 | return MMA_LDST(1, m32n8k16_load_b_u8); |
19801 | case NVPTX::BI__imma_m32n8k16_ld_c: |
19802 | return MMA_LDST(8, m32n8k16_load_c_s32); |
19803 | case NVPTX::BI__imma_m8n32k16_ld_a_s8: |
19804 | return MMA_LDST(1, m8n32k16_load_a_s8); |
19805 | case NVPTX::BI__imma_m8n32k16_ld_a_u8: |
19806 | return MMA_LDST(1, m8n32k16_load_a_u8); |
19807 | case NVPTX::BI__imma_m8n32k16_ld_b_s8: |
19808 | return MMA_LDST(4, m8n32k16_load_b_s8); |
19809 | case NVPTX::BI__imma_m8n32k16_ld_b_u8: |
19810 | return MMA_LDST(4, m8n32k16_load_b_u8); |
19811 | case NVPTX::BI__imma_m8n32k16_ld_c: |
19812 | return MMA_LDST(8, m8n32k16_load_c_s32); |
19813 | |
  // Sub-integer MMA loads.
  // A fragments support only the row layout and B fragments only the col
  // layout.
19816 | case NVPTX::BI__imma_m8n8k32_ld_a_s4: |
19817 | return {.NumResults: 1, .IID_col: 0, MMA_INTR(m8n8k32_load_a_s4, row)}; |
19818 | case NVPTX::BI__imma_m8n8k32_ld_a_u4: |
19819 | return {.NumResults: 1, .IID_col: 0, MMA_INTR(m8n8k32_load_a_u4, row)}; |
19820 | case NVPTX::BI__imma_m8n8k32_ld_b_s4: |
19821 | return {.NumResults: 1, MMA_INTR(m8n8k32_load_b_s4, col), .IID_row: 0}; |
19822 | case NVPTX::BI__imma_m8n8k32_ld_b_u4: |
19823 | return {.NumResults: 1, MMA_INTR(m8n8k32_load_b_u4, col), .IID_row: 0}; |
19824 | case NVPTX::BI__imma_m8n8k32_ld_c: |
19825 | return MMA_LDST(2, m8n8k32_load_c_s32); |
19826 | case NVPTX::BI__bmma_m8n8k128_ld_a_b1: |
19827 | return {.NumResults: 1, .IID_col: 0, MMA_INTR(m8n8k128_load_a_b1, row)}; |
19828 | case NVPTX::BI__bmma_m8n8k128_ld_b_b1: |
19829 | return {.NumResults: 1, MMA_INTR(m8n8k128_load_b_b1, col), .IID_row: 0}; |
19830 | case NVPTX::BI__bmma_m8n8k128_ld_c: |
19831 | return MMA_LDST(2, m8n8k128_load_c_s32); |
19832 | |
19833 | // Double MMA loads |
19834 | case NVPTX::BI__dmma_m8n8k4_ld_a: |
19835 | return MMA_LDST(1, m8n8k4_load_a_f64); |
19836 | case NVPTX::BI__dmma_m8n8k4_ld_b: |
19837 | return MMA_LDST(1, m8n8k4_load_b_f64); |
19838 | case NVPTX::BI__dmma_m8n8k4_ld_c: |
19839 | return MMA_LDST(2, m8n8k4_load_c_f64); |
19840 | |
19841 | // Alternate float MMA loads |
19842 | case NVPTX::BI__mma_bf16_m16n16k16_ld_a: |
19843 | return MMA_LDST(4, m16n16k16_load_a_bf16); |
19844 | case NVPTX::BI__mma_bf16_m16n16k16_ld_b: |
19845 | return MMA_LDST(4, m16n16k16_load_b_bf16); |
19846 | case NVPTX::BI__mma_bf16_m8n32k16_ld_a: |
19847 | return MMA_LDST(2, m8n32k16_load_a_bf16); |
19848 | case NVPTX::BI__mma_bf16_m8n32k16_ld_b: |
19849 | return MMA_LDST(8, m8n32k16_load_b_bf16); |
19850 | case NVPTX::BI__mma_bf16_m32n8k16_ld_a: |
19851 | return MMA_LDST(8, m32n8k16_load_a_bf16); |
19852 | case NVPTX::BI__mma_bf16_m32n8k16_ld_b: |
19853 | return MMA_LDST(2, m32n8k16_load_b_bf16); |
19854 | case NVPTX::BI__mma_tf32_m16n16k8_ld_a: |
19855 | return MMA_LDST(4, m16n16k8_load_a_tf32); |
19856 | case NVPTX::BI__mma_tf32_m16n16k8_ld_b: |
19857 | return MMA_LDST(4, m16n16k8_load_b_tf32); |
19858 | case NVPTX::BI__mma_tf32_m16n16k8_ld_c: |
19859 | return MMA_LDST(8, m16n16k8_load_c_f32); |
19860 | |
  // NOTE: We need to follow the inconsistent naming scheme used by NVCC.
  // Unlike PTX and LLVM IR, where stores always use fragment D, NVCC builtins
  // always use fragment C for both loads and stores.
19864 | // FP MMA stores. |
19865 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: |
19866 | return MMA_LDST(4, m16n16k16_store_d_f16); |
19867 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: |
19868 | return MMA_LDST(8, m16n16k16_store_d_f32); |
19869 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: |
19870 | return MMA_LDST(4, m32n8k16_store_d_f16); |
19871 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: |
19872 | return MMA_LDST(8, m32n8k16_store_d_f32); |
19873 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: |
19874 | return MMA_LDST(4, m8n32k16_store_d_f16); |
19875 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: |
19876 | return MMA_LDST(8, m8n32k16_store_d_f32); |
19877 | |
19878 | // Integer and sub-integer MMA stores. |
19879 | // Another naming quirk. Unlike other MMA builtins that use PTX types in the |
19880 | // name, integer loads/stores use LLVM's i32. |
19881 | case NVPTX::BI__imma_m16n16k16_st_c_i32: |
19882 | return MMA_LDST(8, m16n16k16_store_d_s32); |
19883 | case NVPTX::BI__imma_m32n8k16_st_c_i32: |
19884 | return MMA_LDST(8, m32n8k16_store_d_s32); |
19885 | case NVPTX::BI__imma_m8n32k16_st_c_i32: |
19886 | return MMA_LDST(8, m8n32k16_store_d_s32); |
19887 | case NVPTX::BI__imma_m8n8k32_st_c_i32: |
19888 | return MMA_LDST(2, m8n8k32_store_d_s32); |
19889 | case NVPTX::BI__bmma_m8n8k128_st_c_i32: |
19890 | return MMA_LDST(2, m8n8k128_store_d_s32); |
19891 | |
19892 | // Double MMA store |
19893 | case NVPTX::BI__dmma_m8n8k4_st_c_f64: |
19894 | return MMA_LDST(2, m8n8k4_store_d_f64); |
19895 | |
19896 | // Alternate float MMA store |
19897 | case NVPTX::BI__mma_m16n16k8_st_c_f32: |
19898 | return MMA_LDST(8, m16n16k8_store_d_f32); |
19899 | |
19900 | default: |
19901 | llvm_unreachable("Unknown MMA builtin" ); |
19902 | } |
19903 | } |
19904 | #undef MMA_LDST |
19905 | #undef MMA_INTR |
19906 | |
19907 | |
19908 | struct NVPTXMmaInfo { |
19909 | unsigned NumEltsA; |
19910 | unsigned NumEltsB; |
19911 | unsigned NumEltsC; |
19912 | unsigned NumEltsD; |
19913 | |
19914 | // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority |
19915 | // over 'col' for layout. The index of non-satf variants is expected to match |
19916 | // the undocumented layout constants used by CUDA's mma.hpp. |
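  // For example, under this ordering Layout == 1 (row/col) with Satf == true
  // selects index 1 + 4 * 1 == 5, i.e. the row/col .satfinite variant.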
19917 | std::array<unsigned, 8> Variants; |
19918 | |
19919 | unsigned getMMAIntrinsic(int Layout, bool Satf) { |
19920 | unsigned Index = Layout + 4 * Satf; |
19921 | if (Index >= Variants.size()) |
19922 | return 0; |
19923 | return Variants[Index]; |
19924 | } |
19925 | }; |
19926 | |
// Returns the NVPTXMmaInfo for a given MMA builtin; its getMMAIntrinsic
// returns the intrinsic matching Layout and Satf for valid combinations, and
// 0 otherwise.
19929 | static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) { |
19930 | // clang-format off |
19931 | #define MMA_VARIANTS(geom, type) \ |
19932 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \ |
19933 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ |
19934 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \ |
19935 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type |
19936 | #define MMA_SATF_VARIANTS(geom, type) \ |
19937 | MMA_VARIANTS(geom, type), \ |
19938 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \ |
19939 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \ |
19940 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \ |
19941 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite |
19942 | // Sub-integer MMA only supports row.col layout. |
19943 | #define MMA_VARIANTS_I4(geom, type) \ |
19944 | 0, \ |
19945 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ |
19946 | 0, \ |
19947 | 0, \ |
19948 | 0, \ |
19949 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \ |
19950 | 0, \ |
19951 | 0 |
19952 | // b1 MMA does not support .satfinite. |
19953 | #define MMA_VARIANTS_B1_XOR(geom, type) \ |
19954 | 0, \ |
19955 | Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type, \ |
19956 | 0, \ |
19957 | 0, \ |
19958 | 0, \ |
19959 | 0, \ |
19960 | 0, \ |
19961 | 0 |
19962 | #define MMA_VARIANTS_B1_AND(geom, type) \ |
19963 | 0, \ |
19964 | Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type, \ |
19965 | 0, \ |
19966 | 0, \ |
19967 | 0, \ |
19968 | 0, \ |
19969 | 0, \ |
19970 | 0 |
19971 | // clang-format on |
19972 | switch (BuiltinID) { |
19973 | // FP MMA |
19974 | // Note that 'type' argument of MMA_SATF_VARIANTS uses D_C notation, while |
19975 | // NumEltsN of return value are ordered as A,B,C,D. |
19976 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: |
19977 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 4, .NumEltsD: 4, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}}; |
19978 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: |
19979 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 4, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}}; |
19980 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: |
19981 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 8, .NumEltsD: 4, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}}; |
19982 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: |
19983 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}}; |
19984 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: |
19985 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 4, .NumEltsD: 4, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}}; |
19986 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: |
19987 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 4, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}}; |
19988 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: |
19989 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 8, .NumEltsD: 4, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}}; |
19990 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: |
19991 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}}; |
19992 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: |
19993 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 4, .NumEltsD: 4, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}}; |
19994 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: |
19995 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 4, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}}; |
19996 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: |
19997 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 8, .NumEltsD: 4, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}}; |
19998 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: |
19999 | return {.NumEltsA: 8, .NumEltsB: 8, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}}; |
20000 | |
20001 | // Integer MMA |
20002 | case NVPTX::BI__imma_m16n16k16_mma_s8: |
20003 | return {.NumEltsA: 2, .NumEltsB: 2, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m16n16k16, s8)}}}; |
20004 | case NVPTX::BI__imma_m16n16k16_mma_u8: |
20005 | return {.NumEltsA: 2, .NumEltsB: 2, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m16n16k16, u8)}}}; |
20006 | case NVPTX::BI__imma_m32n8k16_mma_s8: |
20007 | return {.NumEltsA: 4, .NumEltsB: 1, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m32n8k16, s8)}}}; |
20008 | case NVPTX::BI__imma_m32n8k16_mma_u8: |
20009 | return {.NumEltsA: 4, .NumEltsB: 1, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m32n8k16, u8)}}}; |
20010 | case NVPTX::BI__imma_m8n32k16_mma_s8: |
20011 | return {.NumEltsA: 1, .NumEltsB: 4, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m8n32k16, s8)}}}; |
20012 | case NVPTX::BI__imma_m8n32k16_mma_u8: |
20013 | return {.NumEltsA: 1, .NumEltsB: 4, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_SATF_VARIANTS(m8n32k16, u8)}}}; |
20014 | |
20015 | // Sub-integer MMA |
20016 | case NVPTX::BI__imma_m8n8k32_mma_s4: |
20017 | return {.NumEltsA: 1, .NumEltsB: 1, .NumEltsC: 2, .NumEltsD: 2, .Variants: {._M_elems: {MMA_VARIANTS_I4(m8n8k32, s4)}}}; |
20018 | case NVPTX::BI__imma_m8n8k32_mma_u4: |
20019 | return {.NumEltsA: 1, .NumEltsB: 1, .NumEltsC: 2, .NumEltsD: 2, .Variants: {._M_elems: {MMA_VARIANTS_I4(m8n8k32, u4)}}}; |
20020 | case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: |
20021 | return {.NumEltsA: 1, .NumEltsB: 1, .NumEltsC: 2, .NumEltsD: 2, .Variants: {._M_elems: {MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}}; |
20022 | case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1: |
20023 | return {.NumEltsA: 1, .NumEltsB: 1, .NumEltsC: 2, .NumEltsD: 2, .Variants: {._M_elems: {MMA_VARIANTS_B1_AND(m8n8k128, b1)}}}; |
20024 | |
20025 | // Double MMA |
20026 | case NVPTX::BI__dmma_m8n8k4_mma_f64: |
20027 | return {.NumEltsA: 1, .NumEltsB: 1, .NumEltsC: 2, .NumEltsD: 2, .Variants: {._M_elems: {MMA_VARIANTS(m8n8k4, f64)}}}; |
20028 | |
20029 | // Alternate FP MMA |
20030 | case NVPTX::BI__mma_bf16_m16n16k16_mma_f32: |
20031 | return {.NumEltsA: 4, .NumEltsB: 4, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_VARIANTS(m16n16k16, bf16)}}}; |
20032 | case NVPTX::BI__mma_bf16_m8n32k16_mma_f32: |
20033 | return {.NumEltsA: 2, .NumEltsB: 8, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_VARIANTS(m8n32k16, bf16)}}}; |
20034 | case NVPTX::BI__mma_bf16_m32n8k16_mma_f32: |
20035 | return {.NumEltsA: 8, .NumEltsB: 2, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_VARIANTS(m32n8k16, bf16)}}}; |
20036 | case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: |
20037 | return {.NumEltsA: 4, .NumEltsB: 4, .NumEltsC: 8, .NumEltsD: 8, .Variants: {._M_elems: {MMA_VARIANTS(m16n16k8, tf32)}}}; |
20038 | default: |
20039 | llvm_unreachable("Unexpected builtin ID." ); |
20040 | } |
20041 | #undef MMA_VARIANTS |
20042 | #undef MMA_SATF_VARIANTS |
20043 | #undef MMA_VARIANTS_I4 |
20044 | #undef MMA_VARIANTS_B1_AND |
20045 | #undef MMA_VARIANTS_B1_XOR |
20046 | } |
20047 | |
20048 | static Value *MakeLdgLdu(unsigned IntrinsicID, CodeGenFunction &CGF, |
20049 | const CallExpr *E) { |
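  // Emit an ldg/ldu intrinsic overloaded on the loaded element type and the
  // pointer type; it takes the pointer plus an i32 alignment derived from the
  // natural alignment of the pointee type.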
20050 | Value *Ptr = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
20051 | QualType ArgType = E->getArg(Arg: 0)->getType(); |
20052 | clang::CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(T: ArgType); |
20053 | llvm::Type *ElemTy = CGF.ConvertTypeForMem(T: ArgType->getPointeeType()); |
20054 | return CGF.Builder.CreateCall( |
20055 | Callee: CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: {ElemTy, Ptr->getType()}), |
20056 | Args: {Ptr, ConstantInt::get(Ty: CGF.Builder.getInt32Ty(), V: Align.getQuantity())}); |
20057 | } |
20058 | |
20059 | static Value *MakeScopedAtomic(unsigned IntrinsicID, CodeGenFunction &CGF, |
20060 | const CallExpr *E) { |
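  // Emit a scoped (cta/sys) atomic intrinsic overloaded on the pointee and
  // pointer types, taking the pointer and a single value operand.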
20061 | Value *Ptr = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
20062 | llvm::Type *ElemTy = |
20063 | CGF.ConvertTypeForMem(T: E->getArg(Arg: 0)->getType()->getPointeeType()); |
20064 | return CGF.Builder.CreateCall( |
20065 | Callee: CGF.CGM.getIntrinsic(IID: IntrinsicID, Tys: {ElemTy, Ptr->getType()}), |
20066 | Args: {Ptr, CGF.EmitScalarExpr(E: E->getArg(Arg: 1))}); |
20067 | } |
20068 | |
20069 | static Value *MakeCpAsync(unsigned IntrinsicID, unsigned IntrinsicIDS, |
20070 | CodeGenFunction &CGF, const CallExpr *E, |
20071 | int SrcSize) { |
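  // With three arguments the size-carrying variant (IntrinsicIDS) is emitted
  // with the extra operand; otherwise the plain two-operand form is used.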
20072 | return E->getNumArgs() == 3 |
20073 | ? CGF.Builder.CreateCall(Callee: CGF.CGM.getIntrinsic(IID: IntrinsicIDS), |
20074 | Args: {CGF.EmitScalarExpr(E: E->getArg(Arg: 0)), |
20075 | CGF.EmitScalarExpr(E: E->getArg(Arg: 1)), |
20076 | CGF.EmitScalarExpr(E: E->getArg(Arg: 2))}) |
20077 | : CGF.Builder.CreateCall(Callee: CGF.CGM.getIntrinsic(IID: IntrinsicID), |
20078 | Args: {CGF.EmitScalarExpr(E: E->getArg(Arg: 0)), |
20079 | CGF.EmitScalarExpr(E: E->getArg(Arg: 1))}); |
20080 | } |
20081 | |
20082 | static Value *MakeHalfType(unsigned IntrinsicID, unsigned BuiltinID, |
20083 | const CallExpr *E, CodeGenFunction &CGF) { |
20084 | auto &C = CGF.CGM.getContext(); |
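  // Require either a native half type or a target that does not use FP16
  // conversion intrinsics; i.e. error out when !NativeHalfType &&
  // useFP16ConversionIntrinsics().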
20085 | if (!(C.getLangOpts().NativeHalfType || |
20086 | !C.getTargetInfo().useFP16ConversionIntrinsics())) { |
20087 | CGF.CGM.Error(loc: E->getExprLoc(), error: C.BuiltinInfo.getName(ID: BuiltinID).str() + |
20088 | " requires native half type support." ); |
20089 | return nullptr; |
20090 | } |
20091 | |
20092 | if (IntrinsicID == Intrinsic::nvvm_ldg_global_f || |
20093 | IntrinsicID == Intrinsic::nvvm_ldu_global_f) |
20094 | return MakeLdgLdu(IntrinsicID, CGF, E); |
20095 | |
20096 | SmallVector<Value *, 16> Args; |
20097 | auto *F = CGF.CGM.getIntrinsic(IID: IntrinsicID); |
20098 | auto *FTy = F->getFunctionType(); |
20099 | unsigned ICEArguments = 0; |
20100 | ASTContext::GetBuiltinTypeError Error; |
20101 | C.GetBuiltinType(ID: BuiltinID, Error, IntegerConstantArgs: &ICEArguments); |
20102 | assert(Error == ASTContext::GE_None && "Should not codegen an error" ); |
20103 | for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { |
20104 | assert((ICEArguments & (1 << i)) == 0); |
20105 | auto *ArgValue = CGF.EmitScalarExpr(E: E->getArg(Arg: i)); |
20106 | auto *PTy = FTy->getParamType(i); |
20107 | if (PTy != ArgValue->getType()) |
20108 | ArgValue = CGF.Builder.CreateBitCast(V: ArgValue, DestTy: PTy); |
20109 | Args.push_back(Elt: ArgValue); |
20110 | } |
20111 | |
20112 | return CGF.Builder.CreateCall(Callee: F, Args); |
20113 | } |
20114 | } // namespace |
20115 | |
20116 | Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, |
20117 | const CallExpr *E) { |
20118 | switch (BuiltinID) { |
20119 | case NVPTX::BI__nvvm_atom_add_gen_i: |
20120 | case NVPTX::BI__nvvm_atom_add_gen_l: |
20121 | case NVPTX::BI__nvvm_atom_add_gen_ll: |
20122 | return MakeBinaryAtomicValue(CGF&: *this, Kind: llvm::AtomicRMWInst::Add, E); |
20123 | |
20124 | case NVPTX::BI__nvvm_atom_sub_gen_i: |
20125 | case NVPTX::BI__nvvm_atom_sub_gen_l: |
20126 | case NVPTX::BI__nvvm_atom_sub_gen_ll: |
20127 | return MakeBinaryAtomicValue(CGF&: *this, Kind: llvm::AtomicRMWInst::Sub, E); |
20128 | |
20129 | case NVPTX::BI__nvvm_atom_and_gen_i: |
20130 | case NVPTX::BI__nvvm_atom_and_gen_l: |
20131 | case NVPTX::BI__nvvm_atom_and_gen_ll: |
20132 | return MakeBinaryAtomicValue(CGF&: *this, Kind: llvm::AtomicRMWInst::And, E); |
20133 | |
20134 | case NVPTX::BI__nvvm_atom_or_gen_i: |
20135 | case NVPTX::BI__nvvm_atom_or_gen_l: |
20136 | case NVPTX::BI__nvvm_atom_or_gen_ll: |
20137 | return MakeBinaryAtomicValue(CGF&: *this, Kind: llvm::AtomicRMWInst::Or, E); |
20138 | |
20139 | case NVPTX::BI__nvvm_atom_xor_gen_i: |
20140 | case NVPTX::BI__nvvm_atom_xor_gen_l: |
20141 | case NVPTX::BI__nvvm_atom_xor_gen_ll: |
20142 | return MakeBinaryAtomicValue(CGF&: *this, Kind: llvm::AtomicRMWInst::Xor, E); |
20143 | |
20144 | case NVPTX::BI__nvvm_atom_xchg_gen_i: |
20145 | case NVPTX::BI__nvvm_atom_xchg_gen_l: |
20146 | case NVPTX::BI__nvvm_atom_xchg_gen_ll: |
20147 | return MakeBinaryAtomicValue(CGF&: *this, Kind: llvm::AtomicRMWInst::Xchg, E); |
20148 | |
20149 | case NVPTX::BI__nvvm_atom_max_gen_i: |
20150 | case NVPTX::BI__nvvm_atom_max_gen_l: |
20151 | case NVPTX::BI__nvvm_atom_max_gen_ll: |
20152 | return MakeBinaryAtomicValue(CGF&: *this, Kind: llvm::AtomicRMWInst::Max, E); |
20153 | |
20154 | case NVPTX::BI__nvvm_atom_max_gen_ui: |
20155 | case NVPTX::BI__nvvm_atom_max_gen_ul: |
20156 | case NVPTX::BI__nvvm_atom_max_gen_ull: |
20157 | return MakeBinaryAtomicValue(CGF&: *this, Kind: llvm::AtomicRMWInst::UMax, E); |
20158 | |
20159 | case NVPTX::BI__nvvm_atom_min_gen_i: |
20160 | case NVPTX::BI__nvvm_atom_min_gen_l: |
20161 | case NVPTX::BI__nvvm_atom_min_gen_ll: |
20162 | return MakeBinaryAtomicValue(CGF&: *this, Kind: llvm::AtomicRMWInst::Min, E); |
20163 | |
20164 | case NVPTX::BI__nvvm_atom_min_gen_ui: |
20165 | case NVPTX::BI__nvvm_atom_min_gen_ul: |
20166 | case NVPTX::BI__nvvm_atom_min_gen_ull: |
20167 | return MakeBinaryAtomicValue(CGF&: *this, Kind: llvm::AtomicRMWInst::UMin, E); |
20168 | |
20169 | case NVPTX::BI__nvvm_atom_cas_gen_i: |
20170 | case NVPTX::BI__nvvm_atom_cas_gen_l: |
20171 | case NVPTX::BI__nvvm_atom_cas_gen_ll: |
20172 | // __nvvm_atom_cas_gen_* should return the old value rather than the |
20173 | // success flag. |
20174 | return MakeAtomicCmpXchgValue(CGF&: *this, E, /*ReturnBool=*/false); |
20175 | |
20176 | case NVPTX::BI__nvvm_atom_add_gen_f: |
20177 | case NVPTX::BI__nvvm_atom_add_gen_d: { |
20178 | Address DestAddr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
20179 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 1)); |
20180 | |
20181 | return Builder.CreateAtomicRMW(Op: llvm::AtomicRMWInst::FAdd, Addr: DestAddr, Val, |
20182 | Ordering: AtomicOrdering::SequentiallyConsistent); |
20183 | } |
20184 | |
20185 | case NVPTX::BI__nvvm_atom_inc_gen_ui: { |
20186 | Value *Ptr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
20187 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 1)); |
20188 | Function *FnALI32 = |
20189 | CGM.getIntrinsic(IID: Intrinsic::nvvm_atomic_load_inc_32, Tys: Ptr->getType()); |
20190 | return Builder.CreateCall(Callee: FnALI32, Args: {Ptr, Val}); |
20191 | } |
20192 | |
20193 | case NVPTX::BI__nvvm_atom_dec_gen_ui: { |
20194 | Value *Ptr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
20195 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 1)); |
20196 | Function *FnALD32 = |
20197 | CGM.getIntrinsic(IID: Intrinsic::nvvm_atomic_load_dec_32, Tys: Ptr->getType()); |
20198 | return Builder.CreateCall(Callee: FnALD32, Args: {Ptr, Val}); |
20199 | } |
20200 | |
20201 | case NVPTX::BI__nvvm_ldg_c: |
20202 | case NVPTX::BI__nvvm_ldg_sc: |
20203 | case NVPTX::BI__nvvm_ldg_c2: |
20204 | case NVPTX::BI__nvvm_ldg_sc2: |
20205 | case NVPTX::BI__nvvm_ldg_c4: |
20206 | case NVPTX::BI__nvvm_ldg_sc4: |
20207 | case NVPTX::BI__nvvm_ldg_s: |
20208 | case NVPTX::BI__nvvm_ldg_s2: |
20209 | case NVPTX::BI__nvvm_ldg_s4: |
20210 | case NVPTX::BI__nvvm_ldg_i: |
20211 | case NVPTX::BI__nvvm_ldg_i2: |
20212 | case NVPTX::BI__nvvm_ldg_i4: |
20213 | case NVPTX::BI__nvvm_ldg_l: |
20214 | case NVPTX::BI__nvvm_ldg_l2: |
20215 | case NVPTX::BI__nvvm_ldg_ll: |
20216 | case NVPTX::BI__nvvm_ldg_ll2: |
20217 | case NVPTX::BI__nvvm_ldg_uc: |
20218 | case NVPTX::BI__nvvm_ldg_uc2: |
20219 | case NVPTX::BI__nvvm_ldg_uc4: |
20220 | case NVPTX::BI__nvvm_ldg_us: |
20221 | case NVPTX::BI__nvvm_ldg_us2: |
20222 | case NVPTX::BI__nvvm_ldg_us4: |
20223 | case NVPTX::BI__nvvm_ldg_ui: |
20224 | case NVPTX::BI__nvvm_ldg_ui2: |
20225 | case NVPTX::BI__nvvm_ldg_ui4: |
20226 | case NVPTX::BI__nvvm_ldg_ul: |
20227 | case NVPTX::BI__nvvm_ldg_ul2: |
20228 | case NVPTX::BI__nvvm_ldg_ull: |
20229 | case NVPTX::BI__nvvm_ldg_ull2: |
20230 | // PTX Interoperability section 2.2: "For a vector with an even number of |
20231 | // elements, its alignment is set to number of elements times the alignment |
20232 | // of its member: n*alignof(t)." |
20233 | return MakeLdgLdu(IntrinsicID: Intrinsic::nvvm_ldg_global_i, CGF&: *this, E); |
20234 | case NVPTX::BI__nvvm_ldg_f: |
20235 | case NVPTX::BI__nvvm_ldg_f2: |
20236 | case NVPTX::BI__nvvm_ldg_f4: |
20237 | case NVPTX::BI__nvvm_ldg_d: |
20238 | case NVPTX::BI__nvvm_ldg_d2: |
20239 | return MakeLdgLdu(IntrinsicID: Intrinsic::nvvm_ldg_global_f, CGF&: *this, E); |
20240 | |
20241 | case NVPTX::BI__nvvm_ldu_c: |
20242 | case NVPTX::BI__nvvm_ldu_sc: |
20243 | case NVPTX::BI__nvvm_ldu_c2: |
20244 | case NVPTX::BI__nvvm_ldu_sc2: |
20245 | case NVPTX::BI__nvvm_ldu_c4: |
20246 | case NVPTX::BI__nvvm_ldu_sc4: |
20247 | case NVPTX::BI__nvvm_ldu_s: |
20248 | case NVPTX::BI__nvvm_ldu_s2: |
20249 | case NVPTX::BI__nvvm_ldu_s4: |
20250 | case NVPTX::BI__nvvm_ldu_i: |
20251 | case NVPTX::BI__nvvm_ldu_i2: |
20252 | case NVPTX::BI__nvvm_ldu_i4: |
20253 | case NVPTX::BI__nvvm_ldu_l: |
20254 | case NVPTX::BI__nvvm_ldu_l2: |
20255 | case NVPTX::BI__nvvm_ldu_ll: |
20256 | case NVPTX::BI__nvvm_ldu_ll2: |
20257 | case NVPTX::BI__nvvm_ldu_uc: |
20258 | case NVPTX::BI__nvvm_ldu_uc2: |
20259 | case NVPTX::BI__nvvm_ldu_uc4: |
20260 | case NVPTX::BI__nvvm_ldu_us: |
20261 | case NVPTX::BI__nvvm_ldu_us2: |
20262 | case NVPTX::BI__nvvm_ldu_us4: |
20263 | case NVPTX::BI__nvvm_ldu_ui: |
20264 | case NVPTX::BI__nvvm_ldu_ui2: |
20265 | case NVPTX::BI__nvvm_ldu_ui4: |
20266 | case NVPTX::BI__nvvm_ldu_ul: |
20267 | case NVPTX::BI__nvvm_ldu_ul2: |
20268 | case NVPTX::BI__nvvm_ldu_ull: |
20269 | case NVPTX::BI__nvvm_ldu_ull2: |
20270 | return MakeLdgLdu(IntrinsicID: Intrinsic::nvvm_ldu_global_i, CGF&: *this, E); |
20271 | case NVPTX::BI__nvvm_ldu_f: |
20272 | case NVPTX::BI__nvvm_ldu_f2: |
20273 | case NVPTX::BI__nvvm_ldu_f4: |
20274 | case NVPTX::BI__nvvm_ldu_d: |
20275 | case NVPTX::BI__nvvm_ldu_d2: |
20276 | return MakeLdgLdu(IntrinsicID: Intrinsic::nvvm_ldu_global_f, CGF&: *this, E); |
20277 | |
20278 | case NVPTX::BI__nvvm_atom_cta_add_gen_i: |
20279 | case NVPTX::BI__nvvm_atom_cta_add_gen_l: |
20280 | case NVPTX::BI__nvvm_atom_cta_add_gen_ll: |
20281 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_add_gen_i_cta, CGF&: *this, E); |
20282 | case NVPTX::BI__nvvm_atom_sys_add_gen_i: |
20283 | case NVPTX::BI__nvvm_atom_sys_add_gen_l: |
20284 | case NVPTX::BI__nvvm_atom_sys_add_gen_ll: |
20285 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_add_gen_i_sys, CGF&: *this, E); |
20286 | case NVPTX::BI__nvvm_atom_cta_add_gen_f: |
20287 | case NVPTX::BI__nvvm_atom_cta_add_gen_d: |
20288 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_add_gen_f_cta, CGF&: *this, E); |
20289 | case NVPTX::BI__nvvm_atom_sys_add_gen_f: |
20290 | case NVPTX::BI__nvvm_atom_sys_add_gen_d: |
20291 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_add_gen_f_sys, CGF&: *this, E); |
20292 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_i: |
20293 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_l: |
20294 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll: |
20295 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_exch_gen_i_cta, CGF&: *this, E); |
20296 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_i: |
20297 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_l: |
20298 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll: |
20299 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_exch_gen_i_sys, CGF&: *this, E); |
20300 | case NVPTX::BI__nvvm_atom_cta_max_gen_i: |
20301 | case NVPTX::BI__nvvm_atom_cta_max_gen_ui: |
20302 | case NVPTX::BI__nvvm_atom_cta_max_gen_l: |
20303 | case NVPTX::BI__nvvm_atom_cta_max_gen_ul: |
20304 | case NVPTX::BI__nvvm_atom_cta_max_gen_ll: |
20305 | case NVPTX::BI__nvvm_atom_cta_max_gen_ull: |
20306 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_max_gen_i_cta, CGF&: *this, E); |
20307 | case NVPTX::BI__nvvm_atom_sys_max_gen_i: |
20308 | case NVPTX::BI__nvvm_atom_sys_max_gen_ui: |
20309 | case NVPTX::BI__nvvm_atom_sys_max_gen_l: |
20310 | case NVPTX::BI__nvvm_atom_sys_max_gen_ul: |
20311 | case NVPTX::BI__nvvm_atom_sys_max_gen_ll: |
20312 | case NVPTX::BI__nvvm_atom_sys_max_gen_ull: |
20313 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_max_gen_i_sys, CGF&: *this, E); |
20314 | case NVPTX::BI__nvvm_atom_cta_min_gen_i: |
20315 | case NVPTX::BI__nvvm_atom_cta_min_gen_ui: |
20316 | case NVPTX::BI__nvvm_atom_cta_min_gen_l: |
20317 | case NVPTX::BI__nvvm_atom_cta_min_gen_ul: |
20318 | case NVPTX::BI__nvvm_atom_cta_min_gen_ll: |
20319 | case NVPTX::BI__nvvm_atom_cta_min_gen_ull: |
20320 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_min_gen_i_cta, CGF&: *this, E); |
20321 | case NVPTX::BI__nvvm_atom_sys_min_gen_i: |
20322 | case NVPTX::BI__nvvm_atom_sys_min_gen_ui: |
20323 | case NVPTX::BI__nvvm_atom_sys_min_gen_l: |
20324 | case NVPTX::BI__nvvm_atom_sys_min_gen_ul: |
20325 | case NVPTX::BI__nvvm_atom_sys_min_gen_ll: |
20326 | case NVPTX::BI__nvvm_atom_sys_min_gen_ull: |
20327 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_min_gen_i_sys, CGF&: *this, E); |
20328 | case NVPTX::BI__nvvm_atom_cta_inc_gen_ui: |
20329 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_inc_gen_i_cta, CGF&: *this, E); |
20330 | case NVPTX::BI__nvvm_atom_cta_dec_gen_ui: |
20331 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_dec_gen_i_cta, CGF&: *this, E); |
20332 | case NVPTX::BI__nvvm_atom_sys_inc_gen_ui: |
20333 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_inc_gen_i_sys, CGF&: *this, E); |
20334 | case NVPTX::BI__nvvm_atom_sys_dec_gen_ui: |
20335 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_dec_gen_i_sys, CGF&: *this, E); |
20336 | case NVPTX::BI__nvvm_atom_cta_and_gen_i: |
20337 | case NVPTX::BI__nvvm_atom_cta_and_gen_l: |
20338 | case NVPTX::BI__nvvm_atom_cta_and_gen_ll: |
20339 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_and_gen_i_cta, CGF&: *this, E); |
20340 | case NVPTX::BI__nvvm_atom_sys_and_gen_i: |
20341 | case NVPTX::BI__nvvm_atom_sys_and_gen_l: |
20342 | case NVPTX::BI__nvvm_atom_sys_and_gen_ll: |
20343 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_and_gen_i_sys, CGF&: *this, E); |
20344 | case NVPTX::BI__nvvm_atom_cta_or_gen_i: |
20345 | case NVPTX::BI__nvvm_atom_cta_or_gen_l: |
20346 | case NVPTX::BI__nvvm_atom_cta_or_gen_ll: |
20347 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_or_gen_i_cta, CGF&: *this, E); |
20348 | case NVPTX::BI__nvvm_atom_sys_or_gen_i: |
20349 | case NVPTX::BI__nvvm_atom_sys_or_gen_l: |
20350 | case NVPTX::BI__nvvm_atom_sys_or_gen_ll: |
20351 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_or_gen_i_sys, CGF&: *this, E); |
20352 | case NVPTX::BI__nvvm_atom_cta_xor_gen_i: |
20353 | case NVPTX::BI__nvvm_atom_cta_xor_gen_l: |
20354 | case NVPTX::BI__nvvm_atom_cta_xor_gen_ll: |
20355 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_xor_gen_i_cta, CGF&: *this, E); |
20356 | case NVPTX::BI__nvvm_atom_sys_xor_gen_i: |
20357 | case NVPTX::BI__nvvm_atom_sys_xor_gen_l: |
20358 | case NVPTX::BI__nvvm_atom_sys_xor_gen_ll: |
20359 | return MakeScopedAtomic(IntrinsicID: Intrinsic::nvvm_atomic_xor_gen_i_sys, CGF&: *this, E); |
20360 | case NVPTX::BI__nvvm_atom_cta_cas_gen_i: |
20361 | case NVPTX::BI__nvvm_atom_cta_cas_gen_l: |
20362 | case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: { |
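    // Compare-and-swap: the builtin operands are (ptr, cmp, new). The
    // intrinsic is overloaded on the pointee type and the pointer type.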
20363 | Value *Ptr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
20364 | llvm::Type *ElemTy = |
20365 | ConvertTypeForMem(T: E->getArg(Arg: 0)->getType()->getPointeeType()); |
20366 | return Builder.CreateCall( |
20367 | Callee: CGM.getIntrinsic( |
20368 | IID: Intrinsic::nvvm_atomic_cas_gen_i_cta, Tys: {ElemTy, Ptr->getType()}), |
20369 | Args: {Ptr, EmitScalarExpr(E: E->getArg(Arg: 1)), EmitScalarExpr(E: E->getArg(Arg: 2))}); |
20370 | } |
20371 | case NVPTX::BI__nvvm_atom_sys_cas_gen_i: |
20372 | case NVPTX::BI__nvvm_atom_sys_cas_gen_l: |
20373 | case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: { |
20374 | Value *Ptr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
20375 | llvm::Type *ElemTy = |
20376 | ConvertTypeForMem(T: E->getArg(Arg: 0)->getType()->getPointeeType()); |
20377 | return Builder.CreateCall( |
20378 | Callee: CGM.getIntrinsic( |
20379 | IID: Intrinsic::nvvm_atomic_cas_gen_i_sys, Tys: {ElemTy, Ptr->getType()}), |
20380 | Args: {Ptr, EmitScalarExpr(E: E->getArg(Arg: 1)), EmitScalarExpr(E: E->getArg(Arg: 2))}); |
20381 | } |
20382 | case NVPTX::BI__nvvm_match_all_sync_i32p: |
20383 | case NVPTX::BI__nvvm_match_all_sync_i64p: { |
20384 | Value *Mask = EmitScalarExpr(E: E->getArg(Arg: 0)); |
20385 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 1)); |
20386 | Address PredOutPtr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 2)); |
20387 | Value *ResultPair = Builder.CreateCall( |
20388 | Callee: CGM.getIntrinsic(IID: BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p |
20389 | ? Intrinsic::nvvm_match_all_sync_i32p |
20390 | : Intrinsic::nvvm_match_all_sync_i64p), |
20391 | Args: {Mask, Val}); |
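    // The intrinsic returns a {result, predicate} pair. Store the boolean
    // predicate through the third argument (zero-extended to its memory type)
    // and return the scalar result.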
20392 | Value *Pred = Builder.CreateZExt(V: Builder.CreateExtractValue(Agg: ResultPair, Idxs: 1), |
20393 | DestTy: PredOutPtr.getElementType()); |
20394 | Builder.CreateStore(Val: Pred, Addr: PredOutPtr); |
20395 | return Builder.CreateExtractValue(Agg: ResultPair, Idxs: 0); |
20396 | } |
20397 | |
20398 | // FP MMA loads |
20399 | case NVPTX::BI__hmma_m16n16k16_ld_a: |
20400 | case NVPTX::BI__hmma_m16n16k16_ld_b: |
20401 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: |
20402 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: |
20403 | case NVPTX::BI__hmma_m32n8k16_ld_a: |
20404 | case NVPTX::BI__hmma_m32n8k16_ld_b: |
20405 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: |
20406 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: |
20407 | case NVPTX::BI__hmma_m8n32k16_ld_a: |
20408 | case NVPTX::BI__hmma_m8n32k16_ld_b: |
20409 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: |
20410 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: |
20411 | // Integer MMA loads. |
20412 | case NVPTX::BI__imma_m16n16k16_ld_a_s8: |
20413 | case NVPTX::BI__imma_m16n16k16_ld_a_u8: |
20414 | case NVPTX::BI__imma_m16n16k16_ld_b_s8: |
20415 | case NVPTX::BI__imma_m16n16k16_ld_b_u8: |
20416 | case NVPTX::BI__imma_m16n16k16_ld_c: |
20417 | case NVPTX::BI__imma_m32n8k16_ld_a_s8: |
20418 | case NVPTX::BI__imma_m32n8k16_ld_a_u8: |
20419 | case NVPTX::BI__imma_m32n8k16_ld_b_s8: |
20420 | case NVPTX::BI__imma_m32n8k16_ld_b_u8: |
20421 | case NVPTX::BI__imma_m32n8k16_ld_c: |
20422 | case NVPTX::BI__imma_m8n32k16_ld_a_s8: |
20423 | case NVPTX::BI__imma_m8n32k16_ld_a_u8: |
20424 | case NVPTX::BI__imma_m8n32k16_ld_b_s8: |
20425 | case NVPTX::BI__imma_m8n32k16_ld_b_u8: |
20426 | case NVPTX::BI__imma_m8n32k16_ld_c: |
20427 | // Sub-integer MMA loads. |
20428 | case NVPTX::BI__imma_m8n8k32_ld_a_s4: |
20429 | case NVPTX::BI__imma_m8n8k32_ld_a_u4: |
20430 | case NVPTX::BI__imma_m8n8k32_ld_b_s4: |
20431 | case NVPTX::BI__imma_m8n8k32_ld_b_u4: |
20432 | case NVPTX::BI__imma_m8n8k32_ld_c: |
20433 | case NVPTX::BI__bmma_m8n8k128_ld_a_b1: |
20434 | case NVPTX::BI__bmma_m8n8k128_ld_b_b1: |
20435 | case NVPTX::BI__bmma_m8n8k128_ld_c: |
20436 | // Double MMA loads. |
20437 | case NVPTX::BI__dmma_m8n8k4_ld_a: |
20438 | case NVPTX::BI__dmma_m8n8k4_ld_b: |
20439 | case NVPTX::BI__dmma_m8n8k4_ld_c: |
20440 | // Alternate float MMA loads. |
20441 | case NVPTX::BI__mma_bf16_m16n16k16_ld_a: |
20442 | case NVPTX::BI__mma_bf16_m16n16k16_ld_b: |
20443 | case NVPTX::BI__mma_bf16_m8n32k16_ld_a: |
20444 | case NVPTX::BI__mma_bf16_m8n32k16_ld_b: |
20445 | case NVPTX::BI__mma_bf16_m32n8k16_ld_a: |
20446 | case NVPTX::BI__mma_bf16_m32n8k16_ld_b: |
20447 | case NVPTX::BI__mma_tf32_m16n16k8_ld_a: |
20448 | case NVPTX::BI__mma_tf32_m16n16k8_ld_b: |
20449 | case NVPTX::BI__mma_tf32_m16n16k8_ld_c: { |
20450 | Address Dst = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
20451 | Value *Src = EmitScalarExpr(E: E->getArg(Arg: 1)); |
20452 | Value *Ldm = EmitScalarExpr(E: E->getArg(Arg: 2)); |
20453 | std::optional<llvm::APSInt> isColMajorArg = |
20454 | E->getArg(Arg: 3)->getIntegerConstantExpr(Ctx: getContext()); |
20455 | if (!isColMajorArg) |
20456 | return nullptr; |
20457 | bool isColMajor = isColMajorArg->getSExtValue(); |
20458 | NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID); |
20459 | unsigned IID = isColMajor ? II.IID_col : II.IID_row; |
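    // An IID of 0 means the requested layout is not supported by this builtin.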
20460 | if (IID == 0) |
20461 | return nullptr; |
20462 | |
20463 | Value *Result = |
20464 | Builder.CreateCall(Callee: CGM.getIntrinsic(IID, Tys: Src->getType()), Args: {Src, Ldm}); |
20465 | |
20466 | // Save returned values. |
20467 | assert(II.NumResults); |
20468 | if (II.NumResults == 1) { |
20469 | Builder.CreateAlignedStore(Val: Result, Addr: Dst.emitRawPointer(CGF&: *this), |
20470 | Align: CharUnits::fromQuantity(Quantity: 4)); |
20471 | } else { |
20472 | for (unsigned i = 0; i < II.NumResults; ++i) { |
20473 | Builder.CreateAlignedStore( |
20474 | Val: Builder.CreateBitCast(V: Builder.CreateExtractValue(Agg: Result, Idxs: i), |
20475 | DestTy: Dst.getElementType()), |
20476 | Addr: Builder.CreateGEP(Ty: Dst.getElementType(), Ptr: Dst.emitRawPointer(CGF&: *this), |
20477 | IdxList: llvm::ConstantInt::get(Ty: IntTy, V: i)), |
20478 | Align: CharUnits::fromQuantity(Quantity: 4)); |
20479 | } |
20480 | } |
20481 | return Result; |
20482 | } |
20483 | |
20484 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: |
20485 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: |
20486 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: |
20487 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: |
20488 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: |
20489 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: |
20490 | case NVPTX::BI__imma_m16n16k16_st_c_i32: |
20491 | case NVPTX::BI__imma_m32n8k16_st_c_i32: |
20492 | case NVPTX::BI__imma_m8n32k16_st_c_i32: |
20493 | case NVPTX::BI__imma_m8n8k32_st_c_i32: |
20494 | case NVPTX::BI__bmma_m8n8k128_st_c_i32: |
20495 | case NVPTX::BI__dmma_m8n8k4_st_c_f64: |
20496 | case NVPTX::BI__mma_m16n16k8_st_c_f32: { |
20497 | Value *Dst = EmitScalarExpr(E: E->getArg(Arg: 0)); |
20498 | Address Src = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
20499 | Value *Ldm = EmitScalarExpr(E: E->getArg(Arg: 2)); |
20500 | std::optional<llvm::APSInt> isColMajorArg = |
20501 | E->getArg(Arg: 3)->getIntegerConstantExpr(Ctx: getContext()); |
20502 | if (!isColMajorArg) |
20503 | return nullptr; |
20504 | bool isColMajor = isColMajorArg->getSExtValue(); |
20505 | NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID); |
20506 | unsigned IID = isColMajor ? II.IID_col : II.IID_row; |
20507 | if (IID == 0) |
20508 | return nullptr; |
20509 | Function *Intrinsic = |
20510 | CGM.getIntrinsic(IID, Tys: Dst->getType()); |
20511 | llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(i: 1); |
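    // Copy the NumResults fragment elements out of Src, bitcasting each to the
    // element type the intrinsic expects, then append the ldm stride operand.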
20512 | SmallVector<Value *, 10> Values = {Dst}; |
20513 | for (unsigned i = 0; i < II.NumResults; ++i) { |
20514 | Value *V = Builder.CreateAlignedLoad( |
20515 | Ty: Src.getElementType(), |
20516 | Addr: Builder.CreateGEP(Ty: Src.getElementType(), Ptr: Src.emitRawPointer(CGF&: *this), |
20517 | IdxList: llvm::ConstantInt::get(Ty: IntTy, V: i)), |
20518 | Align: CharUnits::fromQuantity(Quantity: 4)); |
20519 | Values.push_back(Elt: Builder.CreateBitCast(V, DestTy: ParamType)); |
20520 | } |
20521 | Values.push_back(Elt: Ldm); |
20522 | Value *Result = Builder.CreateCall(Callee: Intrinsic, Args: Values); |
20523 | return Result; |
20524 | } |
20525 | |
20526 | // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) --> |
20527 | // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf> |
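  // For example, __hmma_m16n16k16_mma_f32f16(d, a, b, c, layout, satf) picks
  // one of the m16n16k16 mma intrinsic variants based on the row/col layouts
  // of A and B and the saturation flag; the selection is table-driven via
  // getNVPTXMmaInfo().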
20528 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: |
20529 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: |
20530 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: |
20531 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: |
20532 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: |
20533 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: |
20534 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: |
20535 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: |
20536 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: |
20537 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: |
20538 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: |
20539 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: |
20540 | case NVPTX::BI__imma_m16n16k16_mma_s8: |
20541 | case NVPTX::BI__imma_m16n16k16_mma_u8: |
20542 | case NVPTX::BI__imma_m32n8k16_mma_s8: |
20543 | case NVPTX::BI__imma_m32n8k16_mma_u8: |
20544 | case NVPTX::BI__imma_m8n32k16_mma_s8: |
20545 | case NVPTX::BI__imma_m8n32k16_mma_u8: |
20546 | case NVPTX::BI__imma_m8n8k32_mma_s4: |
20547 | case NVPTX::BI__imma_m8n8k32_mma_u4: |
20548 | case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: |
20549 | case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1: |
20550 | case NVPTX::BI__dmma_m8n8k4_mma_f64: |
20551 | case NVPTX::BI__mma_bf16_m16n16k16_mma_f32: |
20552 | case NVPTX::BI__mma_bf16_m8n32k16_mma_f32: |
20553 | case NVPTX::BI__mma_bf16_m32n8k16_mma_f32: |
20554 | case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: { |
20555 | Address Dst = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
20556 | Address SrcA = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
20557 | Address SrcB = EmitPointerWithAlignment(Addr: E->getArg(Arg: 2)); |
20558 | Address SrcC = EmitPointerWithAlignment(Addr: E->getArg(Arg: 3)); |
20559 | std::optional<llvm::APSInt> LayoutArg = |
20560 | E->getArg(Arg: 4)->getIntegerConstantExpr(Ctx: getContext()); |
20561 | if (!LayoutArg) |
20562 | return nullptr; |
20563 | int Layout = LayoutArg->getSExtValue(); |
20564 | if (Layout < 0 || Layout > 3) |
20565 | return nullptr; |
20566 | llvm::APSInt SatfArg; |
20567 | if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 || |
20568 | BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1) |
20569 | SatfArg = 0; // .b1 does not have satf argument. |
20570 | else if (std::optional<llvm::APSInt> OptSatfArg = |
20571 | E->getArg(Arg: 5)->getIntegerConstantExpr(Ctx: getContext())) |
20572 | SatfArg = *OptSatfArg; |
20573 | else |
20574 | return nullptr; |
20575 | bool Satf = SatfArg.getSExtValue(); |
20576 | NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID); |
20577 | unsigned IID = MI.getMMAIntrinsic(Layout, Satf); |
20578 | if (IID == 0) // Unsupported combination of Layout/Satf. |
20579 | return nullptr; |
20580 | |
20581 | SmallVector<Value *, 24> Values; |
20582 | Function *Intrinsic = CGM.getIntrinsic(IID); |
20583 | llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(i: 0); |
20584 | // Load A |
20585 | for (unsigned i = 0; i < MI.NumEltsA; ++i) { |
20586 | Value *V = Builder.CreateAlignedLoad( |
20587 | Ty: SrcA.getElementType(), |
20588 | Addr: Builder.CreateGEP(Ty: SrcA.getElementType(), Ptr: SrcA.emitRawPointer(CGF&: *this), |
20589 | IdxList: llvm::ConstantInt::get(Ty: IntTy, V: i)), |
20590 | Align: CharUnits::fromQuantity(Quantity: 4)); |
20591 | Values.push_back(Elt: Builder.CreateBitCast(V, DestTy: AType)); |
20592 | } |
20593 | // Load B |
20594 | llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(i: MI.NumEltsA); |
20595 | for (unsigned i = 0; i < MI.NumEltsB; ++i) { |
20596 | Value *V = Builder.CreateAlignedLoad( |
20597 | Ty: SrcB.getElementType(), |
20598 | Addr: Builder.CreateGEP(Ty: SrcB.getElementType(), Ptr: SrcB.emitRawPointer(CGF&: *this), |
20599 | IdxList: llvm::ConstantInt::get(Ty: IntTy, V: i)), |
20600 | Align: CharUnits::fromQuantity(Quantity: 4)); |
20601 | Values.push_back(Elt: Builder.CreateBitCast(V, DestTy: BType)); |
20602 | } |
20603 | // Load C |
20604 | llvm::Type *CType = |
20605 | Intrinsic->getFunctionType()->getParamType(i: MI.NumEltsA + MI.NumEltsB); |
20606 | for (unsigned i = 0; i < MI.NumEltsC; ++i) { |
20607 | Value *V = Builder.CreateAlignedLoad( |
20608 | Ty: SrcC.getElementType(), |
20609 | Addr: Builder.CreateGEP(Ty: SrcC.getElementType(), Ptr: SrcC.emitRawPointer(CGF&: *this), |
20610 | IdxList: llvm::ConstantInt::get(Ty: IntTy, V: i)), |
20611 | Align: CharUnits::fromQuantity(Quantity: 4)); |
20612 | Values.push_back(Elt: Builder.CreateBitCast(V, DestTy: CType)); |
20613 | } |
20614 | Value *Result = Builder.CreateCall(Callee: Intrinsic, Args: Values); |
20615 | llvm::Type *DType = Dst.getElementType(); |
20616 | for (unsigned i = 0; i < MI.NumEltsD; ++i) |
20617 | Builder.CreateAlignedStore( |
20618 | Val: Builder.CreateBitCast(V: Builder.CreateExtractValue(Agg: Result, Idxs: i), DestTy: DType), |
20619 | Addr: Builder.CreateGEP(Ty: Dst.getElementType(), Ptr: Dst.emitRawPointer(CGF&: *this), |
20620 | IdxList: llvm::ConstantInt::get(Ty: IntTy, V: i)), |
20621 | Align: CharUnits::fromQuantity(Quantity: 4)); |
20622 | return Result; |
20623 | } |
20624 | // The following builtins require half type support |
20625 | case NVPTX::BI__nvvm_ex2_approx_f16: |
20626 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_ex2_approx_f16, BuiltinID, E, CGF&: *this); |
20627 | case NVPTX::BI__nvvm_ex2_approx_f16x2: |
20628 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_ex2_approx_f16x2, BuiltinID, E, CGF&: *this); |
20629 | case NVPTX::BI__nvvm_ff2f16x2_rn: |
20630 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_ff2f16x2_rn, BuiltinID, E, CGF&: *this); |
20631 | case NVPTX::BI__nvvm_ff2f16x2_rn_relu: |
20632 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_ff2f16x2_rn_relu, BuiltinID, E, CGF&: *this); |
20633 | case NVPTX::BI__nvvm_ff2f16x2_rz: |
20634 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_ff2f16x2_rz, BuiltinID, E, CGF&: *this); |
20635 | case NVPTX::BI__nvvm_ff2f16x2_rz_relu: |
20636 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_ff2f16x2_rz_relu, BuiltinID, E, CGF&: *this); |
20637 | case NVPTX::BI__nvvm_fma_rn_f16: |
20638 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_f16, BuiltinID, E, CGF&: *this); |
20639 | case NVPTX::BI__nvvm_fma_rn_f16x2: |
20640 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_f16x2, BuiltinID, E, CGF&: *this); |
20641 | case NVPTX::BI__nvvm_fma_rn_ftz_f16: |
20642 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_ftz_f16, BuiltinID, E, CGF&: *this); |
20643 | case NVPTX::BI__nvvm_fma_rn_ftz_f16x2: |
20644 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_ftz_f16x2, BuiltinID, E, CGF&: *this); |
20645 | case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16: |
20646 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_ftz_relu_f16, BuiltinID, E, |
20647 | CGF&: *this); |
20648 | case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16x2: |
20649 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_ftz_relu_f16x2, BuiltinID, E, |
20650 | CGF&: *this); |
20651 | case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16: |
20652 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_ftz_sat_f16, BuiltinID, E, |
20653 | CGF&: *this); |
20654 | case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16x2: |
20655 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_ftz_sat_f16x2, BuiltinID, E, |
20656 | CGF&: *this); |
20657 | case NVPTX::BI__nvvm_fma_rn_relu_f16: |
20658 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_relu_f16, BuiltinID, E, CGF&: *this); |
20659 | case NVPTX::BI__nvvm_fma_rn_relu_f16x2: |
20660 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_relu_f16x2, BuiltinID, E, CGF&: *this); |
20661 | case NVPTX::BI__nvvm_fma_rn_sat_f16: |
20662 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_sat_f16, BuiltinID, E, CGF&: *this); |
20663 | case NVPTX::BI__nvvm_fma_rn_sat_f16x2: |
20664 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fma_rn_sat_f16x2, BuiltinID, E, CGF&: *this); |
20665 | case NVPTX::BI__nvvm_fmax_f16: |
20666 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_f16, BuiltinID, E, CGF&: *this); |
20667 | case NVPTX::BI__nvvm_fmax_f16x2: |
20668 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_f16x2, BuiltinID, E, CGF&: *this); |
20669 | case NVPTX::BI__nvvm_fmax_ftz_f16: |
20670 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_ftz_f16, BuiltinID, E, CGF&: *this); |
20671 | case NVPTX::BI__nvvm_fmax_ftz_f16x2: |
20672 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_ftz_f16x2, BuiltinID, E, CGF&: *this); |
20673 | case NVPTX::BI__nvvm_fmax_ftz_nan_f16: |
20674 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_ftz_nan_f16, BuiltinID, E, CGF&: *this); |
20675 | case NVPTX::BI__nvvm_fmax_ftz_nan_f16x2: |
20676 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_ftz_nan_f16x2, BuiltinID, E, |
20677 | CGF&: *this); |
20678 | case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16: |
20679 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16, BuiltinID, |
20680 | E, CGF&: *this); |
20681 | case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16x2: |
20682 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16x2, |
20683 | BuiltinID, E, CGF&: *this); |
20684 | case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16: |
20685 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16, BuiltinID, E, |
20686 | CGF&: *this); |
20687 | case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16x2: |
20688 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16x2, BuiltinID, |
20689 | E, CGF&: *this); |
20690 | case NVPTX::BI__nvvm_fmax_nan_f16: |
20691 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_nan_f16, BuiltinID, E, CGF&: *this); |
20692 | case NVPTX::BI__nvvm_fmax_nan_f16x2: |
20693 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_nan_f16x2, BuiltinID, E, CGF&: *this); |
20694 | case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16: |
20695 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_nan_xorsign_abs_f16, BuiltinID, E, |
20696 | CGF&: *this); |
20697 | case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16x2: |
20698 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_nan_xorsign_abs_f16x2, BuiltinID, |
20699 | E, CGF&: *this); |
20700 | case NVPTX::BI__nvvm_fmax_xorsign_abs_f16: |
20701 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_xorsign_abs_f16, BuiltinID, E, |
20702 | CGF&: *this); |
20703 | case NVPTX::BI__nvvm_fmax_xorsign_abs_f16x2: |
20704 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmax_xorsign_abs_f16x2, BuiltinID, E, |
20705 | CGF&: *this); |
20706 | case NVPTX::BI__nvvm_fmin_f16: |
20707 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_f16, BuiltinID, E, CGF&: *this); |
20708 | case NVPTX::BI__nvvm_fmin_f16x2: |
20709 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_f16x2, BuiltinID, E, CGF&: *this); |
20710 | case NVPTX::BI__nvvm_fmin_ftz_f16: |
20711 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_ftz_f16, BuiltinID, E, CGF&: *this); |
20712 | case NVPTX::BI__nvvm_fmin_ftz_f16x2: |
20713 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_ftz_f16x2, BuiltinID, E, CGF&: *this); |
20714 | case NVPTX::BI__nvvm_fmin_ftz_nan_f16: |
20715 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_ftz_nan_f16, BuiltinID, E, CGF&: *this); |
20716 | case NVPTX::BI__nvvm_fmin_ftz_nan_f16x2: |
20717 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_ftz_nan_f16x2, BuiltinID, E, |
20718 | CGF&: *this); |
20719 | case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16: |
20720 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16, BuiltinID, |
20721 | E, CGF&: *this); |
20722 | case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16x2: |
20723 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16x2, |
20724 | BuiltinID, E, CGF&: *this); |
20725 | case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16: |
20726 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16, BuiltinID, E, |
20727 | CGF&: *this); |
20728 | case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16x2: |
20729 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16x2, BuiltinID, |
20730 | E, CGF&: *this); |
20731 | case NVPTX::BI__nvvm_fmin_nan_f16: |
20732 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_nan_f16, BuiltinID, E, CGF&: *this); |
20733 | case NVPTX::BI__nvvm_fmin_nan_f16x2: |
20734 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_nan_f16x2, BuiltinID, E, CGF&: *this); |
20735 | case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16: |
20736 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_nan_xorsign_abs_f16, BuiltinID, E, |
20737 | CGF&: *this); |
20738 | case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16x2: |
20739 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_nan_xorsign_abs_f16x2, BuiltinID, |
20740 | E, CGF&: *this); |
20741 | case NVPTX::BI__nvvm_fmin_xorsign_abs_f16: |
20742 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_xorsign_abs_f16, BuiltinID, E, |
20743 | CGF&: *this); |
20744 | case NVPTX::BI__nvvm_fmin_xorsign_abs_f16x2: |
20745 | return MakeHalfType(IntrinsicID: Intrinsic::nvvm_fmin_xorsign_abs_f16x2, BuiltinID, E, |
20746 | CGF&: *this); |
  case NVPTX::BI__nvvm_ldg_h:
  case NVPTX::BI__nvvm_ldg_h2:
    return MakeHalfType(IntrinsicID: Intrinsic::nvvm_ldg_global_f, BuiltinID, E, CGF&: *this);
  case NVPTX::BI__nvvm_ldu_h:
  case NVPTX::BI__nvvm_ldu_h2:
    return MakeHalfType(IntrinsicID: Intrinsic::nvvm_ldu_global_f, BuiltinID, E, CGF&: *this);
20756 | case NVPTX::BI__nvvm_cp_async_ca_shared_global_4: |
20757 | return MakeCpAsync(IntrinsicID: Intrinsic::nvvm_cp_async_ca_shared_global_4, |
20758 | IntrinsicIDS: Intrinsic::nvvm_cp_async_ca_shared_global_4_s, CGF&: *this, E, |
20759 | SrcSize: 4); |
20760 | case NVPTX::BI__nvvm_cp_async_ca_shared_global_8: |
20761 | return MakeCpAsync(IntrinsicID: Intrinsic::nvvm_cp_async_ca_shared_global_8, |
20762 | IntrinsicIDS: Intrinsic::nvvm_cp_async_ca_shared_global_8_s, CGF&: *this, E, |
20763 | SrcSize: 8); |
20764 | case NVPTX::BI__nvvm_cp_async_ca_shared_global_16: |
20765 | return MakeCpAsync(IntrinsicID: Intrinsic::nvvm_cp_async_ca_shared_global_16, |
20766 | IntrinsicIDS: Intrinsic::nvvm_cp_async_ca_shared_global_16_s, CGF&: *this, E, |
20767 | SrcSize: 16); |
20768 | case NVPTX::BI__nvvm_cp_async_cg_shared_global_16: |
20769 | return MakeCpAsync(IntrinsicID: Intrinsic::nvvm_cp_async_cg_shared_global_16, |
20770 | IntrinsicIDS: Intrinsic::nvvm_cp_async_cg_shared_global_16_s, CGF&: *this, E, |
20771 | SrcSize: 16); |
20772 | case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_x: |
20773 | return Builder.CreateCall( |
20774 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_clusterid_x)); |
20775 | case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_y: |
20776 | return Builder.CreateCall( |
20777 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_clusterid_y)); |
20778 | case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_z: |
20779 | return Builder.CreateCall( |
20780 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_clusterid_z)); |
20781 | case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_w: |
20782 | return Builder.CreateCall( |
20783 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_clusterid_w)); |
20784 | case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_x: |
20785 | return Builder.CreateCall( |
20786 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_nclusterid_x)); |
20787 | case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_y: |
20788 | return Builder.CreateCall( |
20789 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_nclusterid_y)); |
20790 | case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_z: |
20791 | return Builder.CreateCall( |
20792 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_nclusterid_z)); |
20793 | case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_w: |
20794 | return Builder.CreateCall( |
20795 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_nclusterid_w)); |
20796 | case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_x: |
20797 | return Builder.CreateCall( |
20798 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_x)); |
20799 | case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_y: |
20800 | return Builder.CreateCall( |
20801 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_y)); |
20802 | case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_z: |
20803 | return Builder.CreateCall( |
20804 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_z)); |
20805 | case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_w: |
20806 | return Builder.CreateCall( |
20807 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_w)); |
20808 | case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_x: |
20809 | return Builder.CreateCall( |
20810 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_x)); |
20811 | case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_y: |
20812 | return Builder.CreateCall( |
20813 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_y)); |
20814 | case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_z: |
20815 | return Builder.CreateCall( |
20816 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_z)); |
20817 | case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_w: |
20818 | return Builder.CreateCall( |
20819 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_w)); |
20820 | case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctarank: |
20821 | return Builder.CreateCall( |
20822 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_cluster_ctarank)); |
20823 | case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctarank: |
20824 | return Builder.CreateCall( |
20825 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_read_ptx_sreg_cluster_nctarank)); |
20826 | case NVPTX::BI__nvvm_is_explicit_cluster: |
20827 | return Builder.CreateCall( |
20828 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_is_explicit_cluster)); |
20829 | case NVPTX::BI__nvvm_isspacep_shared_cluster: |
20830 | return Builder.CreateCall( |
20831 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_isspacep_shared_cluster), |
20832 | Args: EmitScalarExpr(E: E->getArg(Arg: 0))); |
20833 | case NVPTX::BI__nvvm_mapa: |
20834 | return Builder.CreateCall( |
20835 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_mapa), |
20836 | Args: {EmitScalarExpr(E: E->getArg(Arg: 0)), EmitScalarExpr(E: E->getArg(Arg: 1))}); |
20837 | case NVPTX::BI__nvvm_mapa_shared_cluster: |
20838 | return Builder.CreateCall( |
20839 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_mapa_shared_cluster), |
20840 | Args: {EmitScalarExpr(E: E->getArg(Arg: 0)), EmitScalarExpr(E: E->getArg(Arg: 1))}); |
20841 | case NVPTX::BI__nvvm_getctarank: |
20842 | return Builder.CreateCall( |
20843 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_getctarank), |
20844 | Args: EmitScalarExpr(E: E->getArg(Arg: 0))); |
20845 | case NVPTX::BI__nvvm_getctarank_shared_cluster: |
20846 | return Builder.CreateCall( |
20847 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_getctarank_shared_cluster), |
20848 | Args: EmitScalarExpr(E: E->getArg(Arg: 0))); |
20849 | case NVPTX::BI__nvvm_barrier_cluster_arrive: |
20850 | return Builder.CreateCall( |
20851 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_barrier_cluster_arrive)); |
20852 | case NVPTX::BI__nvvm_barrier_cluster_arrive_relaxed: |
20853 | return Builder.CreateCall( |
20854 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_barrier_cluster_arrive_relaxed)); |
20855 | case NVPTX::BI__nvvm_barrier_cluster_wait: |
20856 | return Builder.CreateCall( |
20857 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_barrier_cluster_wait)); |
20858 | case NVPTX::BI__nvvm_fence_sc_cluster: |
20859 | return Builder.CreateCall( |
20860 | Callee: CGM.getIntrinsic(IID: Intrinsic::nvvm_fence_sc_cluster)); |
20861 | default: |
20862 | return nullptr; |
20863 | } |
20864 | } |
20865 | |
20866 | namespace { |
20867 | struct BuiltinAlignArgs { |
20868 | llvm::Value *Src = nullptr; |
20869 | llvm::Type *SrcType = nullptr; |
20870 | llvm::Value *Alignment = nullptr; |
20871 | llvm::Value *Mask = nullptr; |
20872 | llvm::IntegerType *IntType = nullptr; |
20873 | |
20874 | BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) { |
20875 | QualType AstType = E->getArg(Arg: 0)->getType(); |
20876 | if (AstType->isArrayType()) |
20877 | Src = CGF.EmitArrayToPointerDecay(Array: E->getArg(Arg: 0)).emitRawPointer(CGF); |
20878 | else |
20879 | Src = CGF.EmitScalarExpr(E: E->getArg(Arg: 0)); |
20880 | SrcType = Src->getType(); |
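    // Pointer arguments are masked in the target's pointer-index width;
    // integer arguments are masked in their own type.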
20881 | if (SrcType->isPointerTy()) { |
20882 | IntType = IntegerType::get( |
20883 | C&: CGF.getLLVMContext(), |
20884 | NumBits: CGF.CGM.getDataLayout().getIndexTypeSizeInBits(Ty: SrcType)); |
20885 | } else { |
20886 | assert(SrcType->isIntegerTy()); |
20887 | IntType = cast<llvm::IntegerType>(Val: SrcType); |
20888 | } |
20889 | Alignment = CGF.EmitScalarExpr(E: E->getArg(Arg: 1)); |
20890 | Alignment = CGF.Builder.CreateZExtOrTrunc(V: Alignment, DestTy: IntType, Name: "alignment" ); |
20891 | auto *One = llvm::ConstantInt::get(Ty: IntType, V: 1); |
20892 | Mask = CGF.Builder.CreateSub(LHS: Alignment, RHS: One, Name: "mask" ); |
20893 | } |
20894 | }; |
20895 | } // namespace |
20896 | |
/// Generate (x & (y-1)) == 0, i.e. test whether x is aligned to the
/// (power-of-two) alignment y.
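/// For example, for a pointer argument __builtin_is_aligned(p, 64) lowers
/// roughly to the following (value names match the ones used below; the
/// integer width depends on the target's index size):
///   %src_addr   = ptrtoint ptr %p to i64
///   %set_bits   = and i64 %src_addr, 63
///   %is_aligned = icmp eq i64 %set_bits, 0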
20898 | RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) { |
20899 | BuiltinAlignArgs Args(E, *this); |
20900 | llvm::Value *SrcAddress = Args.Src; |
20901 | if (Args.SrcType->isPointerTy()) |
20902 | SrcAddress = |
20903 | Builder.CreateBitOrPointerCast(V: Args.Src, DestTy: Args.IntType, Name: "src_addr" ); |
20904 | return RValue::get(V: Builder.CreateICmpEQ( |
20905 | LHS: Builder.CreateAnd(LHS: SrcAddress, RHS: Args.Mask, Name: "set_bits" ), |
20906 | RHS: llvm::Constant::getNullValue(Ty: Args.IntType), Name: "is_aligned" )); |
20907 | } |
20908 | |
20909 | /// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up. |
/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
/// llvm.ptrmask intrinsic (preceded by a GEP in the align_up case).
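/// For example, for a pointer argument __builtin_align_up(p, 64) lowers
/// roughly to the following (illustrative; the GEP may instead be an inbounds
/// GEP with overflow checks, depending on language options):
///   %over_boundary  = getelementptr i8, ptr %p, i64 63
///   %aligned_result = call ptr @llvm.ptrmask.p0.i64(ptr %over_boundary, i64 -64)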
20912 | RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) { |
20913 | BuiltinAlignArgs Args(E, *this); |
20914 | llvm::Value *SrcForMask = Args.Src; |
20915 | if (AlignUp) { |
    // When aligning up, first add the mask so that we move past the current
    // alignment boundary, then mask down to the next valid multiple. Adding
    // the mask (alignment - 1) rather than the full alignment ensures that
    // align_up on an already-aligned value leaves it unchanged.
20920 | if (Args.Src->getType()->isPointerTy()) { |
20921 | if (getLangOpts().isSignedOverflowDefined()) |
20922 | SrcForMask = |
20923 | Builder.CreateGEP(Ty: Int8Ty, Ptr: SrcForMask, IdxList: Args.Mask, Name: "over_boundary" ); |
20924 | else |
20925 | SrcForMask = EmitCheckedInBoundsGEP(ElemTy: Int8Ty, Ptr: SrcForMask, IdxList: Args.Mask, |
20926 | /*SignedIndices=*/true, |
20927 | /*isSubtraction=*/IsSubtraction: false, |
20928 | Loc: E->getExprLoc(), Name: "over_boundary" ); |
20929 | } else { |
20930 | SrcForMask = Builder.CreateAdd(LHS: SrcForMask, RHS: Args.Mask, Name: "over_boundary" ); |
20931 | } |
20932 | } |
20933 | // Invert the mask to only clear the lower bits. |
20934 | llvm::Value *InvertedMask = Builder.CreateNot(V: Args.Mask, Name: "inverted_mask" ); |
20935 | llvm::Value *Result = nullptr; |
20936 | if (Args.Src->getType()->isPointerTy()) { |
20937 | Result = Builder.CreateIntrinsic( |
20938 | ID: Intrinsic::ptrmask, Types: {Args.SrcType, Args.IntType}, |
20939 | Args: {SrcForMask, InvertedMask}, FMFSource: nullptr, Name: "aligned_result" ); |
20940 | } else { |
20941 | Result = Builder.CreateAnd(LHS: SrcForMask, RHS: InvertedMask, Name: "aligned_result" ); |
20942 | } |
20943 | assert(Result->getType() == Args.SrcType); |
20944 | return RValue::get(V: Result); |
20945 | } |
20946 | |
20947 | Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, |
20948 | const CallExpr *E) { |
20949 | switch (BuiltinID) { |
20950 | case WebAssembly::BI__builtin_wasm_memory_size: { |
20951 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
20952 | Value *I = EmitScalarExpr(E: E->getArg(Arg: 0)); |
20953 | Function *Callee = |
20954 | CGM.getIntrinsic(IID: Intrinsic::wasm_memory_size, Tys: ResultType); |
20955 | return Builder.CreateCall(Callee, Args: I); |
20956 | } |
20957 | case WebAssembly::BI__builtin_wasm_memory_grow: { |
20958 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
20959 | Value *Args[] = {EmitScalarExpr(E: E->getArg(Arg: 0)), |
20960 | EmitScalarExpr(E: E->getArg(Arg: 1))}; |
20961 | Function *Callee = |
20962 | CGM.getIntrinsic(IID: Intrinsic::wasm_memory_grow, Tys: ResultType); |
20963 | return Builder.CreateCall(Callee, Args); |
20964 | } |
20965 | case WebAssembly::BI__builtin_wasm_tls_size: { |
20966 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
20967 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_tls_size, Tys: ResultType); |
20968 | return Builder.CreateCall(Callee); |
20969 | } |
20970 | case WebAssembly::BI__builtin_wasm_tls_align: { |
20971 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
20972 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_tls_align, Tys: ResultType); |
20973 | return Builder.CreateCall(Callee); |
20974 | } |
20975 | case WebAssembly::BI__builtin_wasm_tls_base: { |
20976 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_tls_base); |
20977 | return Builder.CreateCall(Callee); |
20978 | } |
20979 | case WebAssembly::BI__builtin_wasm_throw: { |
20980 | Value *Tag = EmitScalarExpr(E: E->getArg(Arg: 0)); |
20981 | Value *Obj = EmitScalarExpr(E: E->getArg(Arg: 1)); |
20982 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_throw); |
20983 | return Builder.CreateCall(Callee, Args: {Tag, Obj}); |
20984 | } |
20985 | case WebAssembly::BI__builtin_wasm_rethrow: { |
20986 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_rethrow); |
20987 | return Builder.CreateCall(Callee); |
20988 | } |
20989 | case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: { |
20990 | Value *Addr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
20991 | Value *Expected = EmitScalarExpr(E: E->getArg(Arg: 1)); |
20992 | Value *Timeout = EmitScalarExpr(E: E->getArg(Arg: 2)); |
20993 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_memory_atomic_wait32); |
20994 | return Builder.CreateCall(Callee, Args: {Addr, Expected, Timeout}); |
20995 | } |
20996 | case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: { |
20997 | Value *Addr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
20998 | Value *Expected = EmitScalarExpr(E: E->getArg(Arg: 1)); |
20999 | Value *Timeout = EmitScalarExpr(E: E->getArg(Arg: 2)); |
21000 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_memory_atomic_wait64); |
21001 | return Builder.CreateCall(Callee, Args: {Addr, Expected, Timeout}); |
21002 | } |
21003 | case WebAssembly::BI__builtin_wasm_memory_atomic_notify: { |
21004 | Value *Addr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21005 | Value *Count = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21006 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_memory_atomic_notify); |
21007 | return Builder.CreateCall(Callee, Args: {Addr, Count}); |
21008 | } |
21009 | case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32: |
21010 | case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64: |
21011 | case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32: |
21012 | case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: { |
21013 | Value *Src = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21014 | llvm::Type *ResT = ConvertType(T: E->getType()); |
21015 | Function *Callee = |
21016 | CGM.getIntrinsic(IID: Intrinsic::wasm_trunc_signed, Tys: {ResT, Src->getType()}); |
21017 | return Builder.CreateCall(Callee, Args: {Src}); |
21018 | } |
21019 | case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32: |
21020 | case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64: |
21021 | case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32: |
21022 | case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: { |
21023 | Value *Src = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21024 | llvm::Type *ResT = ConvertType(T: E->getType()); |
21025 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_trunc_unsigned, |
21026 | Tys: {ResT, Src->getType()}); |
21027 | return Builder.CreateCall(Callee, Args: {Src}); |
21028 | } |
21029 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32: |
21030 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64: |
21031 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32: |
21032 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64: |
21033 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: { |
21034 | Value *Src = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21035 | llvm::Type *ResT = ConvertType(T: E->getType()); |
21036 | Function *Callee = |
21037 | CGM.getIntrinsic(IID: Intrinsic::fptosi_sat, Tys: {ResT, Src->getType()}); |
21038 | return Builder.CreateCall(Callee, Args: {Src}); |
21039 | } |
21040 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32: |
21041 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64: |
21042 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32: |
21043 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64: |
21044 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: { |
21045 | Value *Src = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21046 | llvm::Type *ResT = ConvertType(T: E->getType()); |
21047 | Function *Callee = |
21048 | CGM.getIntrinsic(IID: Intrinsic::fptoui_sat, Tys: {ResT, Src->getType()}); |
21049 | return Builder.CreateCall(Callee, Args: {Src}); |
21050 | } |
21051 | case WebAssembly::BI__builtin_wasm_min_f32: |
21052 | case WebAssembly::BI__builtin_wasm_min_f64: |
21053 | case WebAssembly::BI__builtin_wasm_min_f16x8: |
21054 | case WebAssembly::BI__builtin_wasm_min_f32x4: |
21055 | case WebAssembly::BI__builtin_wasm_min_f64x2: { |
21056 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21057 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21058 | Function *Callee = |
21059 | CGM.getIntrinsic(IID: Intrinsic::minimum, Tys: ConvertType(T: E->getType())); |
21060 | return Builder.CreateCall(Callee, Args: {LHS, RHS}); |
21061 | } |
21062 | case WebAssembly::BI__builtin_wasm_max_f32: |
21063 | case WebAssembly::BI__builtin_wasm_max_f64: |
21064 | case WebAssembly::BI__builtin_wasm_max_f16x8: |
21065 | case WebAssembly::BI__builtin_wasm_max_f32x4: |
21066 | case WebAssembly::BI__builtin_wasm_max_f64x2: { |
21067 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21068 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21069 | Function *Callee = |
21070 | CGM.getIntrinsic(IID: Intrinsic::maximum, Tys: ConvertType(T: E->getType())); |
21071 | return Builder.CreateCall(Callee, Args: {LHS, RHS}); |
21072 | } |
21073 | case WebAssembly::BI__builtin_wasm_pmin_f16x8: |
21074 | case WebAssembly::BI__builtin_wasm_pmin_f32x4: |
21075 | case WebAssembly::BI__builtin_wasm_pmin_f64x2: { |
21076 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21077 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21078 | Function *Callee = |
21079 | CGM.getIntrinsic(IID: Intrinsic::wasm_pmin, Tys: ConvertType(T: E->getType())); |
21080 | return Builder.CreateCall(Callee, Args: {LHS, RHS}); |
21081 | } |
21082 | case WebAssembly::BI__builtin_wasm_pmax_f16x8: |
21083 | case WebAssembly::BI__builtin_wasm_pmax_f32x4: |
21084 | case WebAssembly::BI__builtin_wasm_pmax_f64x2: { |
21085 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21086 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21087 | Function *Callee = |
21088 | CGM.getIntrinsic(IID: Intrinsic::wasm_pmax, Tys: ConvertType(T: E->getType())); |
21089 | return Builder.CreateCall(Callee, Args: {LHS, RHS}); |
21090 | } |
21091 | case WebAssembly::BI__builtin_wasm_ceil_f32x4: |
21092 | case WebAssembly::BI__builtin_wasm_floor_f32x4: |
21093 | case WebAssembly::BI__builtin_wasm_trunc_f32x4: |
21094 | case WebAssembly::BI__builtin_wasm_nearest_f32x4: |
21095 | case WebAssembly::BI__builtin_wasm_ceil_f64x2: |
21096 | case WebAssembly::BI__builtin_wasm_floor_f64x2: |
21097 | case WebAssembly::BI__builtin_wasm_trunc_f64x2: |
21098 | case WebAssembly::BI__builtin_wasm_nearest_f64x2: { |
21099 | unsigned IntNo; |
21100 | switch (BuiltinID) { |
21101 | case WebAssembly::BI__builtin_wasm_ceil_f32x4: |
21102 | case WebAssembly::BI__builtin_wasm_ceil_f64x2: |
21103 | IntNo = Intrinsic::ceil; |
21104 | break; |
21105 | case WebAssembly::BI__builtin_wasm_floor_f32x4: |
21106 | case WebAssembly::BI__builtin_wasm_floor_f64x2: |
21107 | IntNo = Intrinsic::floor; |
21108 | break; |
21109 | case WebAssembly::BI__builtin_wasm_trunc_f32x4: |
21110 | case WebAssembly::BI__builtin_wasm_trunc_f64x2: |
21111 | IntNo = Intrinsic::trunc; |
21112 | break; |
21113 | case WebAssembly::BI__builtin_wasm_nearest_f32x4: |
21114 | case WebAssembly::BI__builtin_wasm_nearest_f64x2: |
21115 | IntNo = Intrinsic::nearbyint; |
21116 | break; |
21117 | default: |
21118 | llvm_unreachable("unexpected builtin ID" ); |
21119 | } |
21120 | Value *Value = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21121 | Function *Callee = CGM.getIntrinsic(IID: IntNo, Tys: ConvertType(T: E->getType())); |
21122 | return Builder.CreateCall(Callee, Args: Value); |
21123 | } |
21124 | case WebAssembly::BI__builtin_wasm_ref_null_extern: { |
21125 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_ref_null_extern); |
21126 | return Builder.CreateCall(Callee); |
21127 | } |
21128 | case WebAssembly::BI__builtin_wasm_ref_null_func: { |
21129 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_ref_null_func); |
21130 | return Builder.CreateCall(Callee); |
21131 | } |
21132 | case WebAssembly::BI__builtin_wasm_swizzle_i8x16: { |
21133 | Value *Src = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21134 | Value *Indices = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21135 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_swizzle); |
21136 | return Builder.CreateCall(Callee, Args: {Src, Indices}); |
21137 | } |
21138 | case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16: |
21139 | case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16: |
21140 | case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8: |
21141 | case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8: |
21142 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16: |
21143 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16: |
21144 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8: |
21145 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: { |
21146 | unsigned IntNo; |
21147 | switch (BuiltinID) { |
21148 | case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16: |
21149 | case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8: |
21150 | IntNo = Intrinsic::sadd_sat; |
21151 | break; |
21152 | case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16: |
21153 | case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8: |
21154 | IntNo = Intrinsic::uadd_sat; |
21155 | break; |
21156 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16: |
21157 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8: |
21158 | IntNo = Intrinsic::wasm_sub_sat_signed; |
21159 | break; |
21160 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16: |
21161 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: |
21162 | IntNo = Intrinsic::wasm_sub_sat_unsigned; |
21163 | break; |
21164 | default: |
21165 | llvm_unreachable("unexpected builtin ID" ); |
21166 | } |
21167 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21168 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21169 | Function *Callee = CGM.getIntrinsic(IID: IntNo, Tys: ConvertType(T: E->getType())); |
21170 | return Builder.CreateCall(Callee, Args: {LHS, RHS}); |
21171 | } |
21172 | case WebAssembly::BI__builtin_wasm_abs_i8x16: |
21173 | case WebAssembly::BI__builtin_wasm_abs_i16x8: |
21174 | case WebAssembly::BI__builtin_wasm_abs_i32x4: |
21175 | case WebAssembly::BI__builtin_wasm_abs_i64x2: { |
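    // Open-code integer abs as select(v < 0, -v, v).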
21176 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21177 | Value *Neg = Builder.CreateNeg(V: Vec, Name: "neg" ); |
21178 | Constant *Zero = llvm::Constant::getNullValue(Ty: Vec->getType()); |
21179 | Value *ICmp = Builder.CreateICmpSLT(LHS: Vec, RHS: Zero, Name: "abscond" ); |
21180 | return Builder.CreateSelect(C: ICmp, True: Neg, False: Vec, Name: "abs" ); |
21181 | } |
21182 | case WebAssembly::BI__builtin_wasm_min_s_i8x16: |
21183 | case WebAssembly::BI__builtin_wasm_min_u_i8x16: |
21184 | case WebAssembly::BI__builtin_wasm_max_s_i8x16: |
21185 | case WebAssembly::BI__builtin_wasm_max_u_i8x16: |
21186 | case WebAssembly::BI__builtin_wasm_min_s_i16x8: |
21187 | case WebAssembly::BI__builtin_wasm_min_u_i16x8: |
21188 | case WebAssembly::BI__builtin_wasm_max_s_i16x8: |
21189 | case WebAssembly::BI__builtin_wasm_max_u_i16x8: |
21190 | case WebAssembly::BI__builtin_wasm_min_s_i32x4: |
21191 | case WebAssembly::BI__builtin_wasm_min_u_i32x4: |
21192 | case WebAssembly::BI__builtin_wasm_max_s_i32x4: |
21193 | case WebAssembly::BI__builtin_wasm_max_u_i32x4: { |
21194 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21195 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21196 | Value *ICmp; |
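    // Open-code integer min/max as a compare followed by a select.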
21197 | switch (BuiltinID) { |
21198 | case WebAssembly::BI__builtin_wasm_min_s_i8x16: |
21199 | case WebAssembly::BI__builtin_wasm_min_s_i16x8: |
21200 | case WebAssembly::BI__builtin_wasm_min_s_i32x4: |
21201 | ICmp = Builder.CreateICmpSLT(LHS, RHS); |
21202 | break; |
21203 | case WebAssembly::BI__builtin_wasm_min_u_i8x16: |
21204 | case WebAssembly::BI__builtin_wasm_min_u_i16x8: |
21205 | case WebAssembly::BI__builtin_wasm_min_u_i32x4: |
21206 | ICmp = Builder.CreateICmpULT(LHS, RHS); |
21207 | break; |
21208 | case WebAssembly::BI__builtin_wasm_max_s_i8x16: |
21209 | case WebAssembly::BI__builtin_wasm_max_s_i16x8: |
21210 | case WebAssembly::BI__builtin_wasm_max_s_i32x4: |
21211 | ICmp = Builder.CreateICmpSGT(LHS, RHS); |
21212 | break; |
21213 | case WebAssembly::BI__builtin_wasm_max_u_i8x16: |
21214 | case WebAssembly::BI__builtin_wasm_max_u_i16x8: |
21215 | case WebAssembly::BI__builtin_wasm_max_u_i32x4: |
21216 | ICmp = Builder.CreateICmpUGT(LHS, RHS); |
21217 | break; |
21218 | default: |
21219 | llvm_unreachable("unexpected builtin ID" ); |
21220 | } |
21221 | return Builder.CreateSelect(C: ICmp, True: LHS, False: RHS); |
21222 | } |
21223 | case WebAssembly::BI__builtin_wasm_avgr_u_i8x16: |
21224 | case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: { |
21225 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21226 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21227 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_avgr_unsigned, |
21228 | Tys: ConvertType(T: E->getType())); |
21229 | return Builder.CreateCall(Callee, Args: {LHS, RHS}); |
21230 | } |
21231 | case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: { |
21232 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21233 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21234 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_q15mulr_sat_signed); |
21235 | return Builder.CreateCall(Callee, Args: {LHS, RHS}); |
21236 | } |
21237 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8: |
21238 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8: |
21239 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4: |
21240 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: { |
21241 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21242 | unsigned IntNo; |
21243 | switch (BuiltinID) { |
21244 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8: |
21245 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4: |
21246 | IntNo = Intrinsic::wasm_extadd_pairwise_signed; |
21247 | break; |
21248 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8: |
21249 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: |
21250 | IntNo = Intrinsic::wasm_extadd_pairwise_unsigned; |
21251 | break; |
21252 | default: |
21253 | llvm_unreachable("unexpected builtin ID" ); |
21254 | } |
21255 | |
21256 | Function *Callee = CGM.getIntrinsic(IID: IntNo, Tys: ConvertType(T: E->getType())); |
21257 | return Builder.CreateCall(Callee, Args: Vec); |
21258 | } |
21259 | case WebAssembly::BI__builtin_wasm_bitselect: { |
21260 | Value *V1 = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21261 | Value *V2 = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21262 | Value *C = EmitScalarExpr(E: E->getArg(Arg: 2)); |
21263 | Function *Callee = |
21264 | CGM.getIntrinsic(IID: Intrinsic::wasm_bitselect, Tys: ConvertType(T: E->getType())); |
21265 | return Builder.CreateCall(Callee, Args: {V1, V2, C}); |
21266 | } |
21267 | case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: { |
21268 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21269 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21270 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_dot); |
21271 | return Builder.CreateCall(Callee, Args: {LHS, RHS}); |
21272 | } |
21273 | case WebAssembly::BI__builtin_wasm_popcnt_i8x16: { |
21274 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21275 | Function *Callee = |
21276 | CGM.getIntrinsic(IID: Intrinsic::ctpop, Tys: ConvertType(T: E->getType())); |
21277 | return Builder.CreateCall(Callee, Args: {Vec}); |
21278 | } |
21279 | case WebAssembly::BI__builtin_wasm_any_true_v128: |
21280 | case WebAssembly::BI__builtin_wasm_all_true_i8x16: |
21281 | case WebAssembly::BI__builtin_wasm_all_true_i16x8: |
21282 | case WebAssembly::BI__builtin_wasm_all_true_i32x4: |
21283 | case WebAssembly::BI__builtin_wasm_all_true_i64x2: { |
21284 | unsigned IntNo; |
21285 | switch (BuiltinID) { |
21286 | case WebAssembly::BI__builtin_wasm_any_true_v128: |
21287 | IntNo = Intrinsic::wasm_anytrue; |
21288 | break; |
21289 | case WebAssembly::BI__builtin_wasm_all_true_i8x16: |
21290 | case WebAssembly::BI__builtin_wasm_all_true_i16x8: |
21291 | case WebAssembly::BI__builtin_wasm_all_true_i32x4: |
21292 | case WebAssembly::BI__builtin_wasm_all_true_i64x2: |
21293 | IntNo = Intrinsic::wasm_alltrue; |
21294 | break; |
21295 | default: |
21296 | llvm_unreachable("unexpected builtin ID" ); |
21297 | } |
21298 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21299 | Function *Callee = CGM.getIntrinsic(IID: IntNo, Tys: Vec->getType()); |
21300 | return Builder.CreateCall(Callee, Args: {Vec}); |
21301 | } |
21302 | case WebAssembly::BI__builtin_wasm_bitmask_i8x16: |
21303 | case WebAssembly::BI__builtin_wasm_bitmask_i16x8: |
21304 | case WebAssembly::BI__builtin_wasm_bitmask_i32x4: |
21305 | case WebAssembly::BI__builtin_wasm_bitmask_i64x2: { |
21306 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21307 | Function *Callee = |
21308 | CGM.getIntrinsic(IID: Intrinsic::wasm_bitmask, Tys: Vec->getType()); |
21309 | return Builder.CreateCall(Callee, Args: {Vec}); |
21310 | } |
21311 | case WebAssembly::BI__builtin_wasm_abs_f32x4: |
21312 | case WebAssembly::BI__builtin_wasm_abs_f64x2: { |
21313 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21314 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::fabs, Tys: Vec->getType()); |
21315 | return Builder.CreateCall(Callee, Args: {Vec}); |
21316 | } |
21317 | case WebAssembly::BI__builtin_wasm_sqrt_f32x4: |
21318 | case WebAssembly::BI__builtin_wasm_sqrt_f64x2: { |
21319 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21320 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::sqrt, Tys: Vec->getType()); |
21321 | return Builder.CreateCall(Callee, Args: {Vec}); |
21322 | } |
21323 | case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8: |
21324 | case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8: |
21325 | case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4: |
21326 | case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: { |
21327 | Value *Low = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21328 | Value *High = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21329 | unsigned IntNo; |
21330 | switch (BuiltinID) { |
21331 | case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8: |
21332 | case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4: |
21333 | IntNo = Intrinsic::wasm_narrow_signed; |
21334 | break; |
21335 | case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8: |
21336 | case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: |
21337 | IntNo = Intrinsic::wasm_narrow_unsigned; |
21338 | break; |
21339 | default: |
21340 | llvm_unreachable("unexpected builtin ID" ); |
21341 | } |
21342 | Function *Callee = |
21343 | CGM.getIntrinsic(IID: IntNo, Tys: {ConvertType(T: E->getType()), Low->getType()}); |
21344 | return Builder.CreateCall(Callee, Args: {Low, High}); |
21345 | } |
21346 | case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4: |
21347 | case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4: { |
21348 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21349 | unsigned IntNo; |
21350 | switch (BuiltinID) { |
21351 | case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4: |
21352 | IntNo = Intrinsic::fptosi_sat; |
21353 | break; |
21354 | case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4: |
21355 | IntNo = Intrinsic::fptoui_sat; |
21356 | break; |
21357 | default: |
21358 | llvm_unreachable("unexpected builtin ID" ); |
21359 | } |
21360 | llvm::Type *SrcT = Vec->getType(); |
21361 | llvm::Type *TruncT = SrcT->getWithNewType(EltTy: Builder.getInt32Ty()); |
21362 | Function *Callee = CGM.getIntrinsic(IID: IntNo, Tys: {TruncT, SrcT}); |
21363 | Value *Trunc = Builder.CreateCall(Callee, Args: Vec); |
21364 | Value *Splat = Constant::getNullValue(Ty: TruncT); |
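    // Concatenate the <2 x i32> conversion result with two zero lanes to
    // produce the <4 x i32> result the builtin returns.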
21365 | return Builder.CreateShuffleVector(V1: Trunc, V2: Splat, Mask: ArrayRef<int>{0, 1, 2, 3}); |
21366 | } |
21367 | case WebAssembly::BI__builtin_wasm_shuffle_i8x16: { |
21368 | Value *Ops[18]; |
21369 | size_t OpIdx = 0; |
21370 | Ops[OpIdx++] = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21371 | Ops[OpIdx++] = EmitScalarExpr(E: E->getArg(Arg: 1)); |
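    // Operands 2..17 are the 16 lane indices and must be integer constant
    // expressions.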
21372 | while (OpIdx < 18) { |
21373 | std::optional<llvm::APSInt> LaneConst = |
21374 | E->getArg(Arg: OpIdx)->getIntegerConstantExpr(Ctx: getContext()); |
21375 | assert(LaneConst && "Constant arg isn't actually constant?" ); |
21376 | Ops[OpIdx++] = llvm::ConstantInt::get(Context&: getLLVMContext(), V: *LaneConst); |
21377 | } |
21378 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_shuffle); |
21379 | return Builder.CreateCall(Callee, Args: Ops); |
21380 | } |
21381 | case WebAssembly::BI__builtin_wasm_relaxed_madd_f16x8: |
21382 | case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f16x8: |
21383 | case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4: |
21384 | case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4: |
21385 | case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2: |
21386 | case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f64x2: { |
21387 | Value *A = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21388 | Value *B = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21389 | Value *C = EmitScalarExpr(E: E->getArg(Arg: 2)); |
21390 | unsigned IntNo; |
21391 | switch (BuiltinID) { |
21392 | case WebAssembly::BI__builtin_wasm_relaxed_madd_f16x8: |
21393 | case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4: |
21394 | case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2: |
21395 | IntNo = Intrinsic::wasm_relaxed_madd; |
21396 | break; |
21397 | case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f16x8: |
21398 | case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4: |
21399 | case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f64x2: |
21400 | IntNo = Intrinsic::wasm_relaxed_nmadd; |
21401 | break; |
21402 | default: |
21403 | llvm_unreachable("unexpected builtin ID" ); |
21404 | } |
21405 | Function *Callee = CGM.getIntrinsic(IID: IntNo, Tys: A->getType()); |
21406 | return Builder.CreateCall(Callee, Args: {A, B, C}); |
21407 | } |
21408 | case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i8x16: |
21409 | case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i16x8: |
21410 | case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i32x4: |
21411 | case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i64x2: { |
21412 | Value *A = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21413 | Value *B = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21414 | Value *C = EmitScalarExpr(E: E->getArg(Arg: 2)); |
21415 | Function *Callee = |
21416 | CGM.getIntrinsic(IID: Intrinsic::wasm_relaxed_laneselect, Tys: A->getType()); |
21417 | return Builder.CreateCall(Callee, Args: {A, B, C}); |
21418 | } |
21419 | case WebAssembly::BI__builtin_wasm_relaxed_swizzle_i8x16: { |
21420 | Value *Src = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21421 | Value *Indices = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21422 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_relaxed_swizzle); |
21423 | return Builder.CreateCall(Callee, Args: {Src, Indices}); |
21424 | } |
21425 | case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4: |
21426 | case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4: |
21427 | case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2: |
21428 | case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2: { |
21429 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21430 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21431 | unsigned IntNo; |
21432 | switch (BuiltinID) { |
21433 | case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4: |
21434 | case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2: |
21435 | IntNo = Intrinsic::wasm_relaxed_min; |
21436 | break; |
21437 | case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4: |
21438 | case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2: |
21439 | IntNo = Intrinsic::wasm_relaxed_max; |
21440 | break; |
21441 | default: |
21442 | llvm_unreachable("unexpected builtin ID" ); |
21443 | } |
21444 | Function *Callee = CGM.getIntrinsic(IID: IntNo, Tys: LHS->getType()); |
21445 | return Builder.CreateCall(Callee, Args: {LHS, RHS}); |
21446 | } |
21447 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4: |
21448 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4: |
21449 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2: |
21450 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2: { |
21451 | Value *Vec = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21452 | unsigned IntNo; |
21453 | switch (BuiltinID) { |
21454 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4: |
21455 | IntNo = Intrinsic::wasm_relaxed_trunc_signed; |
21456 | break; |
21457 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4: |
21458 | IntNo = Intrinsic::wasm_relaxed_trunc_unsigned; |
21459 | break; |
21460 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2: |
21461 | IntNo = Intrinsic::wasm_relaxed_trunc_signed_zero; |
21462 | break; |
21463 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2: |
21464 | IntNo = Intrinsic::wasm_relaxed_trunc_unsigned_zero; |
21465 | break; |
21466 | default: |
21467 | llvm_unreachable("unexpected builtin ID" ); |
21468 | } |
21469 | Function *Callee = CGM.getIntrinsic(IID: IntNo); |
21470 | return Builder.CreateCall(Callee, Args: {Vec}); |
21471 | } |
21472 | case WebAssembly::BI__builtin_wasm_relaxed_q15mulr_s_i16x8: { |
21473 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21474 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21475 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_relaxed_q15mulr_signed); |
21476 | return Builder.CreateCall(Callee, Args: {LHS, RHS}); |
21477 | } |
21478 | case WebAssembly::BI__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8: { |
21479 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21480 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21481 | Function *Callee = |
21482 | CGM.getIntrinsic(IID: Intrinsic::wasm_relaxed_dot_i8x16_i7x16_signed); |
21483 | return Builder.CreateCall(Callee, Args: {LHS, RHS}); |
21484 | } |
21485 | case WebAssembly::BI__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4: { |
21486 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21487 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21488 | Value *Acc = EmitScalarExpr(E: E->getArg(Arg: 2)); |
21489 | Function *Callee = |
21490 | CGM.getIntrinsic(IID: Intrinsic::wasm_relaxed_dot_i8x16_i7x16_add_signed); |
21491 | return Builder.CreateCall(Callee, Args: {LHS, RHS, Acc}); |
21492 | } |
21493 | case WebAssembly::BI__builtin_wasm_relaxed_dot_bf16x8_add_f32_f32x4: { |
21494 | Value *LHS = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21495 | Value *RHS = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21496 | Value *Acc = EmitScalarExpr(E: E->getArg(Arg: 2)); |
21497 | Function *Callee = |
21498 | CGM.getIntrinsic(IID: Intrinsic::wasm_relaxed_dot_bf16x8_add_f32); |
21499 | return Builder.CreateCall(Callee, Args: {LHS, RHS, Acc}); |
21500 | } |
21501 | case WebAssembly::BI__builtin_wasm_loadf16_f32: { |
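| // Load a half-precision value from memory and widen it to f32. |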
21502 | Value *Addr = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21503 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_loadf16_f32); |
21504 | return Builder.CreateCall(Callee, Args: {Addr}); |
21505 | } |
21506 | case WebAssembly::BI__builtin_wasm_storef16_f32: { |
21507 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21508 | Value *Addr = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21509 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_storef16_f32); |
21510 | return Builder.CreateCall(Callee, Args: {Val, Addr}); |
21511 | } |
21512 | case WebAssembly::BI__builtin_wasm_splat_f16x8: { |
21513 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21514 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_splat_f16x8); |
21515 | return Builder.CreateCall(Callee, Args: {Val}); |
21516 | } |
21517 | case WebAssembly::BI__builtin_wasm_extract_lane_f16x8: { |
21518 | Value *Vector = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21519 | Value *Index = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21520 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_extract_lane_f16x8); |
21521 | return Builder.CreateCall(Callee, Args: {Vector, Index}); |
21522 | } |
21523 | case WebAssembly::BI__builtin_wasm_table_get: { |
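| // The table operand is an array of WebAssembly reference values that decays |
| // to a pointer naming the table; the builtin's result type selects the |
| // externref or funcref flavor of the intrinsic. |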
21524 | assert(E->getArg(0)->getType()->isArrayType()); |
21525 | Value *Table = EmitArrayToPointerDecay(Array: E->getArg(Arg: 0)).emitRawPointer(CGF&: *this); |
21526 | Value *Index = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21527 | Function *Callee; |
21528 | if (E->getType().isWebAssemblyExternrefType()) |
21529 | Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_table_get_externref); |
21530 | else if (E->getType().isWebAssemblyFuncrefType()) |
21531 | Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_table_get_funcref); |
21532 | else |
21533 | llvm_unreachable( |
21534 | "Unexpected reference type for __builtin_wasm_table_get" ); |
21535 | return Builder.CreateCall(Callee, Args: {Table, Index}); |
21536 | } |
21537 | case WebAssembly::BI__builtin_wasm_table_set: { |
21538 | assert(E->getArg(0)->getType()->isArrayType()); |
21539 | Value *Table = EmitArrayToPointerDecay(Array: E->getArg(Arg: 0)).emitRawPointer(CGF&: *this); |
21540 | Value *Index = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21541 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 2)); |
21542 | Function *Callee; |
21543 | if (E->getArg(Arg: 2)->getType().isWebAssemblyExternrefType()) |
21544 | Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_table_set_externref); |
21545 | else if (E->getArg(Arg: 2)->getType().isWebAssemblyFuncrefType()) |
21546 | Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_table_set_funcref); |
21547 | else |
21548 | llvm_unreachable( |
21549 | "Unexpected reference type for __builtin_wasm_table_set" ); |
21550 | return Builder.CreateCall(Callee, Args: {Table, Index, Val}); |
21551 | } |
21552 | case WebAssembly::BI__builtin_wasm_table_size: { |
21553 | assert(E->getArg(0)->getType()->isArrayType()); |
21554 | Value *Value = EmitArrayToPointerDecay(Array: E->getArg(Arg: 0)).emitRawPointer(CGF&: *this); |
21555 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_table_size); |
21556 | return Builder.CreateCall(Callee, Args: Value); |
21557 | } |
21558 | case WebAssembly::BI__builtin_wasm_table_grow: { |
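| // Grow the table by NElems entries, each initialized to Val; the reference |
| // type of Val selects the externref or funcref intrinsic. |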
21559 | assert(E->getArg(0)->getType()->isArrayType()); |
21560 | Value *Table = EmitArrayToPointerDecay(Array: E->getArg(Arg: 0)).emitRawPointer(CGF&: *this); |
21561 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21562 | Value *NElems = EmitScalarExpr(E: E->getArg(Arg: 2)); |
21563 | |
21564 | Function *Callee; |
21565 | if (E->getArg(Arg: 1)->getType().isWebAssemblyExternrefType()) |
21566 | Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_table_grow_externref); |
21567 | else if (E->getArg(Arg: 1)->getType().isWebAssemblyFuncrefType()) |
21568 | Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_table_grow_funcref); |
21569 | else |
21570 | llvm_unreachable( |
21571 | "Unexpected reference type for __builtin_wasm_table_grow" ); |
21572 | |
21573 | return Builder.CreateCall(Callee, Args: {Table, Val, NElems}); |
21574 | } |
21575 | case WebAssembly::BI__builtin_wasm_table_fill: { |
21576 | assert(E->getArg(0)->getType()->isArrayType()); |
21577 | Value *Table = EmitArrayToPointerDecay(Array: E->getArg(Arg: 0)).emitRawPointer(CGF&: *this); |
21578 | Value *Index = EmitScalarExpr(E: E->getArg(Arg: 1)); |
21579 | Value *Val = EmitScalarExpr(E: E->getArg(Arg: 2)); |
21580 | Value *NElems = EmitScalarExpr(E: E->getArg(Arg: 3)); |
21581 | |
21582 | Function *Callee; |
21583 | if (E->getArg(Arg: 2)->getType().isWebAssemblyExternrefType()) |
21584 | Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_table_fill_externref); |
21585 | else if (E->getArg(Arg: 2)->getType().isWebAssemblyFuncrefType()) |
21586 | Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_table_fill_funcref); |
21587 | else |
21588 | llvm_unreachable( |
21589 | "Unexpected reference type for __builtin_wasm_table_fill" ); |
21590 | |
21591 | return Builder.CreateCall(Callee, Args: {Table, Index, Val, NElems}); |
21592 | } |
21593 | case WebAssembly::BI__builtin_wasm_table_copy: { |
21594 | assert(E->getArg(0)->getType()->isArrayType()); |
21595 | Value *TableX = EmitArrayToPointerDecay(Array: E->getArg(Arg: 0)).emitRawPointer(CGF&: *this); |
21596 | Value *TableY = EmitArrayToPointerDecay(Array: E->getArg(Arg: 1)).emitRawPointer(CGF&: *this); |
21597 | Value *DstIdx = EmitScalarExpr(E: E->getArg(Arg: 2)); |
21598 | Value *SrcIdx = EmitScalarExpr(E: E->getArg(Arg: 3)); |
21599 | Value *NElems = EmitScalarExpr(E: E->getArg(Arg: 4)); |
21600 | |
21601 | Function *Callee = CGM.getIntrinsic(IID: Intrinsic::wasm_table_copy); |
21602 | |
21603 | return Builder.CreateCall(Callee, Args: {TableX, TableY, SrcIdx, DstIdx, NElems}); |
21604 | } |
21605 | default: |
21606 | return nullptr; |
21607 | } |
21608 | } |
21609 | |
21610 | static std::pair<Intrinsic::ID, unsigned> |
21611 | getIntrinsicForHexagonNonClangBuiltin(unsigned BuiltinID) { |
21612 | struct Info { |
21613 | unsigned BuiltinID; |
21614 | Intrinsic::ID IntrinsicID; |
21615 | unsigned VecLen; |
21616 | }; |
21617 | static Info Infos[] = { |
21618 | #define CUSTOM_BUILTIN_MAPPING(x,s) \ |
21619 | { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s }, |
21620 | CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0) |
21621 | CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0) |
21622 | CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0) |
21623 | CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0) |
21624 | CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0) |
21625 | CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0) |
21626 | CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0) |
21627 | CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0) |
21628 | CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0) |
21629 | CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0) |
21630 | CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0) |
21631 | CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0) |
21632 | CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0) |
21633 | CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0) |
21634 | CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0) |
21635 | CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0) |
21636 | CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0) |
21637 | CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0) |
21638 | CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0) |
21639 | CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0) |
21640 | CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0) |
21641 | CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0) |
21642 | // Legacy builtins that take a vector in place of a vector predicate. |
21643 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64) |
21644 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64) |
21645 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64) |
21646 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64) |
21647 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128) |
21648 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128) |
21649 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128) |
21650 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128) |
21651 | #include "clang/Basic/BuiltinsHexagonMapCustomDep.def" |
21652 | #undef CUSTOM_BUILTIN_MAPPING |
21653 | }; |
21654 | |
21655 | auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; }; |
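| // The comma expression sorts Infos exactly once, when the static local is |
| // initialized, so the lower_bound lookup below can binary-search the table. |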
21656 | static const bool SortOnce = (llvm::sort(C&: Infos, Comp: CmpInfo), true); |
21657 | (void)SortOnce; |
21658 | |
21659 | const Info *F = llvm::lower_bound(Range&: Infos, Value: Info{.BuiltinID: BuiltinID, .IntrinsicID: 0, .VecLen: 0}, C: CmpInfo); |
21660 | if (F == std::end(arr&: Infos) || F->BuiltinID != BuiltinID) |
21661 | return {Intrinsic::not_intrinsic, 0}; |
21662 | |
21663 | return {F->IntrinsicID, F->VecLen}; |
21664 | } |
21665 | |
21666 | Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID, |
21667 | const CallExpr *E) { |
21668 | Intrinsic::ID ID; |
21669 | unsigned VecLen; |
21670 | std::tie(args&: ID, args&: VecLen) = getIntrinsicForHexagonNonClangBuiltin(BuiltinID); |
21671 | |
21672 | auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) { |
21673 | // The base pointer is passed by address, so it needs to be loaded. |
21674 | Address A = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
21675 | Address BP = Address(A.emitRawPointer(CGF&: *this), Int8PtrTy, A.getAlignment()); |
21676 | llvm::Value *Base = Builder.CreateLoad(Addr: BP); |
21677 | // The treatment of both loads and stores is the same: the arguments for |
21678 | // the builtin are the same as the arguments for the intrinsic. |
21679 | // Load: |
21680 | // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start) |
21681 | // builtin(Base, Mod, Start) -> intr(Base, Mod, Start) |
21682 | // Store: |
21683 | // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start) |
21684 | // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start) |
21685 | SmallVector<llvm::Value*,5> Ops = { Base }; |
21686 | for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i) |
21687 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: i))); |
21688 | |
21689 | llvm::Value *Result = Builder.CreateCall(Callee: CGM.getIntrinsic(IID: IntID), Args: Ops); |
21690 | // The load intrinsics generate two results (Value, NewBase), stores |
21691 | // generate one (NewBase). The new base address needs to be stored. |
21692 | llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Agg: Result, Idxs: 1) |
21693 | : Result; |
21694 | llvm::Value *LV = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21695 | Address Dest = EmitPointerWithAlignment(Addr: E->getArg(Arg: 0)); |
21696 | llvm::Value *RetVal = |
21697 | Builder.CreateAlignedStore(Val: NewBase, Addr: LV, Align: Dest.getAlignment()); |
21698 | if (IsLoad) |
21699 | RetVal = Builder.CreateExtractValue(Agg: Result, Idxs: 0); |
21700 | return RetVal; |
21701 | }; |
21702 | |
21703 | // Handle the conversion of bit-reverse load intrinsics to bitcode. The |
21704 | // intrinsic call emitted below only reads from memory; the write to memory |
21705 | // is handled by the store instruction. |
21706 | auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) { |
21707 | // The intrinsic returns the loaded value and the updated base pointer. The |
21708 | // updated base pointer is the builtin's return value; the loaded value is |
21709 | // stored through the destination pointer, which is passed by address. |
21710 | llvm::Value *BaseAddress = EmitScalarExpr(E: E->getArg(Arg: 0)); |
21711 | |
21712 | // Expressions like &(*pt++) are incremented on each evaluation, and both |
21713 | // EmitPointerWithAlignment and EmitScalarExpr evaluate the expression, so |
21714 | // the destination argument must only be emitted once per call. |
21715 | Address DestAddr = EmitPointerWithAlignment(Addr: E->getArg(Arg: 1)); |
21716 | DestAddr = DestAddr.withElementType(ElemTy: Int8Ty); |
21717 | llvm::Value *DestAddress = DestAddr.emitRawPointer(CGF&: *this); |
21718 | |
21719 | // Operands are Base, Dest, Modifier. |
21720 | // The intrinsic format in LLVM IR is defined as |
21721 | // { ValueType, i8* } (i8*, i32). |
21722 | llvm::Value *Result = Builder.CreateCall( |
21723 | Callee: CGM.getIntrinsic(IID: IntID), Args: {BaseAddress, EmitScalarExpr(E: E->getArg(Arg: 2))}); |
21724 | |
21725 | // The value needs to be stored as the variable is passed by reference. |
21726 | llvm::Value *DestVal = Builder.CreateExtractValue(Agg: Result, Idxs: 0); |
21727 | |
21728 | // The stored value needs to be truncated to fit the destination type. While |
21729 | // i32 and i64 are natively supported on Hexagon, i8 and i16 need to be |
21730 | // handled with stores of the respective destination type. |
21731 | DestVal = Builder.CreateTrunc(V: DestVal, DestTy); |
21732 | |
21733 | Builder.CreateAlignedStore(Val: DestVal, Addr: DestAddress, Align: DestAddr.getAlignment()); |
21734 | // The updated value of the base pointer is returned. |
21735 | return Builder.CreateExtractValue(Agg: Result, Idxs: 1); |
21736 | }; |
21737 | |
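| // V2Q and Q2V convert between HVX vectors and vector predicates: vandvrt |
| // forms a predicate from a byte vector, vandqrt expands it back. |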
21738 | auto V2Q = [this, VecLen] (llvm::Value *Vec) { |
21739 | Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B |
21740 | : Intrinsic::hexagon_V6_vandvrt; |
21741 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), |
21742 | Args: {Vec, Builder.getInt32(C: -1)}); |
21743 | }; |
21744 | auto Q2V = [this, VecLen] (llvm::Value *Pred) { |
21745 | Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B |
21746 | : Intrinsic::hexagon_V6_vandqrt; |
21747 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), |
21748 | Args: {Pred, Builder.getInt32(C: -1)}); |
21749 | }; |
21750 | |
21751 | switch (BuiltinID) { |
21752 | // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR, |
21753 | // and the corresponding C/C++ builtins use loads/stores to update |
21754 | // the predicate. |
21755 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry: |
21756 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: |
21757 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry: |
21758 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: { |
21759 | // Get the type from the 0-th argument. |
21760 | llvm::Type *VecType = ConvertType(T: E->getArg(Arg: 0)->getType()); |
21761 | Address PredAddr = |
21762 | EmitPointerWithAlignment(Addr: E->getArg(Arg: 2)).withElementType(ElemTy: VecType); |
21763 | llvm::Value *PredIn = V2Q(Builder.CreateLoad(Addr: PredAddr)); |
21764 | llvm::Value *Result = Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), |
21765 | Args: {EmitScalarExpr(E: E->getArg(Arg: 0)), EmitScalarExpr(E: E->getArg(Arg: 1)), PredIn}); |
21766 | |
21767 | llvm::Value *PredOut = Builder.CreateExtractValue(Agg: Result, Idxs: 1); |
21768 | Builder.CreateAlignedStore(Val: Q2V(PredOut), Addr: PredAddr.emitRawPointer(CGF&: *this), |
21769 | Align: PredAddr.getAlignment()); |
21770 | return Builder.CreateExtractValue(Agg: Result, Idxs: 0); |
21771 | } |
21772 | // These are identical to the builtins above, except they don't consume |
21773 | // input carry, only generate carry-out. Since they still produce two |
21774 | // outputs, generate the store of the predicate, but no load. |
21775 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarryo: |
21776 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarryo_128B: |
21777 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarryo: |
21778 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarryo_128B: { |
21779 | // Get the type from the 0-th argument. |
21780 | llvm::Type *VecType = ConvertType(T: E->getArg(Arg: 0)->getType()); |
21781 | Address PredAddr = |
21782 | EmitPointerWithAlignment(Addr: E->getArg(Arg: 2)).withElementType(ElemTy: VecType); |
21783 | llvm::Value *Result = Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), |
21784 | Args: {EmitScalarExpr(E: E->getArg(Arg: 0)), EmitScalarExpr(E: E->getArg(Arg: 1))}); |
21785 | |
21786 | llvm::Value *PredOut = Builder.CreateExtractValue(Agg: Result, Idxs: 1); |
21787 | Builder.CreateAlignedStore(Val: Q2V(PredOut), Addr: PredAddr.emitRawPointer(CGF&: *this), |
21788 | Align: PredAddr.getAlignment()); |
21789 | return Builder.CreateExtractValue(Agg: Result, Idxs: 0); |
21790 | } |
21791 | |
21792 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq: |
21793 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq: |
21794 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq: |
21795 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq: |
21796 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq_128B: |
21797 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq_128B: |
21798 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq_128B: |
21799 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq_128B: { |
21800 | SmallVector<llvm::Value*,4> Ops; |
21801 | const Expr *PredOp = E->getArg(Arg: 0); |
21802 | // There will be an implicit cast to a boolean vector. Strip it. |
21803 | if (auto *Cast = dyn_cast<ImplicitCastExpr>(Val: PredOp)) { |
21804 | if (Cast->getCastKind() == CK_BitCast) |
21805 | PredOp = Cast->getSubExpr(); |
21806 | } |
21807 | Ops.push_back(Elt: V2Q(EmitScalarExpr(E: PredOp))); |
21808 | for (int i = 1, e = E->getNumArgs(); i != e; ++i) |
21809 | Ops.push_back(Elt: EmitScalarExpr(E: E->getArg(Arg: i))); |
21810 | return Builder.CreateCall(Callee: CGM.getIntrinsic(IID: ID), Args: Ops); |
21811 | } |
21812 | |
21813 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci: |
21814 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci: |
21815 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci: |
21816 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci: |
21817 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci: |
21818 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci: |
21819 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr: |
21820 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr: |
21821 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr: |
21822 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr: |
21823 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr: |
21824 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr: |
21825 | return MakeCircOp(ID, /*IsLoad=*/true); |
21826 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci: |
21827 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci: |
21828 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci: |
21829 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci: |
21830 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci: |
21831 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr: |
21832 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr: |
21833 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr: |
21834 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr: |
21835 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr: |
21836 | return MakeCircOp(ID, /*IsLoad=*/false); |
21837 | case Hexagon::BI__builtin_brev_ldub: |
21838 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty); |
21839 | case Hexagon::BI__builtin_brev_ldb: |
21840 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty); |
21841 | case Hexagon::BI__builtin_brev_lduh: |
21842 | return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty); |
21843 | case Hexagon::BI__builtin_brev_ldh: |
21844 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty); |
21845 | case Hexagon::BI__builtin_brev_ldw: |
21846 | return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty); |
21847 | case Hexagon::BI__builtin_brev_ldd: |
21848 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty); |
21849 | } // switch |
21850 | |
21851 | return nullptr; |
21852 | } |
21853 | |
21854 | Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID, |
21855 | const CallExpr *E, |
21856 | ReturnValueSlot ReturnValue) { |
21857 | SmallVector<Value *, 4> Ops; |
21858 | llvm::Type *ResultType = ConvertType(T: E->getType()); |
21859 | |
21860 | // Find out if any arguments are required to be integer constant expressions. |
21861 | unsigned ICEArguments = 0; |
21862 | ASTContext::GetBuiltinTypeError Error; |
21863 | getContext().GetBuiltinType(ID: BuiltinID, Error, IntegerConstantArgs: &ICEArguments); |
21864 | if (Error == ASTContext::GE_Missing_type) { |
21865 | // Vector intrinsics don't have a type string. |
21866 | assert(BuiltinID >= clang::RISCV::FirstRVVBuiltin && |
21867 | BuiltinID <= clang::RISCV::LastRVVBuiltin); |
21868 | ICEArguments = 0; |
21869 | if (BuiltinID == RISCVVector::BI__builtin_rvv_vget_v || |
21870 | BuiltinID == RISCVVector::BI__builtin_rvv_vset_v) |
21871 | ICEArguments = 1 << 1; |
21872 | } else { |
21873 | assert(Error == ASTContext::GE_None && "Unexpected error" ); |
21874 | } |
21875 | |
21876 | if (BuiltinID == RISCV::BI__builtin_riscv_ntl_load) |
21877 | ICEArguments |= (1 << 1); |
21878 | if (BuiltinID == RISCV::BI__builtin_riscv_ntl_store) |
21879 | ICEArguments |= (1 << 2); |
21880 | |
21881 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { |
21882 | // Handle aggregate arguments, namely RVV tuple types in segment loads/stores. |
21883 | if (hasAggregateEvaluationKind(T: E->getArg(Arg: i)->getType())) { |
21884 | LValue L = EmitAggExprToLValue(E: E->getArg(Arg: i)); |
21885 | llvm::Value *AggValue = Builder.CreateLoad(Addr: L.getAddress()); |
21886 | Ops.push_back(Elt: AggValue); |
21887 | continue; |
21888 | } |
21889 | Ops.push_back(Elt: EmitScalarOrConstFoldImmArg(ICEArguments, Idx: i, E)); |
21890 | } |
21891 | |
21892 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
21893 | unsigned NF = 1; |
21894 | // The 0th bit simulates the `vta` of RVV |
21895 | // The 1st bit simulates the `vma` of RVV |
21896 | constexpr unsigned RVV_VTA = 0x1; |
21897 | constexpr unsigned RVV_VMA = 0x2; |
21898 | int PolicyAttrs = 0; |
21899 | bool IsMasked = false; |
21900 | |
21901 | // Required for overloaded intrinsics. |
21902 | llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes; |
21903 | switch (BuiltinID) { |
21904 | default: llvm_unreachable("unexpected builtin ID" ); |
21905 | case RISCV::BI__builtin_riscv_orc_b_32: |
21906 | case RISCV::BI__builtin_riscv_orc_b_64: |
21907 | case RISCV::BI__builtin_riscv_clz_32: |
21908 | case RISCV::BI__builtin_riscv_clz_64: |
21909 | case RISCV::BI__builtin_riscv_ctz_32: |
21910 | case RISCV::BI__builtin_riscv_ctz_64: |
21911 | case RISCV::BI__builtin_riscv_clmul_32: |
21912 | case RISCV::BI__builtin_riscv_clmul_64: |
21913 | case RISCV::BI__builtin_riscv_clmulh_32: |
21914 | case RISCV::BI__builtin_riscv_clmulh_64: |
21915 | case RISCV::BI__builtin_riscv_clmulr_32: |
21916 | case RISCV::BI__builtin_riscv_clmulr_64: |
21917 | case RISCV::BI__builtin_riscv_xperm4_32: |
21918 | case RISCV::BI__builtin_riscv_xperm4_64: |
21919 | case RISCV::BI__builtin_riscv_xperm8_32: |
21920 | case RISCV::BI__builtin_riscv_xperm8_64: |
21921 | case RISCV::BI__builtin_riscv_brev8_32: |
21922 | case RISCV::BI__builtin_riscv_brev8_64: |
21923 | case RISCV::BI__builtin_riscv_zip_32: |
21924 | case RISCV::BI__builtin_riscv_unzip_32: { |
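| // Scalar bit-manipulation builtins: the nested switch either selects a |
| // RISC-V intrinsic that is called on the result type below, or lowers |
| // clz/ctz directly to the generic ctlz/cttz intrinsics. |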
21925 | switch (BuiltinID) { |
21926 | default: llvm_unreachable("unexpected builtin ID" ); |
21927 | // Zbb |
21928 | case RISCV::BI__builtin_riscv_orc_b_32: |
21929 | case RISCV::BI__builtin_riscv_orc_b_64: |
21930 | ID = Intrinsic::riscv_orc_b; |
21931 | break; |
21932 | case RISCV::BI__builtin_riscv_clz_32: |
21933 | case RISCV::BI__builtin_riscv_clz_64: { |
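| // Lower to the generic ctlz intrinsic; the false argument makes a zero |
| // input return the bit width rather than being poison. |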
21934 | Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: Ops[0]->getType()); |
21935 | Value *Result = Builder.CreateCall(Callee: F, Args: {Ops[0], Builder.getInt1(V: false)}); |
21936 | if (Result->getType() != ResultType) |
21937 | Result = Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/true, |
21938 | Name: "cast" ); |
21939 | return Result; |
21940 | } |
21941 | case RISCV::BI__builtin_riscv_ctz_32: |
21942 | case RISCV::BI__builtin_riscv_ctz_64: { |
21943 | Function *F = CGM.getIntrinsic(IID: Intrinsic::cttz, Tys: Ops[0]->getType()); |
21944 | Value *Result = Builder.CreateCall(Callee: F, Args: {Ops[0], Builder.getInt1(V: false)}); |
21945 | if (Result->getType() != ResultType) |
21946 | Result = Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/true, |
21947 | Name: "cast" ); |
21948 | return Result; |
21949 | } |
21950 | |
21951 | // Zbc |
21952 | case RISCV::BI__builtin_riscv_clmul_32: |
21953 | case RISCV::BI__builtin_riscv_clmul_64: |
21954 | ID = Intrinsic::riscv_clmul; |
21955 | break; |
21956 | case RISCV::BI__builtin_riscv_clmulh_32: |
21957 | case RISCV::BI__builtin_riscv_clmulh_64: |
21958 | ID = Intrinsic::riscv_clmulh; |
21959 | break; |
21960 | case RISCV::BI__builtin_riscv_clmulr_32: |
21961 | case RISCV::BI__builtin_riscv_clmulr_64: |
21962 | ID = Intrinsic::riscv_clmulr; |
21963 | break; |
21964 | |
21965 | // Zbkx |
21966 | case RISCV::BI__builtin_riscv_xperm8_32: |
21967 | case RISCV::BI__builtin_riscv_xperm8_64: |
21968 | ID = Intrinsic::riscv_xperm8; |
21969 | break; |
21970 | case RISCV::BI__builtin_riscv_xperm4_32: |
21971 | case RISCV::BI__builtin_riscv_xperm4_64: |
21972 | ID = Intrinsic::riscv_xperm4; |
21973 | break; |
21974 | |
21975 | // Zbkb |
21976 | case RISCV::BI__builtin_riscv_brev8_32: |
21977 | case RISCV::BI__builtin_riscv_brev8_64: |
21978 | ID = Intrinsic::riscv_brev8; |
21979 | break; |
21980 | case RISCV::BI__builtin_riscv_zip_32: |
21981 | ID = Intrinsic::riscv_zip; |
21982 | break; |
21983 | case RISCV::BI__builtin_riscv_unzip_32: |
21984 | ID = Intrinsic::riscv_unzip; |
21985 | break; |
21986 | } |
21987 | |
21988 | IntrinsicTypes = {ResultType}; |
21989 | break; |
21990 | } |
21991 | |
21992 | // Zk builtins |
21993 | |
21994 | // Zknh |
21995 | case RISCV::BI__builtin_riscv_sha256sig0: |
21996 | ID = Intrinsic::riscv_sha256sig0; |
21997 | break; |
21998 | case RISCV::BI__builtin_riscv_sha256sig1: |
21999 | ID = Intrinsic::riscv_sha256sig1; |
22000 | break; |
22001 | case RISCV::BI__builtin_riscv_sha256sum0: |
22002 | ID = Intrinsic::riscv_sha256sum0; |
22003 | break; |
22004 | case RISCV::BI__builtin_riscv_sha256sum1: |
22005 | ID = Intrinsic::riscv_sha256sum1; |
22006 | break; |
22007 | |
22008 | // Zksed |
22009 | case RISCV::BI__builtin_riscv_sm4ks: |
22010 | ID = Intrinsic::riscv_sm4ks; |
22011 | break; |
22012 | case RISCV::BI__builtin_riscv_sm4ed: |
22013 | ID = Intrinsic::riscv_sm4ed; |
22014 | break; |
22015 | |
22016 | // Zksh |
22017 | case RISCV::BI__builtin_riscv_sm3p0: |
22018 | ID = Intrinsic::riscv_sm3p0; |
22019 | break; |
22020 | case RISCV::BI__builtin_riscv_sm3p1: |
22021 | ID = Intrinsic::riscv_sm3p1; |
22022 | break; |
22023 | |
22024 | // Zihintntl |
22025 | case RISCV::BI__builtin_riscv_ntl_load: { |
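| // Non-temporal load: emit an ordinary load and tag it with !nontemporal |
| // metadata plus a riscv-nontemporal-domain node recording the requested |
| // locality domain. |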
22026 | llvm::Type *ResTy = ConvertType(T: E->getType()); |
22027 | unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL |
22028 | if (Ops.size() == 2) |
22029 | DomainVal = cast<ConstantInt>(Val: Ops[1])->getZExtValue(); |
22030 | |
22031 | llvm::MDNode *RISCVDomainNode = llvm::MDNode::get( |
22032 | Context&: getLLVMContext(), |
22033 | MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: DomainVal))); |
22034 | llvm::MDNode *NontemporalNode = llvm::MDNode::get( |
22035 | Context&: getLLVMContext(), MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 1))); |
22036 | |
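| // Compute the access width so the load can be given its natural alignment |
| // (Width / 8 bytes). |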
22037 | int Width; |
22038 | if (ResTy->isScalableTy()) { |
22039 | const ScalableVectorType *SVTy = cast<ScalableVectorType>(Val: ResTy); |
22040 | llvm::Type *ScalarTy = ResTy->getScalarType(); |
22041 | Width = ScalarTy->getPrimitiveSizeInBits() * |
22042 | SVTy->getElementCount().getKnownMinValue(); |
22043 | } else |
22044 | Width = ResTy->getPrimitiveSizeInBits(); |
22045 | LoadInst *Load = Builder.CreateLoad( |
22046 | Addr: Address(Ops[0], ResTy, CharUnits::fromQuantity(Quantity: Width / 8))); |
22047 | |
22048 | Load->setMetadata(KindID: llvm::LLVMContext::MD_nontemporal, Node: NontemporalNode); |
22049 | Load->setMetadata(KindID: CGM.getModule().getMDKindID(Name: "riscv-nontemporal-domain" ), |
22050 | Node: RISCVDomainNode); |
22051 | |
22052 | return Load; |
22053 | } |
22054 | case RISCV::BI__builtin_riscv_ntl_store: { |
22055 | unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL |
22056 | if (Ops.size() == 3) |
22057 | DomainVal = cast<ConstantInt>(Val: Ops[2])->getZExtValue(); |
22058 | |
22059 | llvm::MDNode *RISCVDomainNode = llvm::MDNode::get( |
22060 | Context&: getLLVMContext(), |
22061 | MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: DomainVal))); |
22062 | llvm::MDNode *NontemporalNode = llvm::MDNode::get( |
22063 | Context&: getLLVMContext(), MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 1))); |
22064 | |
22065 | StoreInst *Store = Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]); |
22066 | Store->setMetadata(KindID: llvm::LLVMContext::MD_nontemporal, Node: NontemporalNode); |
22067 | Store->setMetadata(KindID: CGM.getModule().getMDKindID(Name: "riscv-nontemporal-domain" ), |
22068 | Node: RISCVDomainNode); |
22069 | |
22070 | return Store; |
22071 | } |
22072 | |
22073 | // Vector builtins are handled from here. |
22074 | #include "clang/Basic/riscv_vector_builtin_cg.inc" |
22075 | // SiFive Vector builtins are handled from here. |
22076 | #include "clang/Basic/riscv_sifive_vector_builtin_cg.inc" |
22077 | } |
22078 | |
22079 | assert(ID != Intrinsic::not_intrinsic); |
22080 | |
22081 | llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes); |
22082 | return Builder.CreateCall(Callee: F, Args: Ops, Name: "" ); |
22083 | } |
22084 | |