//===------- AMDGPU.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBuiltin.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/AtomicOrdering.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;

namespace {

// Emit an intrinsic call whose name is mangled on the types of both operands
// (the exponent type is a second mangled type argument).
static Value *
emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                       Intrinsic::ID IntrinsicID,
                                       Intrinsic::ID ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {Src0->getType(), Src1->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
  }

  Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}
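
// Illustrative only: routed through this helper with Intrinsic::ldexp and its
// constrained counterpart, a call in a strict-FP function would lower to
// something like
//   %r = call float @llvm.experimental.constrained.ldexp.f32.i32(
//            float %x, i32 %n, metadata !"round.dynamic",
//            metadata !"fpexcept.strict")
// and to a plain @llvm.ldexp.f32.i32 call otherwise.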

// If \p E is not a null pointer, insert an address space cast to match the
// return type of \p E if necessary.
Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
                             const CallExpr *E = nullptr) {
  auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
  auto *Call = CGF.Builder.CreateCall(F);
  if (!E)
    return Call;
  QualType BuiltinRetType = E->getType();
  auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
  if (RetTy == Call->getType())
    return Call;
  return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
}
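
// Illustrative only: the call materializes the dispatch-packet pointer in the
// constant address space, e.g.
//   %dp = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
// followed by an addrspacecast when the builtin's declared return type uses a
// different address space.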

Value *EmitAMDGPUImplicitArgPtr(CodeGenFunction &CGF) {
  auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_implicitarg_ptr);
  auto *Call = CGF.Builder.CreateCall(F);
  Call->addRetAttr(
      Attribute::getWithDereferenceableBytes(Call->getContext(), 256));
  Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(8)));
  return Call;
}

static llvm::Intrinsic::ID getAMDGPUWorkGroupID(CodeGenFunction &CGF,
                                                unsigned Index) {
  switch (Index) {
  case 0:
    return llvm::Intrinsic::amdgcn_workgroup_id_x;
  case 1:
    return llvm::Intrinsic::amdgcn_workgroup_id_y;
  case 2:
    return llvm::Intrinsic::amdgcn_workgroup_id_z;
  default:
    llvm_unreachable("unhandled index");
  }
}

static void setNoundefInvariantLoad(llvm::LoadInst *Ld) {
  Ld->setMetadata(llvm::LLVMContext::MD_noundef,
                  llvm::MDNode::get(Ld->getContext(), {}));
  Ld->setMetadata(llvm::LLVMContext::MD_invariant_load,
                  llvm::MDNode::get(Ld->getContext(), {}));
}

static void addMaxWorkGroupSizeRangeMetadata(CodeGenFunction &CGF,
                                             llvm::LoadInst *GroupSize) {
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  llvm::MDNode *RNode = MDHelper.createRange(
      APInt(16, 1),
      APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
  GroupSize->setMetadata(llvm::LLVMContext::MD_range, RNode);
  setNoundefInvariantLoad(GroupSize);
}
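
// Illustrative only: with a maximum OpenCL workgroup size of 1024 this tags
// the i16 load with
//   !range !{i16 1, i16 1025}   ; half-open [1, 1025)
// plus empty !noundef and !invariant.load nodes.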

static Value *emitAMDGPUWorkGroupSizeV5(CodeGenFunction &CGF, unsigned Index) {
  llvm::Value *ImplicitArgPtr = EmitAMDGPUImplicitArgPtr(CGF);

  // offsetof(amdhsa_implicit_kernarg_v5, block_count[Index])
  unsigned BlockCountOffset = 0 + Index * 4;
  // offsetof(amdhsa_implicit_kernarg_v5, group_size[Index])
  unsigned GroupSizeOffset = 12 + Index * 2;
  // offsetof(amdhsa_implicit_kernarg_v5, remainder[Index])
  unsigned RemainderOffset = 18 + Index * 2;

  if (CGF.CGM.getLangOpts().OffloadUniformBlock) {
    // Indexing the implicit kernarg segment.
    llvm::Value *GroupSizeGEP = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, ImplicitArgPtr, GroupSizeOffset);
    llvm::LoadInst *GroupSize = CGF.Builder.CreateLoad(
        Address(GroupSizeGEP, CGF.Int16Ty, CharUnits::fromQuantity(2)));

    addMaxWorkGroupSizeRangeMetadata(CGF, GroupSize);

    return CGF.Builder.CreateZExt(GroupSize, CGF.Int32Ty);
  }

  llvm::Value *BlockCountGEP = CGF.Builder.CreateConstGEP1_64(
      CGF.Int8Ty, ImplicitArgPtr, BlockCountOffset);
  llvm::LoadInst *BlockCount = CGF.Builder.CreateLoad(
      Address(BlockCountGEP, CGF.Int32Ty, CharUnits::fromQuantity(4)));
  setNoundefInvariantLoad(BlockCount);

  llvm::Value *WorkgroupID =
      CGF.Builder.CreateIntrinsic(getAMDGPUWorkGroupID(CGF, Index), {});
  llvm::Value *IsFull = CGF.Builder.CreateICmpULT(WorkgroupID, BlockCount);

  llvm::Value *StructOffset = CGF.Builder.CreateSelect(
      IsFull, ConstantInt::get(CGF.Int32Ty, GroupSizeOffset),
      ConstantInt::get(CGF.Int32Ty, RemainderOffset));

  llvm::Value *SizeGEP =
      CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ImplicitArgPtr, StructOffset);
  llvm::LoadInst *Size = CGF.Builder.CreateLoad(
      Address(SizeGEP, CGF.Int16Ty, CharUnits::fromQuantity(2)));
  addMaxWorkGroupSizeRangeMetadata(CGF, Size);
  setNoundefInvariantLoad(Size);

  return CGF.Builder.CreateZExt(Size, CGF.Int32Ty);
}
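
// Illustrative only: for Index == 0 in the non-uniform case the emitted IR
// looks roughly like
//   %bc   = load i32, ptr addrspace(4) %implicitarg      ; block_count.x
//   %id   = call i32 @llvm.amdgcn.workgroup.id.x()
//   %full = icmp ult i32 %id, %bc
//   %off  = select i1 %full, i32 12, i32 18      ; group_size.x : remainder.x
//   %gep  = getelementptr inbounds i8, ptr addrspace(4) %implicitarg, i32 %off
//   %sz   = load i16, ptr addrspace(4) %gep
//   %r    = zext i16 %sz to i32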

static Value *emitAMDGPUWorkGroupSizeV4(CodeGenFunction &CGF, unsigned Index) {
  llvm::Value *DispatchPtr = EmitAMDGPUDispatchPtr(CGF);

  // Indexing the HSA kernel_dispatch_packet struct.
  llvm::Value *GroupSizeGEP = CGF.Builder.CreateConstInBoundsGEP1_64(
      CGF.Int8Ty, DispatchPtr, 4 + Index * 2);
  llvm::LoadInst *GroupSizeLD = CGF.Builder.CreateLoad(
      Address(GroupSizeGEP, CGF.Int16Ty, CharUnits::fromQuantity(2)));

  addMaxWorkGroupSizeRangeMetadata(CGF, GroupSizeLD);

  llvm::Value *GroupSize = CGF.Builder.CreateZExt(GroupSizeLD, CGF.Int32Ty);

  if (CGF.CGM.getLangOpts().OffloadUniformBlock)
    return GroupSize;

  llvm::Value *WorkgroupID =
      CGF.Builder.CreateIntrinsic(getAMDGPUWorkGroupID(CGF, Index), {});

  llvm::Value *GridSizeGEP = CGF.Builder.CreateConstInBoundsGEP1_64(
      CGF.Int8Ty, DispatchPtr, 12 + Index * 4);
  llvm::LoadInst *GridSize = CGF.Builder.CreateLoad(
      Address(GridSizeGEP, CGF.Int32Ty, CharUnits::fromQuantity(4)));

  llvm::MDBuilder MDB(CGF.getLLVMContext());

  // Known non-zero.
  GridSize->setMetadata(llvm::LLVMContext::MD_range,
                        MDB.createRange(APInt(32, 1), APInt::getZero(32)));
  GridSize->setMetadata(llvm::LLVMContext::MD_invariant_load,
                        llvm::MDNode::get(CGF.getLLVMContext(), {}));

  llvm::Value *Mul = CGF.Builder.CreateMul(WorkgroupID, GroupSize);
  llvm::Value *Remainder = CGF.Builder.CreateSub(GridSize, Mul);

  llvm::Value *IsPartial = CGF.Builder.CreateICmpULT(Remainder, GroupSize);

  return CGF.Builder.CreateSelect(IsPartial, Remainder, GroupSize);
}

/// \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
/// Emit code based on the Code Object ABI version.
/// COV_4    : Emit code to use the dispatch ptr.
/// COV_5+   : Emit code to use the implicitarg ptr.
/// COV_NONE : Emit code to load the global variable "__oclc_ABI_version"
///            and select between the COV_4 and COV_5+ lowerings based on its
///            value. This is used to compile device libraries in an
///            ABI-agnostic way.
Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
  auto Cov = CGF.getTarget().getTargetOpts().CodeObjectVersion;

  // Do not emit __oclc_ABI_version references with a non-empty environment.
  if (Cov == CodeObjectVersionKind::COV_None &&
      CGF.getTarget().getTriple().hasEnvironment())
    Cov = CodeObjectVersionKind::COV_6;

  if (Cov == CodeObjectVersionKind::COV_None) {
    StringRef Name = "__oclc_ABI_version";
    auto *ABIVersionC = CGF.CGM.getModule().getNamedGlobal(Name);
    if (!ABIVersionC)
      ABIVersionC = new llvm::GlobalVariable(
          CGF.CGM.getModule(), CGF.Int32Ty, false,
          llvm::GlobalValue::ExternalLinkage, nullptr, Name, nullptr,
          llvm::GlobalVariable::NotThreadLocal,
          CGF.CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant));

    // This load will be eliminated by IPSCCP because it is constant
    // weak_odr without externally_initialized. Either changing it to weak or
    // adding externally_initialized will keep the load.
    Value *ABIVersion = CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, ABIVersionC,
                                                      CGF.CGM.getIntAlign());

    Value *IsCOV5 = CGF.Builder.CreateICmpSGE(
        ABIVersion,
        llvm::ConstantInt::get(CGF.Int32Ty, CodeObjectVersionKind::COV_5));

    llvm::Value *V5Impl = emitAMDGPUWorkGroupSizeV5(CGF, Index);
    llvm::Value *V4Impl = emitAMDGPUWorkGroupSizeV4(CGF, Index);
    return CGF.Builder.CreateSelect(IsCOV5, V5Impl, V4Impl);
  }

  return Cov >= CodeObjectVersionKind::COV_5
             ? emitAMDGPUWorkGroupSizeV5(CGF, Index)
             : emitAMDGPUWorkGroupSizeV4(CGF, Index);
}
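
// Illustrative only: under COV_NONE both lowerings are materialized and
// selected at run time (IPSCCP folds the comparison once __oclc_ABI_version
// is known):
//   %abi  = load i32, ptr addrspace(4) @__oclc_ABI_version
//   %cov5 = icmp sge i32 %abi, 500    ; 500 == CodeObjectVersionKind::COV_5
//   %r    = select i1 %cov5, i32 %v5, i32 %v4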

// \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
  const unsigned XOffset = 12;
  auto *DP = EmitAMDGPUDispatchPtr(CGF);
  // Indexing the HSA kernel_dispatch_packet struct.
  auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
  auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
  auto *LD = CGF.Builder.CreateLoad(
      Address(GEP, CGF.Int32Ty, CharUnits::fromQuantity(4)));

  llvm::MDBuilder MDB(CGF.getLLVMContext());

  // Known non-zero.
  LD->setMetadata(llvm::LLVMContext::MD_range,
                  MDB.createRange(APInt(32, 1), APInt::getZero(32)));
  LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
                  llvm::MDNode::get(CGF.getLLVMContext(), {}));
  return LD;
}
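
// Illustrative only: for Index == 0 this emits roughly
//   %gep = getelementptr i8, ptr addrspace(4) %dp, i32 12   ; grid_size_x
//   %gs  = load i32, ptr addrspace(4) %gep, !range !{i32 1, i32 0},
//          !invariant.load !{}
// where the wrapped range [1, 0) encodes "any 32-bit value except zero".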
} // namespace

// Generates the IR for __builtin_read_exec_*.
// Lowers the builtin to the amdgcn_ballot intrinsic.
static Value *EmitAMDGCNBallotForExec(CodeGenFunction &CGF, const CallExpr *E,
                                      llvm::Type *RegisterType,
                                      llvm::Type *ValueType, bool isExecHi) {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  CodeGen::CodeGenModule &CGM = CGF.CGM;

  Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, {RegisterType});
  llvm::Value *Call = Builder.CreateCall(F, {Builder.getInt1(true)});

  if (isExecHi) {
    Value *Rt2 = Builder.CreateLShr(Call, 32);
    Rt2 = Builder.CreateTrunc(Rt2, CGF.Int32Ty);
    return Rt2;
  }

  return Call;
}
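
// Illustrative only: __builtin_amdgcn_read_exec_hi lowers to
//   %b = call i64 @llvm.amdgcn.ballot.i64(i1 true)   ; full exec mask
//   %h = lshr i64 %b, 32
//   %r = trunc i64 %h to i32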

static llvm::Value *loadTextureDescPtorAsVec8I32(CodeGenFunction &CGF,
                                                 llvm::Value *RsrcPtr) {
  auto &B = CGF.Builder;
  auto *VecTy = llvm::FixedVectorType::get(B.getInt32Ty(), 8);

  if (RsrcPtr->getType() == VecTy)
    return RsrcPtr;

  if (RsrcPtr->getType()->isIntegerTy(32)) {
    llvm::PointerType *VecPtrTy =
        llvm::PointerType::get(CGF.getLLVMContext(), 8);
    llvm::Value *Ptr = B.CreateIntToPtr(RsrcPtr, VecPtrTy, "tex.rsrc.from.int");
    return B.CreateAlignedLoad(VecTy, Ptr, llvm::Align(32), "tex.rsrc.val");
  }

  if (RsrcPtr->getType()->isPointerTy()) {
    auto *VecPtrTy = llvm::PointerType::get(
        CGF.getLLVMContext(), RsrcPtr->getType()->getPointerAddressSpace());
    llvm::Value *Typed = B.CreateBitCast(RsrcPtr, VecPtrTy, "tex.rsrc.typed");
    return B.CreateAlignedLoad(VecTy, Typed, llvm::Align(32), "tex.rsrc.val");
  }

  const auto &DL = CGF.CGM.getDataLayout();
  if (DL.getTypeSizeInBits(RsrcPtr->getType()) == 256)
    return B.CreateBitCast(RsrcPtr, VecTy, "tex.rsrc.val");

  llvm::report_fatal_error("Unexpected texture resource argument form");
}

llvm::CallInst *
emitAMDGCNImageOverloadedReturnType(clang::CodeGen::CodeGenFunction &CGF,
                                    const clang::CallExpr *E,
                                    unsigned IntrinsicID, bool IsImageStore) {
  auto findTextureDescIndex = [&CGF](const CallExpr *E) -> unsigned {
    QualType TexQT = CGF.getContext().AMDGPUTextureTy;
    for (unsigned I = 0, N = E->getNumArgs(); I < N; ++I) {
      QualType ArgTy = E->getArg(I)->getType();
      if (ArgTy == TexQT) {
        return I;
      }

      if (ArgTy.getCanonicalType() == TexQT.getCanonicalType()) {
        return I;
      }
    }

    return ~0U;
  };

  clang::SmallVector<llvm::Value *, 10> Args;
  unsigned RsrcIndex = findTextureDescIndex(E);

  if (RsrcIndex == ~0U) {
    llvm::report_fatal_error("Invalid argument count for image builtin");
  }

  for (unsigned I = 0; I < E->getNumArgs(); ++I) {
    llvm::Value *V = CGF.EmitScalarExpr(E->getArg(I));
    if (I == RsrcIndex)
      V = loadTextureDescPtorAsVec8I32(CGF, V);
    Args.push_back(V);
  }

  llvm::Type *RetTy =
      IsImageStore ? CGF.VoidTy : CGF.ConvertType(E->getType());
  llvm::CallInst *Call = CGF.Builder.CreateIntrinsic(RetTy, IntrinsicID, Args);
  return Call;
}
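
// Illustrative only: an image-load builtin routed through the helper above,
// e.g. __builtin_amdgcn_image_load_2d_v4f32_i32, becomes a call to the
// corresponding llvm.amdgcn.image.load.2d intrinsic whose return type is
// overloaded to <4 x float> and whose texture operand has been reloaded as
// <8 x i32>.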

// Emit an intrinsic that has one float or double operand and one integer
// operand.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

static inline StringRef mapScopeToSPIRV(StringRef AMDGCNScope) {
  if (AMDGCNScope == "agent")
    return "device";
  if (AMDGCNScope == "wavefront")
    return "subgroup";
  return AMDGCNScope;
}

static llvm::AtomicOrdering mapCABIAtomicOrdering(unsigned AO) {
  // Map C11/C++11 memory ordering to LLVM memory ordering.
  assert(llvm::isValidAtomicOrderingCABI(AO));
  switch (static_cast<llvm::AtomicOrderingCABI>(AO)) {
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::consume:
    return llvm::AtomicOrdering::Acquire;
  case llvm::AtomicOrderingCABI::release:
    return llvm::AtomicOrdering::Release;
  case llvm::AtomicOrderingCABI::acq_rel:
    return llvm::AtomicOrdering::AcquireRelease;
  case llvm::AtomicOrderingCABI::seq_cst:
    return llvm::AtomicOrdering::SequentiallyConsistent;
  case llvm::AtomicOrderingCABI::relaxed:
    return llvm::AtomicOrdering::Monotonic;
  }
  llvm_unreachable("Unknown AtomicOrderingCABI enum");
}

// For processing the memory ordering and memory scope arguments of various
// amdgcn builtins.
// \p Order takes a C++11-compatible memory-ordering specifier, converts it
// into LLVM's memory ordering specifier using the atomic C ABI, and writes
// the result to \p AO. \p Scope takes a const char * and converts it into an
// AMDGCN-specific SyncScopeID, which is written to \p SSID.
void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
                                              llvm::AtomicOrdering &AO,
                                              llvm::SyncScope::ID &SSID) {
  int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();

  // Map C11/C++11 memory ordering to LLVM memory ordering.
  AO = mapCABIAtomicOrdering(ord);

  // Some of the atomic builtins take the scope as a string name.
  StringRef scp;
  if (llvm::getConstantStringInfo(Scope, scp)) {
    if (getTarget().getTriple().isSPIRV())
      scp = mapScopeToSPIRV(scp);
    SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
    return;
  }

  // Older builtins had an enum argument for the memory scope.
  const char *SSN = nullptr;
  int scope = cast<llvm::ConstantInt>(Scope)->getZExtValue();
  switch (scope) {
  case AtomicScopeGenericModel::System: // __MEMORY_SCOPE_SYSTEM
    SSID = llvm::SyncScope::System;
    break;
  case AtomicScopeGenericModel::Device: // __MEMORY_SCOPE_DEVICE
    SSN = getTarget().getTriple().isSPIRV() ? "device" : "agent";
    break;
  case AtomicScopeGenericModel::Workgroup: // __MEMORY_SCOPE_WRKGRP
    SSN = "workgroup";
    break;
  case AtomicScopeGenericModel::Cluster: // __MEMORY_SCOPE_CLUSTR
    SSN = getTarget().getTriple().isSPIRV() ? "workgroup" : "cluster";
    break;
  case AtomicScopeGenericModel::Wavefront: // __MEMORY_SCOPE_WVFRNT
    SSN = getTarget().getTriple().isSPIRV() ? "subgroup" : "wavefront";
    break;
  case AtomicScopeGenericModel::Single: // __MEMORY_SCOPE_SINGLE
    SSID = llvm::SyncScope::SingleThread;
    break;
  default:
    SSID = llvm::SyncScope::System;
    break;
  }
  if (SSN)
    SSID = getLLVMContext().getOrInsertSyncScopeID(SSN);
}
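
// Illustrative only: __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "agent") ends
// up with AO == SequentiallyConsistent and SSID == syncscope("agent")
// (remapped to "device" when targeting SPIR-V); the legacy enum form maps,
// e.g., __MEMORY_SCOPE_WRKGRP to syncscope("workgroup").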

void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
                                                     const CallExpr *E) {
  constexpr const char *Tag = "amdgpu-synchronize-as";

  LLVMContext &Ctx = Inst->getContext();
  SmallVector<MMRAMetadata::TagT, 3> MMRAs;
  for (unsigned K = 2; K < E->getNumArgs(); ++K) {
    llvm::Value *V = EmitScalarExpr(E->getArg(K));
    StringRef AS;
    if (llvm::getConstantStringInfo(V, AS)) {
      MMRAs.push_back({Tag, AS});
      // TODO: Delete the resulting unused constant?
      continue;
    }
    CGM.Error(E->getExprLoc(),
              "expected an address space name as a string literal");
  }

  llvm::sort(MMRAs);
  MMRAs.erase(llvm::unique(MMRAs), MMRAs.end());
  Inst->setMetadata(LLVMContext::MD_mmra, MMRAMetadata::getMD(Ctx, MMRAs));
}
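
// Illustrative only: __builtin_amdgcn_fence(__ATOMIC_RELEASE, "agent",
// "local") tags the emitted fence with
//   !mmra !{!"amdgpu-synchronize-as", !"local"}
// so later passes know only LDS needs to be synchronized.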

static Value *GetAMDGPUPredicate(CodeGenFunction &CGF, Twine Name) {
  Constant *SpecId = ConstantInt::getAllOnesValue(CGF.Int32Ty);

  LLVMContext &Ctx = CGF.getLLVMContext();
  MDNode *Predicate = MDNode::get(Ctx, MDString::get(Ctx, Name.str()));
  std::vector<Value *> Args = {SpecId, ConstantInt::getFalse(Ctx),
                               MetadataAsValue::get(Ctx, Predicate)};
  CallInst *Call = CGF.Builder.CreateIntrinsic(
      Intrinsic::spv_named_boolean_spec_constant, Args);

  return Call;
}

static Intrinsic::ID getIntrinsicIDforWaveReduction(unsigned BuiltinID) {
  switch (BuiltinID) {
  default:
    llvm_unreachable("Unknown BuiltinID for wave reduction");
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_add_u32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_add_u64:
    return Intrinsic::amdgcn_wave_reduce_add;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_fadd_f32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_fadd_f64:
    return Intrinsic::amdgcn_wave_reduce_fadd;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_sub_u32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_sub_u64:
    return Intrinsic::amdgcn_wave_reduce_sub;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_fsub_f32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_fsub_f64:
    return Intrinsic::amdgcn_wave_reduce_fsub;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_min_i32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_min_i64:
    return Intrinsic::amdgcn_wave_reduce_min;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_fmin_f32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_fmin_f64:
    return Intrinsic::amdgcn_wave_reduce_fmin;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_min_u32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_min_u64:
    return Intrinsic::amdgcn_wave_reduce_umin;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_max_i32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_max_i64:
    return Intrinsic::amdgcn_wave_reduce_max;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_fmax_f32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_fmax_f64:
    return Intrinsic::amdgcn_wave_reduce_fmax;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_max_u32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_max_u64:
    return Intrinsic::amdgcn_wave_reduce_umax;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_and_b32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_and_b64:
    return Intrinsic::amdgcn_wave_reduce_and;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_or_b32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_or_b64:
    return Intrinsic::amdgcn_wave_reduce_or;
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_xor_b32:
  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_xor_b64:
    return Intrinsic::amdgcn_wave_reduce_xor;
  }
}

Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
  llvm::SyncScope::ID SSID;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_add_u32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_fadd_f32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_fadd_f64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_sub_u32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_fsub_f32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_fsub_f64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_min_i32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_min_u32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_fmin_f32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_fmin_f64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_max_i32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_max_u32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_fmax_f32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_fmax_f64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_and_b32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_or_b32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_xor_b32:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_add_u64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_sub_u64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_min_i64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_min_u64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_max_i64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_max_u64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_and_b64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_or_b64:
  case AMDGPU::BI__builtin_amdgcn_wave_reduce_xor_b64: {
    Intrinsic::ID IID = getIntrinsicIDforWaveReduction(BuiltinID);
    llvm::Value *Value = EmitScalarExpr(E->getArg(0));
    llvm::Value *Strategy = EmitScalarExpr(E->getArg(1));
    llvm::Function *F = CGM.getIntrinsic(IID, {Value->getType()});
    return Builder.CreateCall(F, {Value, Strategy});
  }
  case AMDGPU::BI__builtin_amdgcn_div_scale:
  case AMDGPU::BI__builtin_amdgcn_div_scalef: {
    // Translate from the intrinsic's struct return to the builtin's out
    // argument.

    Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));

    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Z = EmitScalarExpr(E->getArg(2));

    llvm::Function *Callee =
        CGM.getIntrinsic(Intrinsic::amdgcn_div_scale, X->getType());

    llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});

    llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
    llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);

    llvm::Type *RealFlagType = FlagOutPtr.getElementType();

    llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
    Builder.CreateStore(FlagExt, FlagOutPtr);
    return Result;
  }
  case AMDGPU::BI__builtin_amdgcn_div_fmas:
  case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
    llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));

    llvm::Function *F =
        CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas, Src0->getType());
    llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
    return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
  }

  case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
    return emitBuiltinWithOneOverloadedType<2>(*this, E,
                                               Intrinsic::amdgcn_ds_swizzle);
  case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
  case AMDGPU::BI__builtin_amdgcn_mov_dpp:
  case AMDGPU::BI__builtin_amdgcn_update_dpp: {
    llvm::SmallVector<llvm::Value *, 6> Args;
    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");
    llvm::Type *DataTy = ConvertType(E->getArg(0)->getType());
    unsigned Size = DataTy->getPrimitiveSizeInBits();
    llvm::Type *IntTy =
        llvm::IntegerType::get(Builder.getContext(), std::max(Size, 32u));
    Function *F =
        CGM.getIntrinsic(BuiltinID == AMDGPU::BI__builtin_amdgcn_mov_dpp8
                             ? Intrinsic::amdgcn_mov_dpp8
                             : Intrinsic::amdgcn_update_dpp,
                         IntTy);
    assert(E->getNumArgs() == 5 || E->getNumArgs() == 6 ||
           E->getNumArgs() == 2);
    bool InsertOld = BuiltinID == AMDGPU::BI__builtin_amdgcn_mov_dpp;
    if (InsertOld)
      Args.push_back(llvm::PoisonValue::get(IntTy));
    for (unsigned I = 0; I != E->getNumArgs(); ++I) {
      llvm::Value *V = EmitScalarOrConstFoldImmArg(ICEArguments, I, E);
      if (I < (BuiltinID == AMDGPU::BI__builtin_amdgcn_update_dpp ? 2u : 1u) &&
          Size < 32) {
        if (!DataTy->isIntegerTy())
          V = Builder.CreateBitCast(
              V, llvm::IntegerType::get(Builder.getContext(), Size));
        V = Builder.CreateZExtOrBitCast(V, IntTy);
      }
      llvm::Type *ExpTy =
          F->getFunctionType()->getFunctionParamType(I + InsertOld);
      Args.push_back(Builder.CreateTruncOrBitCast(V, ExpTy));
    }
    Value *V = Builder.CreateCall(F, Args);
    if (Size < 32 && !DataTy->isIntegerTy())
      V = Builder.CreateTrunc(
          V, llvm::IntegerType::get(Builder.getContext(), Size));
    return Builder.CreateTruncOrBitCast(V, DataTy);
  }
  case AMDGPU::BI__builtin_amdgcn_permlane16:
  case AMDGPU::BI__builtin_amdgcn_permlanex16:
    return emitBuiltinWithOneOverloadedType<6>(
        *this, E,
        BuiltinID == AMDGPU::BI__builtin_amdgcn_permlane16
            ? Intrinsic::amdgcn_permlane16
            : Intrinsic::amdgcn_permlanex16);
  case AMDGPU::BI__builtin_amdgcn_permlane64:
    return emitBuiltinWithOneOverloadedType<1>(*this, E,
                                               Intrinsic::amdgcn_permlane64);
  case AMDGPU::BI__builtin_amdgcn_readlane:
    return emitBuiltinWithOneOverloadedType<2>(*this, E,
                                               Intrinsic::amdgcn_readlane);
  case AMDGPU::BI__builtin_amdgcn_readfirstlane:
    return emitBuiltinWithOneOverloadedType<1>(*this, E,
                                               Intrinsic::amdgcn_readfirstlane);
  case AMDGPU::BI__builtin_amdgcn_div_fixup:
  case AMDGPU::BI__builtin_amdgcn_div_fixupf:
  case AMDGPU::BI__builtin_amdgcn_div_fixuph:
    return emitBuiltinWithOneOverloadedType<3>(*this, E,
                                               Intrinsic::amdgcn_div_fixup);
  case AMDGPU::BI__builtin_amdgcn_trig_preop:
  case AMDGPU::BI__builtin_amdgcn_trig_preopf:
    return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
  case AMDGPU::BI__builtin_amdgcn_rcp:
  case AMDGPU::BI__builtin_amdgcn_rcpf:
  case AMDGPU::BI__builtin_amdgcn_rcph:
  case AMDGPU::BI__builtin_amdgcn_rcp_bf16:
    return emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::amdgcn_rcp);
  case AMDGPU::BI__builtin_amdgcn_sqrt:
  case AMDGPU::BI__builtin_amdgcn_sqrtf:
  case AMDGPU::BI__builtin_amdgcn_sqrth:
  case AMDGPU::BI__builtin_amdgcn_sqrt_bf16:
    return emitBuiltinWithOneOverloadedType<1>(*this, E,
                                               Intrinsic::amdgcn_sqrt);
  case AMDGPU::BI__builtin_amdgcn_rsq:
  case AMDGPU::BI__builtin_amdgcn_rsqf:
  case AMDGPU::BI__builtin_amdgcn_rsqh:
  case AMDGPU::BI__builtin_amdgcn_rsq_bf16:
    return emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::amdgcn_rsq);
  case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
  case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
    return emitBuiltinWithOneOverloadedType<1>(*this, E,
                                               Intrinsic::amdgcn_rsq_clamp);
  case AMDGPU::BI__builtin_amdgcn_sinf:
  case AMDGPU::BI__builtin_amdgcn_sinh:
  case AMDGPU::BI__builtin_amdgcn_sin_bf16:
    return emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::amdgcn_sin);
  case AMDGPU::BI__builtin_amdgcn_cosf:
  case AMDGPU::BI__builtin_amdgcn_cosh:
  case AMDGPU::BI__builtin_amdgcn_cos_bf16:
    return emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::amdgcn_cos);
  case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
    return EmitAMDGPUDispatchPtr(*this, E);
  case AMDGPU::BI__builtin_amdgcn_logf:
  case AMDGPU::BI__builtin_amdgcn_log_bf16:
    return emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::amdgcn_log);
  case AMDGPU::BI__builtin_amdgcn_exp2f:
  case AMDGPU::BI__builtin_amdgcn_exp2_bf16:
    return emitBuiltinWithOneOverloadedType<1>(*this, E,
                                               Intrinsic::amdgcn_exp2);
  case AMDGPU::BI__builtin_amdgcn_log_clampf:
    return emitBuiltinWithOneOverloadedType<1>(*this, E,
                                               Intrinsic::amdgcn_log_clamp);
  case AMDGPU::BI__builtin_amdgcn_ldexp:
  case AMDGPU::BI__builtin_amdgcn_ldexpf: {
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Function *F =
        CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Src1->getType()});
    return Builder.CreateCall(F, {Src0, Src1});
  }
  case AMDGPU::BI__builtin_amdgcn_ldexph: {
    // The raw instruction has a different behavior for out-of-bounds exponent
    // values (implicit truncation instead of saturate to short_min/short_max).
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Function *F =
        CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Int16Ty});
    return Builder.CreateCall(F, {Src0, Builder.CreateTrunc(Src1, Int16Ty)});
  }
  case AMDGPU::BI__builtin_amdgcn_frexp_mant:
  case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
  case AMDGPU::BI__builtin_amdgcn_frexp_manth:
    return emitBuiltinWithOneOverloadedType<1>(*this, E,
                                               Intrinsic::amdgcn_frexp_mant);
  case AMDGPU::BI__builtin_amdgcn_frexp_exp:
  case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
    Value *Src0 = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
                                   {Builder.getInt32Ty(), Src0->getType()});
    return Builder.CreateCall(F, Src0);
  }
  case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
    Value *Src0 = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
                                   {Builder.getInt16Ty(), Src0->getType()});
    return Builder.CreateCall(F, Src0);
  }
  case AMDGPU::BI__builtin_amdgcn_fract:
  case AMDGPU::BI__builtin_amdgcn_fractf:
  case AMDGPU::BI__builtin_amdgcn_fracth:
    return emitBuiltinWithOneOverloadedType<1>(*this, E,
                                               Intrinsic::amdgcn_fract);
  case AMDGPU::BI__builtin_amdgcn_lerp:
    return emitBuiltinWithOneOverloadedType<3>(*this, E,
                                               Intrinsic::amdgcn_lerp);
  case AMDGPU::BI__builtin_amdgcn_ubfe:
    return emitBuiltinWithOneOverloadedType<3>(*this, E,
                                               Intrinsic::amdgcn_ubfe);
  case AMDGPU::BI__builtin_amdgcn_sbfe:
    return emitBuiltinWithOneOverloadedType<3>(*this, E,
                                               Intrinsic::amdgcn_sbfe);
  case AMDGPU::BI__builtin_amdgcn_ballot_w32:
  case AMDGPU::BI__builtin_amdgcn_ballot_w64: {
    llvm::Type *ResultType = ConvertType(E->getType());
    llvm::Value *Src = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, {ResultType});
    return Builder.CreateCall(F, {Src});
  }
  case AMDGPU::BI__builtin_amdgcn_inverse_ballot_w32:
  case AMDGPU::BI__builtin_amdgcn_inverse_ballot_w64: {
    llvm::Value *Src = EmitScalarExpr(E->getArg(0));
    Function *F =
        CGM.getIntrinsic(Intrinsic::amdgcn_inverse_ballot, {Src->getType()});
    return Builder.CreateCall(F, {Src});
  }
  case AMDGPU::BI__builtin_amdgcn_tanhf:
  case AMDGPU::BI__builtin_amdgcn_tanhh:
  case AMDGPU::BI__builtin_amdgcn_tanh_bf16:
    return emitBuiltinWithOneOverloadedType<1>(*this, E,
                                               Intrinsic::amdgcn_tanh);
  case AMDGPU::BI__builtin_amdgcn_uicmp:
  case AMDGPU::BI__builtin_amdgcn_uicmpl:
  case AMDGPU::BI__builtin_amdgcn_sicmp:
  case AMDGPU::BI__builtin_amdgcn_sicmpl: {
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));

    // FIXME-GFX10: How should the 32-bit mask be handled?
    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
                                   {Builder.getInt64Ty(), Src0->getType()});
    return Builder.CreateCall(F, {Src0, Src1, Src2});
  }
  case AMDGPU::BI__builtin_amdgcn_fcmp:
  case AMDGPU::BI__builtin_amdgcn_fcmpf: {
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));

    // FIXME-GFX10: How should the 32-bit mask be handled?
    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
                                   {Builder.getInt64Ty(), Src0->getType()});
    return Builder.CreateCall(F, {Src0, Src1, Src2});
  }
  case AMDGPU::BI__builtin_amdgcn_class:
  case AMDGPU::BI__builtin_amdgcn_classf:
  case AMDGPU::BI__builtin_amdgcn_classh:
    return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
  case AMDGPU::BI__builtin_amdgcn_fmed3f:
  case AMDGPU::BI__builtin_amdgcn_fmed3h:
    return emitBuiltinWithOneOverloadedType<3>(*this, E,
                                               Intrinsic::amdgcn_fmed3);
  case AMDGPU::BI__builtin_amdgcn_ds_append:
  case AMDGPU::BI__builtin_amdgcn_ds_consume: {
    Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append
                               ? Intrinsic::amdgcn_ds_append
                               : Intrinsic::amdgcn_ds_consume;
    Value *Src0 = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrin, {Src0->getType()});
    return Builder.CreateCall(F, {Src0, Builder.getFalse()});
  }
  case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_i32:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_v2i32:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4i16:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4f16:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4bf16:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8i16:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8f16:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8bf16:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr4_b64_v2i32:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr8_b64_v2i32:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr6_b96_v3i32:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr16_b128_v8i16:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr16_b128_v8f16:
  case AMDGPU::BI__builtin_amdgcn_global_load_tr16_b128_v8bf16:
  case AMDGPU::BI__builtin_amdgcn_ds_load_tr4_b64_v2i32:
  case AMDGPU::BI__builtin_amdgcn_ds_load_tr8_b64_v2i32:
  case AMDGPU::BI__builtin_amdgcn_ds_load_tr6_b96_v3i32:
  case AMDGPU::BI__builtin_amdgcn_ds_load_tr16_b128_v8i16:
  case AMDGPU::BI__builtin_amdgcn_ds_load_tr16_b128_v8f16:
  case AMDGPU::BI__builtin_amdgcn_ds_load_tr16_b128_v8bf16:
  case AMDGPU::BI__builtin_amdgcn_ds_read_tr4_b64_v2i32:
  case AMDGPU::BI__builtin_amdgcn_ds_read_tr8_b64_v2i32:
  case AMDGPU::BI__builtin_amdgcn_ds_read_tr6_b96_v3i32:
  case AMDGPU::BI__builtin_amdgcn_ds_read_tr16_b64_v4f16:
  case AMDGPU::BI__builtin_amdgcn_ds_read_tr16_b64_v4bf16:
  case AMDGPU::BI__builtin_amdgcn_ds_read_tr16_b64_v4i16: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_i32:
    case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_v2i32:
    case AMDGPU::BI__builtin_amdgcn_global_load_tr8_b64_v2i32:
      IID = Intrinsic::amdgcn_global_load_tr_b64;
      break;
    case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4i16:
    case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4f16:
    case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4bf16:
    case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8i16:
    case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8f16:
    case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8bf16:
    case AMDGPU::BI__builtin_amdgcn_global_load_tr16_b128_v8i16:
    case AMDGPU::BI__builtin_amdgcn_global_load_tr16_b128_v8f16:
    case AMDGPU::BI__builtin_amdgcn_global_load_tr16_b128_v8bf16:
      IID = Intrinsic::amdgcn_global_load_tr_b128;
      break;
    case AMDGPU::BI__builtin_amdgcn_global_load_tr4_b64_v2i32:
      IID = Intrinsic::amdgcn_global_load_tr4_b64;
      break;
    case AMDGPU::BI__builtin_amdgcn_global_load_tr6_b96_v3i32:
      IID = Intrinsic::amdgcn_global_load_tr6_b96;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_load_tr4_b64_v2i32:
      IID = Intrinsic::amdgcn_ds_load_tr4_b64;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_load_tr6_b96_v3i32:
      IID = Intrinsic::amdgcn_ds_load_tr6_b96;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_load_tr8_b64_v2i32:
      IID = Intrinsic::amdgcn_ds_load_tr8_b64;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_load_tr16_b128_v8i16:
    case AMDGPU::BI__builtin_amdgcn_ds_load_tr16_b128_v8f16:
    case AMDGPU::BI__builtin_amdgcn_ds_load_tr16_b128_v8bf16:
      IID = Intrinsic::amdgcn_ds_load_tr16_b128;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_read_tr4_b64_v2i32:
      IID = Intrinsic::amdgcn_ds_read_tr4_b64;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_read_tr8_b64_v2i32:
      IID = Intrinsic::amdgcn_ds_read_tr8_b64;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_read_tr6_b96_v3i32:
      IID = Intrinsic::amdgcn_ds_read_tr6_b96;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_read_tr16_b64_v4i16:
    case AMDGPU::BI__builtin_amdgcn_ds_read_tr16_b64_v4f16:
    case AMDGPU::BI__builtin_amdgcn_ds_read_tr16_b64_v4bf16:
      IID = Intrinsic::amdgcn_ds_read_tr16_b64;
      break;
    }
    llvm::Type *LoadTy = ConvertType(E->getType());
    llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
    llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
    return Builder.CreateCall(F, {Addr});
  }
  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
      IID = Intrinsic::amdgcn_global_load_monitor_b32;
      break;
    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
      IID = Intrinsic::amdgcn_global_load_monitor_b64;
      break;
    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
      IID = Intrinsic::amdgcn_global_load_monitor_b128;
      break;
    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
      IID = Intrinsic::amdgcn_flat_load_monitor_b32;
      break;
    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
      IID = Intrinsic::amdgcn_flat_load_monitor_b64;
      break;
    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128:
      IID = Intrinsic::amdgcn_flat_load_monitor_b128;
      break;
    }

    LLVMContext &Ctx = CGM.getLLVMContext();
    llvm::Type *LoadTy = ConvertType(E->getType());
    llvm::Value *Addr = EmitScalarExpr(E->getArg(0));

    auto *AOExpr = cast<llvm::ConstantInt>(EmitScalarExpr(E->getArg(1)));
    auto *ScopeExpr = cast<llvm::ConstantInt>(EmitScalarExpr(E->getArg(2)));

    auto Scope = static_cast<SyncScope>(ScopeExpr->getZExtValue());
    llvm::AtomicOrdering AO = mapCABIAtomicOrdering(AOExpr->getZExtValue());

    StringRef ScopeStr = CGM.getTargetCodeGenInfo().getLLVMSyncScopeStr(
        CGM.getLangOpts(), Scope, AO);

    llvm::MDNode *MD =
        llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, ScopeStr)});
    llvm::Value *ScopeMD = llvm::MetadataAsValue::get(Ctx, MD);
    llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
    return Builder.CreateCall(F, {Addr, AOExpr, ScopeMD});
  }
  case AMDGPU::BI__builtin_amdgcn_cluster_load_b32:
  case AMDGPU::BI__builtin_amdgcn_cluster_load_b64:
  case AMDGPU::BI__builtin_amdgcn_cluster_load_b128: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_cluster_load_b32:
      IID = Intrinsic::amdgcn_cluster_load_b32;
      break;
    case AMDGPU::BI__builtin_amdgcn_cluster_load_b64:
      IID = Intrinsic::amdgcn_cluster_load_b64;
      break;
    case AMDGPU::BI__builtin_amdgcn_cluster_load_b128:
      IID = Intrinsic::amdgcn_cluster_load_b128;
      break;
    }
    SmallVector<Value *, 3> Args;
    for (int i = 0, e = E->getNumArgs(); i != e; ++i)
      Args.push_back(EmitScalarExpr(E->getArg(i)));
    llvm::Function *F = CGM.getIntrinsic(IID, {ConvertType(E->getType())});
    return Builder.CreateCall(F, {Args});
  }
  case AMDGPU::BI__builtin_amdgcn_load_to_lds: {
    // Should this have asan instrumentation?
    return emitBuiltinWithOneOverloadedType<5>(*this, E,
                                               Intrinsic::amdgcn_load_to_lds);
  }
  case AMDGPU::BI__builtin_amdgcn_load_async_to_lds: {
    // Should this have asan instrumentation?
    return emitBuiltinWithOneOverloadedType<5>(
        *this, E, Intrinsic::amdgcn_load_async_to_lds);
  }
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_32x4B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_32x4B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_16x8B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_16x8B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_8x16B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_8x16B: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_32x4B:
      IID = Intrinsic::amdgcn_cooperative_atomic_load_32x4B;
      break;
    case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_32x4B:
      IID = Intrinsic::amdgcn_cooperative_atomic_store_32x4B;
      break;
    case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_16x8B:
      IID = Intrinsic::amdgcn_cooperative_atomic_load_16x8B;
      break;
    case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_16x8B:
      IID = Intrinsic::amdgcn_cooperative_atomic_store_16x8B;
      break;
    case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_8x16B:
      IID = Intrinsic::amdgcn_cooperative_atomic_load_8x16B;
      break;
    case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_8x16B:
      IID = Intrinsic::amdgcn_cooperative_atomic_store_8x16B;
      break;
    }

    LLVMContext &Ctx = CGM.getLLVMContext();
    SmallVector<Value *, 5> Args;
    // The last argument is an MD string.
    const unsigned ScopeArg = E->getNumArgs() - 1;
    for (unsigned i = 0; i != ScopeArg; ++i)
      Args.push_back(EmitScalarExpr(E->getArg(i)));
    StringRef Arg = cast<StringLiteral>(E->getArg(ScopeArg)->IgnoreParenCasts())
                        ->getString();
    llvm::MDNode *MD = llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, Arg)});
    Args.push_back(llvm::MetadataAsValue::get(Ctx, MD));
    // The intrinsic is typed based on the pointer AS. The pointer is always
    // the first argument.
    llvm::Function *F = CGM.getIntrinsic(IID, {Args[0]->getType()});
    return Builder.CreateCall(F, {Args});
  }
  case AMDGPU::BI__builtin_amdgcn_get_fpenv: {
    Function *F = CGM.getIntrinsic(Intrinsic::get_fpenv,
                                   {llvm::Type::getInt64Ty(getLLVMContext())});
    return Builder.CreateCall(F);
  }
  case AMDGPU::BI__builtin_amdgcn_set_fpenv: {
    Function *F = CGM.getIntrinsic(Intrinsic::set_fpenv,
                                   {llvm::Type::getInt64Ty(getLLVMContext())});
    llvm::Value *Env = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(F, {Env});
  }
  case AMDGPU::BI__builtin_amdgcn_processor_is: {
    assert(CGM.getTriple().isSPIRV() &&
           "__builtin_amdgcn_processor_is should never reach CodeGen for "
           "concrete targets!");
    StringRef Proc = cast<clang::StringLiteral>(E->getArg(0))->getString();
    return GetAMDGPUPredicate(*this, "is." + Proc);
  }
  case AMDGPU::BI__builtin_amdgcn_is_invocable: {
    assert(CGM.getTriple().isSPIRV() &&
           "__builtin_amdgcn_is_invocable should never reach CodeGen for "
           "concrete targets!");
    auto *FD = cast<FunctionDecl>(
        cast<DeclRefExpr>(E->getArg(0))->getReferencedDeclOfCallee());
    StringRef RF =
        getContext().BuiltinInfo.getRequiredFeatures(FD->getBuiltinID());
    return GetAMDGPUPredicate(*this, "has." + RF);
  }
  case AMDGPU::BI__builtin_amdgcn_read_exec:
    return EmitAMDGCNBallotForExec(*this, E, Int64Ty, Int64Ty, false);
  case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
    return EmitAMDGCNBallotForExec(*this, E, Int32Ty, Int32Ty, false);
  case AMDGPU::BI__builtin_amdgcn_read_exec_hi:
    return EmitAMDGCNBallotForExec(*this, E, Int64Ty, Int64Ty, true);
  case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray:
  case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h:
  case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l:
  case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: {
    llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0));
    llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1));
    llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2));
    llvm::Value *RayDir = EmitScalarExpr(E->getArg(3));
    llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4));
    llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5));

    // The builtins take these arguments as vec4 where the last element is
    // ignored. The intrinsic takes them as vec3.
    RayOrigin = Builder.CreateShuffleVector(RayOrigin, RayOrigin, {0, 1, 2});
    RayDir = Builder.CreateShuffleVector(RayDir, RayDir, {0, 1, 2});
    RayInverseDir =
        Builder.CreateShuffleVector(RayInverseDir, RayInverseDir, {0, 1, 2});

    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
                                   {NodePtr->getType(), RayDir->getType()});
    return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir,
                                  RayInverseDir, TextureDescr});
  }
  case AMDGPU::BI__builtin_amdgcn_image_bvh8_intersect_ray:
  case AMDGPU::BI__builtin_amdgcn_image_bvh_dual_intersect_ray: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_image_bvh8_intersect_ray:
      IID = Intrinsic::amdgcn_image_bvh8_intersect_ray;
      break;
    case AMDGPU::BI__builtin_amdgcn_image_bvh_dual_intersect_ray:
      IID = Intrinsic::amdgcn_image_bvh_dual_intersect_ray;
      break;
    }
    llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0));
    llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1));
    llvm::Value *InstanceMask = EmitScalarExpr(E->getArg(2));
    llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(3));
    llvm::Value *RayDir = EmitScalarExpr(E->getArg(4));
    llvm::Value *Offset = EmitScalarExpr(E->getArg(5));
    llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(6));

    Address RetRayOriginPtr = EmitPointerWithAlignment(E->getArg(7));
    Address RetRayDirPtr = EmitPointerWithAlignment(E->getArg(8));

    llvm::Function *IntrinsicFunc = CGM.getIntrinsic(IID);

    llvm::CallInst *CI = Builder.CreateCall(
        IntrinsicFunc, {NodePtr, RayExtent, InstanceMask, RayOrigin, RayDir,
                        Offset, TextureDescr});

    llvm::Value *RetVData = Builder.CreateExtractValue(CI, 0);
    llvm::Value *RetRayOrigin = Builder.CreateExtractValue(CI, 1);
    llvm::Value *RetRayDir = Builder.CreateExtractValue(CI, 2);

    Builder.CreateStore(RetRayOrigin, RetRayOriginPtr);
    Builder.CreateStore(RetRayDir, RetRayDirPtr);

    return RetVData;
  }

  case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_rtn:
  case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_push4_pop1_rtn:
  case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_push8_pop1_rtn:
  case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_push8_pop2_rtn: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_rtn:
      IID = Intrinsic::amdgcn_ds_bvh_stack_rtn;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_push4_pop1_rtn:
      IID = Intrinsic::amdgcn_ds_bvh_stack_push4_pop1_rtn;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_push8_pop1_rtn:
      IID = Intrinsic::amdgcn_ds_bvh_stack_push8_pop1_rtn;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_push8_pop2_rtn:
      IID = Intrinsic::amdgcn_ds_bvh_stack_push8_pop2_rtn;
      break;
    }

    SmallVector<Value *, 4> Args;
    for (int i = 0, e = E->getNumArgs(); i != e; ++i)
      Args.push_back(EmitScalarExpr(E->getArg(i)));

    Function *F = CGM.getIntrinsic(IID);
    Value *Call = Builder.CreateCall(F, Args);
    Value *Rtn = Builder.CreateExtractValue(Call, 0);
    Value *A = Builder.CreateExtractValue(Call, 1);
    llvm::Type *RetTy = ConvertType(E->getType());
    Value *I0 = Builder.CreateInsertElement(PoisonValue::get(RetTy), Rtn,
                                            (uint64_t)0);
    // ds_bvh_stack_push8_pop2_rtn returns {i64, i32}, but the builtin returns
    // <2 x i64>, so zext the second value.
    if (A->getType()->getPrimitiveSizeInBits() <
        RetTy->getScalarType()->getPrimitiveSizeInBits())
      A = Builder.CreateZExt(A, RetTy->getScalarType());

    return Builder.CreateInsertElement(I0, A, 1);
  }
  case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_1d, false);
  case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_1darray, false);
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_2d, false);
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_2darray, false);
  case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_3d, false);
  case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_cube, false);
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_mip_1d, false);
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_mip_1darray, false);
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_mip_2d, false);
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_mip_2darray, false);
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_mip_3d, false);
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_load_mip_cube, false);
  case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_1d, true);
  case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_1darray, true);
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_2d, true);
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_2darray, true);
  case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_3d, true);
  case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_cube, true);
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_mip_1d, true);
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_mip_1darray, true);
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_mip_2d, true);
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_mip_2darray, true);
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_mip_3d, true);
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f16_i32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_store_mip_cube, true);
  case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f16_f32:
    return emitAMDGCNImageOverloadedReturnType(
        *this, E, Intrinsic::amdgcn_image_sample_1d, false);
  case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f32_f32:
1265 case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f16_f32:
1266 return emitAMDGCNImageOverloadedReturnType(
1267 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_1darray, IsImageStore: false);
1268 case AMDGPU::BI__builtin_amdgcn_image_sample_2d_f32_f32:
1269 case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f32_f32:
1270 case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f16_f32:
1271 return emitAMDGCNImageOverloadedReturnType(
1272 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_2d, IsImageStore: false);
1273 case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_f32_f32:
1274 case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f32_f32:
1275 case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f16_f32:
1276 return emitAMDGCNImageOverloadedReturnType(
1277 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_2darray, IsImageStore: false);
1278 case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f32_f32:
1279 case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f16_f32:
1280 return emitAMDGCNImageOverloadedReturnType(
1281 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_3d, IsImageStore: false);
1282 case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f32_f32:
1283 case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f16_f32:
1284 return emitAMDGCNImageOverloadedReturnType(
1285 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_cube, IsImageStore: false);
1286 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f32_f32:
1287 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f16_f32:
1288 return emitAMDGCNImageOverloadedReturnType(
1289 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_lz_1d, IsImageStore: false);
1290 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f32_f32:
1291 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f16_f32:
1292 return emitAMDGCNImageOverloadedReturnType(
1293 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_l_1d, IsImageStore: false);
1294 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f32_f32:
1295 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f16_f32:
1296 return emitAMDGCNImageOverloadedReturnType(
1297 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_d_1d, IsImageStore: false);
1298 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f32_f32:
1299 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f16_f32:
1300 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_f32_f32:
1301 return emitAMDGCNImageOverloadedReturnType(
1302 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_lz_2d, IsImageStore: false);
1303 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f32_f32:
1304 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f16_f32:
1305 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_f32_f32:
1306 return emitAMDGCNImageOverloadedReturnType(
1307 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_l_2d, IsImageStore: false);
1308 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f32_f32:
1309 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f16_f32:
1310 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_f32_f32:
1311 return emitAMDGCNImageOverloadedReturnType(
1312 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_d_2d, IsImageStore: false);
1313 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f32_f32:
1314 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f16_f32:
1315 return emitAMDGCNImageOverloadedReturnType(
1316 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_lz_3d, IsImageStore: false);
1317 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f32_f32:
1318 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f16_f32:
1319 return emitAMDGCNImageOverloadedReturnType(
1320 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_l_3d, IsImageStore: false);
1321 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f32_f32:
1322 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f16_f32:
1323 return emitAMDGCNImageOverloadedReturnType(
1324 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_d_3d, IsImageStore: false);
1325 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f32_f32:
1326 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f16_f32:
1327 return emitAMDGCNImageOverloadedReturnType(
1328 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_lz_cube, IsImageStore: false);
1329 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f32_f32:
1330 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f16_f32:
1331 return emitAMDGCNImageOverloadedReturnType(
1332 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_l_cube, IsImageStore: false);
1333 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32:
1334 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32:
1335 return emitAMDGCNImageOverloadedReturnType(
1336 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_lz_1darray, IsImageStore: false);
1337 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f32_f32:
1338 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f16_f32:
1339 return emitAMDGCNImageOverloadedReturnType(
1340 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_l_1darray, IsImageStore: false);
1341 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f32_f32:
1342 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f16_f32:
1343 return emitAMDGCNImageOverloadedReturnType(
1344 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_d_1darray, IsImageStore: false);
1345 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32:
1346 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32:
1347 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_f32_f32:
1348 return emitAMDGCNImageOverloadedReturnType(
1349 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_lz_2darray, IsImageStore: false);
1350 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f32_f32:
1351 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f16_f32:
1352 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_f32_f32:
1353 return emitAMDGCNImageOverloadedReturnType(
1354 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_l_2darray, IsImageStore: false);
1355 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f32_f32:
1356 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f16_f32:
1357 case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_f32_f32:
1358 return emitAMDGCNImageOverloadedReturnType(
1359 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_sample_d_2darray, IsImageStore: false);
1360 case clang::AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32:
1361 return emitAMDGCNImageOverloadedReturnType(
1362 CGF&: *this, E, IntrinsicID: Intrinsic::amdgcn_image_gather4_lz_2d, IsImageStore: false);
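  // All of the image builtins above lower to the like-named overloaded
  // intrinsic; the overload type comes from the builtin's own signature (the
  // return type for loads/samples, the source-data type for stores). As a
  // rough sketch (mangling assumed, operands forwarded from the builtin), a
  // v4f32 2D load becomes something like:
  //   %v = call <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i32(...)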
  case AMDGPU::BI__builtin_amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
  case AMDGPU::BI__builtin_amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
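    // Both source matrices are packed fp8/fp6/fp4 data carried in eight i32
    // lanes, so the intrinsic is overloaded on the two <8 x i32> operand
    // types built below. (Per-operand meanings follow the builtin's
    // prototype; the arguments are simply forwarded in order.)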
    llvm::FixedVectorType *VT = FixedVectorType::get(Builder.getInt32Ty(), 8);
    Function *F = CGM.getIntrinsic(
        BuiltinID == AMDGPU::BI__builtin_amdgcn_mfma_scale_f32_32x32x64_f8f6f4
            ? Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4
            : Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4,
        {VT, VT});

    SmallVector<Value *, 9> Args;
    for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
      Args.push_back(EmitScalarExpr(E->getArg(I)));
    return Builder.CreateCall(F, Args);
  }
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w32_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w64_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w32_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w64_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w32_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w64_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w32_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w64_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w32_gfx12:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w64_gfx12:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w32:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w64:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w32:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w64:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w32:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w64:
  case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w32:
  case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w64:
  case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w32:
  case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w64:
  case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w32:
  case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w64:
  case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w32:
  case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w64:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w32:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w64:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w32:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w64:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w32:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w64:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w32:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w64:
  // GFX1250 WMMA builtins
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x4_f32:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x32_bf16:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x32_f16:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x32_f16:
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x32_bf16:
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16f32_16x16x32_bf16:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x64_fp8_fp8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x64_fp8_bf8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x64_bf8_fp8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x64_bf8_bf8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x64_fp8_fp8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x64_fp8_bf8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x64_bf8_fp8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x64_bf8_bf8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x128_fp8_fp8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x128_fp8_bf8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x128_bf8_fp8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x128_bf8_bf8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_fp8_fp8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_fp8_bf8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_bf8_fp8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_bf8_bf8:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x64_iu8:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_f8f6f4:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_32x16x128_f4:
  case AMDGPU::BI__builtin_amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
  case AMDGPU::BI__builtin_amdgcn_wmma_scale16_f32_16x16x128_f8f6f4:
  case AMDGPU::BI__builtin_amdgcn_wmma_scale_f32_32x16x128_f4:
  case AMDGPU::BI__builtin_amdgcn_wmma_scale16_f32_32x16x128_f4:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x64_f16:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x64_bf16:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x64_f16:
  case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x64_bf16:
  case AMDGPU::BI__builtin_amdgcn_swmmac_bf16f32_16x16x64_bf16:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x128_fp8_fp8:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x128_fp8_bf8:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x128_bf8_fp8:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x128_bf8_bf8:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x128_fp8_fp8:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x128_fp8_bf8:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x128_bf8_fp8:
  case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x128_bf8_bf8:
  case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x128_iu8: {

    // These operations perform a matrix multiplication and accumulation of
    // the form:
    //             D = A * B + C
    // We need to specify one type for the A/B matrices and one for the C/D
    // matrices. Sparse (swmmac) operations can additionally use different
    // types for A and B, plus a type for the sparsity index. The destination
    // type must precede the types used for the source operands.
    SmallVector<unsigned, 2> ArgsForMatchingMatrixTypes;
    // On GFX12, the intrinsics with a 16-bit accumulator use a packed layout,
    // so there is no need for the variable opsel argument; always set it to
    // "false".
    bool AppendFalseForOpselArg = false;
    unsigned BuiltinWMMAOp;
    // Need a return type overload when D and C are of different types.
    bool NeedReturnType = false;
    // Whether to drop the unused A/B neg-modifier arguments.
    bool RemoveABNeg = false;
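    // Worked example (shapes assumed for illustration): for the wave32 form
    // of __builtin_amdgcn_wmma_f32_16x16x16_f16 the operands are (A, B, C),
    // and ArgsForMatchingMatrixTypes is {2, 0}, i.e. the intrinsic is
    // overloaded first on the C/D type and then on the A/B type, yielding a
    // name along the lines of:
    //   @llvm.amdgcn.wmma.f32.16x16x16.f16.v8f32.v16f16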

    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32_gfx12:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64_gfx12:
      ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_f16;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32_gfx12:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64_gfx12:
      ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf16;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32_gfx12:
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64_gfx12:
      AppendFalseForOpselArg = true;
      [[fallthrough]];
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
      ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x16_f16;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32_gfx12:
    case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64_gfx12:
      AppendFalseForOpselArg = true;
      [[fallthrough]];
    case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
      ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16_16x16x16_bf16;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64:
      ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x16_f16_tied;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64:
      ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16_16x16x16_bf16_tied;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64:
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32_gfx12:
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64_gfx12:
      ArgsForMatchingMatrixTypes = {4, 1}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32_gfx12:
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64_gfx12:
      ArgsForMatchingMatrixTypes = {4, 1}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu4;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w32_gfx12:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w64_gfx12:
      ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_fp8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w32_gfx12:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w64_gfx12:
      ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_fp8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w32_gfx12:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w64_gfx12:
      ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w32_gfx12:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w64_gfx12:
      ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w32_gfx12:
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w64_gfx12:
      ArgsForMatchingMatrixTypes = {4, 1}; // CD, AB
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x32_iu4;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w32:
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w64:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_f16;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w32:
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w64:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_bf16;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w32:
    case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w64:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f16_16x16x32_f16;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w32:
    case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w64:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_bf16_16x16x32_bf16;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w32:
    case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w64:
      ArgsForMatchingMatrixTypes = {4, 1, 3, 5}; // CD, A, B, Index
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_i32_16x16x32_iu8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w32:
    case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w64:
      ArgsForMatchingMatrixTypes = {4, 1, 3, 5}; // CD, A, B, Index
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_i32_16x16x32_iu4;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w32:
    case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w64:
      ArgsForMatchingMatrixTypes = {4, 1, 3, 5}; // CD, A, B, Index
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_i32_16x16x64_iu4;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w32:
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w64:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_fp8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w32:
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w64:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_fp8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w32:
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w64:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_bf8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w32:
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w64:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_bf8_bf8;
      break;
    // GFX1250 WMMA builtins
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x4_f32:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x4_f32;
      RemoveABNeg = true;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x32_bf16:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x32_bf16;
      RemoveABNeg = true;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x32_f16:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x32_f16;
      RemoveABNeg = true;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x32_f16:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x32_f16;
      RemoveABNeg = true;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x32_bf16:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16_16x16x32_bf16;
      RemoveABNeg = true;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_bf16f32_16x16x32_bf16:
      NeedReturnType = true;
      ArgsForMatchingMatrixTypes = {0, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16f32_16x16x32_bf16;
      RemoveABNeg = true;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x64_fp8_fp8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x64_fp8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x64_fp8_bf8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x64_fp8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x64_bf8_fp8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x64_bf8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x64_bf8_bf8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x64_bf8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x64_fp8_fp8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x64_fp8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x64_fp8_bf8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x64_fp8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x64_bf8_fp8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x64_bf8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x64_bf8_bf8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x64_bf8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x128_fp8_fp8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x128_fp8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x128_fp8_bf8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x128_fp8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x128_bf8_fp8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x128_bf8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x128_bf8_bf8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x128_bf8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_fp8_fp8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x128_fp8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_fp8_bf8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x128_fp8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_bf8_fp8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x128_bf8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_bf8_bf8:
      ArgsForMatchingMatrixTypes = {3, 0};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x128_bf8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x64_iu8:
      ArgsForMatchingMatrixTypes = {4, 1};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x64_iu8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_f8f6f4:
      ArgsForMatchingMatrixTypes = {5, 1, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
      ArgsForMatchingMatrixTypes = {5, 1, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_scale16_f32_16x16x128_f8f6f4:
      ArgsForMatchingMatrixTypes = {5, 1, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_32x16x128_f4:
      ArgsForMatchingMatrixTypes = {3, 0, 1};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_32x16x128_f4;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_scale_f32_32x16x128_f4:
      ArgsForMatchingMatrixTypes = {3, 0, 1};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_scale_f32_32x16x128_f4;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_scale16_f32_32x16x128_f4:
      ArgsForMatchingMatrixTypes = {3, 0, 1};
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_scale16_f32_32x16x128_f4;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x64_f16:
      ArgsForMatchingMatrixTypes = {4, 1, 3, 5};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x64_f16;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x64_bf16:
      ArgsForMatchingMatrixTypes = {4, 1, 3, 5};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x64_bf16;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x64_f16:
      ArgsForMatchingMatrixTypes = {4, 1, 3, 5};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f16_16x16x64_f16;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x64_bf16:
      ArgsForMatchingMatrixTypes = {4, 1, 3, 5};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_bf16_16x16x64_bf16;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_bf16f32_16x16x64_bf16:
      ArgsForMatchingMatrixTypes = {4, 1, 3, 5};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_bf16f32_16x16x64_bf16;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x128_fp8_fp8:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x128_fp8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x128_fp8_bf8:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x128_fp8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x128_bf8_fp8:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x128_bf8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x128_bf8_bf8:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x128_bf8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x128_fp8_fp8:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f16_16x16x128_fp8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x128_fp8_bf8:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f16_16x16x128_fp8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x128_bf8_fp8:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f16_16x16x128_bf8_fp8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x128_bf8_bf8:
      ArgsForMatchingMatrixTypes = {2, 0, 1, 3};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f16_16x16x128_bf8_bf8;
      break;
    case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x128_iu8:
      ArgsForMatchingMatrixTypes = {4, 1, 3, 5};
      BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_i32_16x16x128_iu8;
      break;
    }

    SmallVector<Value *, 6> Args;
    for (int i = 0, e = E->getNumArgs(); i != e; ++i) {
      // Remove unused neg modifiers.
      if (RemoveABNeg && (i == 0 || i == 2))
        continue;
      Args.push_back(EmitScalarExpr(E->getArg(i)));
    }
    if (AppendFalseForOpselArg)
      Args.push_back(Builder.getFalse());

    // Handle the optional clamp argument of the following two builtins.
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x64_iu8) {
      if (Args.size() == 7)
        Args.push_back(Builder.getFalse());
      assert(Args.size() == 8 && "Expected 8 arguments");
      Args[7] = Builder.CreateZExtOrTrunc(Args[7], Builder.getInt1Ty());
    } else if (BuiltinID ==
               AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x128_iu8) {
      if (Args.size() == 8)
        Args.push_back(Builder.getFalse());
      assert(Args.size() == 9 && "Expected 9 arguments");
      Args[8] = Builder.CreateZExtOrTrunc(Args[8], Builder.getInt1Ty());
    }

    SmallVector<llvm::Type *, 6> ArgTypes;
    if (NeedReturnType)
      ArgTypes.push_back(ConvertType(E->getType()));
    for (auto ArgIdx : ArgsForMatchingMatrixTypes)
      ArgTypes.push_back(Args[ArgIdx]->getType());

    Function *F = CGM.getIntrinsic(BuiltinWMMAOp, ArgTypes);
    return Builder.CreateCall(F, Args);
  }
  // amdgcn workgroup size
  case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
    return EmitAMDGPUWorkGroupSize(*this, 0);
  case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
    return EmitAMDGPUWorkGroupSize(*this, 1);
  case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
    return EmitAMDGPUWorkGroupSize(*this, 2);

  // amdgcn grid size
  case AMDGPU::BI__builtin_amdgcn_grid_size_x:
    return EmitAMDGPUGridSize(*this, 0);
  case AMDGPU::BI__builtin_amdgcn_grid_size_y:
    return EmitAMDGPUGridSize(*this, 1);
  case AMDGPU::BI__builtin_amdgcn_grid_size_z:
    return EmitAMDGPUGridSize(*this, 2);

  // r600 intrinsics
  case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
  case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
    return emitBuiltinWithOneOverloadedType<1>(*this, E,
                                               Intrinsic::r600_recipsqrt_ieee);
  case AMDGPU::BI__builtin_amdgcn_alignbit: {
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
    Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
    return Builder.CreateCall(F, {Src0, Src1, Src2});
  }
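  // __builtin_amdgcn_fence takes a C11-style memory order plus a syncscope
  // string; for example,
  //   __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
  // lowers to:
  //   fence syncscope("workgroup") seq_cst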
  case AMDGPU::BI__builtin_amdgcn_fence: {
    ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
                            EmitScalarExpr(E->getArg(1)), AO, SSID);
    FenceInst *Fence = Builder.CreateFence(AO, SSID);
    if (E->getNumArgs() > 2)
      AddAMDGPUFenceAddressSpaceMMRA(Fence, E);
    return Fence;
  }
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
  case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
  case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
  case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16:
  case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2bf16:
  case AMDGPU::BI__builtin_amdgcn_ds_faddf:
  case AMDGPU::BI__builtin_amdgcn_ds_fminf:
  case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16:
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: {
    llvm::AtomicRMWInst::BinOp BinOp;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
    case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
      BinOp = llvm::AtomicRMWInst::UIncWrap;
      break;
    case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
    case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
      BinOp = llvm::AtomicRMWInst::UDecWrap;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_faddf:
    case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
    case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
    case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16:
    case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2bf16:
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16:
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16:
      BinOp = llvm::AtomicRMWInst::FAdd;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_fminf:
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
      BinOp = llvm::AtomicRMWInst::FMin;
      break;
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
    case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
      BinOp = llvm::AtomicRMWInst::FMax;
      break;
    }

    Address Ptr = CheckAtomicAlignment(*this, E);
    Value *Val = EmitScalarExpr(E->getArg(1));
    llvm::Type *OrigTy = Val->getType();
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();

    bool Volatile;

    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_faddf ||
        BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_fminf ||
        BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_fmaxf) {
      // __builtin_amdgcn_ds_faddf/fminf/fmaxf has an explicit volatile
      // argument.
      Volatile =
          cast<ConstantInt>(EmitScalarExpr(E->getArg(4)))->getZExtValue();
    } else {
      // Infer volatile from the passed pointer type.
      Volatile =
          PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
    }

    if (E->getNumArgs() >= 4) {
      // Some of the builtins have explicit ordering and scope arguments.
      ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
                              EmitScalarExpr(E->getArg(3)), AO, SSID);
    } else {
      // Most of the builtins do not have syncscope/order arguments. For DS
      // atomics the scope doesn't really matter, as they implicitly operate
      // at workgroup scope.
      //
      // The global/flat cases need to use agent scope to consistently produce
      // the native instruction instead of a cmpxchg expansion.
      if (getTarget().getTriple().isSPIRV())
        SSID = getLLVMContext().getOrInsertSyncScopeID("device");
      else
        SSID = getLLVMContext().getOrInsertSyncScopeID("agent");
      AO = AtomicOrdering::Monotonic;

      // The v2bf16 builtins use i16 instead of a natural bfloat type.
      if (BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2bf16 ||
          BuiltinID == AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16 ||
          BuiltinID == AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16) {
        llvm::Type *V2BF16Ty = FixedVectorType::get(
            llvm::Type::getBFloatTy(Builder.getContext()), 2);
        Val = Builder.CreateBitCast(Val, V2BF16Ty);
      }
    }

    llvm::AtomicRMWInst *RMW =
        Builder.CreateAtomicRMW(BinOp, Ptr, Val, AO, SSID);
    if (Volatile)
      RMW->setVolatile(true);

    unsigned AddrSpace = Ptr.getType()->getAddressSpace();
    if (AddrSpace != llvm::AMDGPUAS::LOCAL_ADDRESS) {
      // Most targets require "amdgpu.no.fine.grained.memory" to emit the
      // native instruction for flat and global operations.
      llvm::MDTuple *EmptyMD = MDNode::get(getLLVMContext(), {});
      RMW->setMetadata("amdgpu.no.fine.grained.memory", EmptyMD);

      // Most targets require "amdgpu.ignore.denormal.mode" to emit the native
      // instruction, but this only matters for float fadd.
      if (BinOp == llvm::AtomicRMWInst::FAdd && Val->getType()->isFloatTy())
        RMW->setMetadata("amdgpu.ignore.denormal.mode", EmptyMD);
    }
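    // Taken together, a global float fadd ends up roughly as (illustrative
    // IR; the metadata node numbers are whatever the module assigns):
    //   atomicrmw fadd ptr addrspace(1) %p, float %v syncscope("agent")
    //       monotonic, !amdgpu.no.fine.grained.memory !0,
    //       !amdgpu.ignore.denormal.mode !0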

    return Builder.CreateBitCast(RMW, OrigTy);
  }
  case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtn:
  case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtnl: {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResultType = ConvertType(E->getType());
    // s_sendmsg_rtn is mangled using return type only.
    Function *F =
        CGM.getIntrinsic(Intrinsic::amdgcn_s_sendmsg_rtn, {ResultType});
    return Builder.CreateCall(F, {Arg});
  }
  case AMDGPU::BI__builtin_amdgcn_permlane16_swap:
  case AMDGPU::BI__builtin_amdgcn_permlane32_swap: {
    // Because builtin types are limited, and the intrinsic uses a struct/pair
    // output, marshal the pair-of-i32 to <2 x i32>.
    Value *VDstOld = EmitScalarExpr(E->getArg(0));
    Value *VSrcOld = EmitScalarExpr(E->getArg(1));
    Value *FI = EmitScalarExpr(E->getArg(2));
    Value *BoundCtrl = EmitScalarExpr(E->getArg(3));
    Function *F = CGM.getIntrinsic(
        BuiltinID == AMDGPU::BI__builtin_amdgcn_permlane16_swap
            ? Intrinsic::amdgcn_permlane16_swap
            : Intrinsic::amdgcn_permlane32_swap);
    llvm::CallInst *Call =
        Builder.CreateCall(F, {VDstOld, VSrcOld, FI, BoundCtrl});

    llvm::Value *Elt0 = Builder.CreateExtractValue(Call, 0);
    llvm::Value *Elt1 = Builder.CreateExtractValue(Call, 1);

    llvm::Type *ResultType = ConvertType(E->getType());

    llvm::Value *Insert0 = Builder.CreateInsertElement(
        llvm::PoisonValue::get(ResultType), Elt0, UINT64_C(0));
    llvm::Value *AsVector =
        Builder.CreateInsertElement(Insert0, Elt1, UINT64_C(1));
    return AsVector;
  }
  case AMDGPU::BI__builtin_amdgcn_bitop3_b32:
  case AMDGPU::BI__builtin_amdgcn_bitop3_b16:
    return emitBuiltinWithOneOverloadedType<4>(*this, E,
                                               Intrinsic::amdgcn_bitop3);
  case AMDGPU::BI__builtin_amdgcn_make_buffer_rsrc: {
    // TODO: LLVM has this overloaded to allow for fat pointers, but since
    // those haven't been plumbed through to Clang yet, default to creating
    // the resource type.
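    // Illustrative shape of the emitted call (operand names assumed; the
    // four builtin arguments are forwarded in order):
    //   call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(
    //       ptr %base, i16 %stride, i32 %numrecords, i32 %flags)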
    SmallVector<Value *, 4> Args;
    for (unsigned I = 0; I < 4; ++I)
      Args.push_back(EmitScalarExpr(E->getArg(I)));
    llvm::PointerType *RetTy = llvm::PointerType::get(
        Builder.getContext(), llvm::AMDGPUAS::BUFFER_RESOURCE);
    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_make_buffer_rsrc,
                                   {RetTy, Args[0]->getType()});
    return Builder.CreateCall(F, Args);
  }
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b8:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b16:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b32:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b64:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b96:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b128:
    return emitBuiltinWithOneOverloadedType<5>(
        *this, E, Intrinsic::amdgcn_raw_ptr_buffer_store);
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_format_v4f32:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_format_v4f16:
    return emitBuiltinWithOneOverloadedType<5>(
        *this, E, Intrinsic::amdgcn_raw_ptr_buffer_store_format);
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b8:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b16:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b32:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b64:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b96:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b128: {
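    // Map the builtin's bit-width suffix onto the IR result type; widths
    // beyond 32 bits come back as <N x i32> vectors rather than as wide
    // integers.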
    llvm::Type *RetTy = nullptr;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b8:
      RetTy = Int8Ty;
      break;
    case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b16:
      RetTy = Int16Ty;
      break;
    case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b32:
      RetTy = Int32Ty;
      break;
    case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b64:
      RetTy = llvm::FixedVectorType::get(Int32Ty, /*NumElements=*/2);
      break;
    case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b96:
      RetTy = llvm::FixedVectorType::get(Int32Ty, /*NumElements=*/3);
      break;
    case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b128:
      RetTy = llvm::FixedVectorType::get(Int32Ty, /*NumElements=*/4);
      break;
    }
    Function *F =
        CGM.getIntrinsic(Intrinsic::amdgcn_raw_ptr_buffer_load, RetTy);
    return Builder.CreateCall(
        F, {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)),
            EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3))});
  }
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_format_v4f32:
  case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_format_v4f16: {
    llvm::Type *RetTy = ConvertType(E->getType());
    Function *F = CGM.getIntrinsic(
        Intrinsic::amdgcn_raw_ptr_buffer_load_format, {RetTy});

    return Builder.CreateCall(
        F, {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)),
            EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3))});
  }
  case AMDGPU::BI__builtin_amdgcn_struct_buffer_store_format_v4f32:
  case AMDGPU::BI__builtin_amdgcn_struct_buffer_store_format_v4f16:
    return emitBuiltinWithOneOverloadedType<6>(
        *this, E, Intrinsic::amdgcn_struct_ptr_buffer_store_format);
  case AMDGPU::BI__builtin_amdgcn_struct_buffer_load_format_v4f32:
  case AMDGPU::BI__builtin_amdgcn_struct_buffer_load_format_v4f16: {
    llvm::Type *RetTy = ConvertType(E->getType());
    Function *F = CGM.getIntrinsic(
        Intrinsic::amdgcn_struct_ptr_buffer_load_format, {RetTy});

    return Builder.CreateCall(
        F, {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)),
            EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)),
            EmitScalarExpr(E->getArg(4))});
  }
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_atomic_add_i32:
    return emitBuiltinWithOneOverloadedType<5>(
        *this, E, Intrinsic::amdgcn_raw_ptr_buffer_atomic_add);
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_atomic_fadd_f32:
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_atomic_fadd_v2f16:
    return emitBuiltinWithOneOverloadedType<5>(
        *this, E, Intrinsic::amdgcn_raw_ptr_buffer_atomic_fadd);
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_atomic_fmin_f32:
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_atomic_fmin_f64:
    return emitBuiltinWithOneOverloadedType<5>(
        *this, E, Intrinsic::amdgcn_raw_ptr_buffer_atomic_fmin);
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_atomic_fmax_f32:
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_atomic_fmax_f64:
    return emitBuiltinWithOneOverloadedType<5>(
        *this, E, Intrinsic::amdgcn_raw_ptr_buffer_atomic_fmax);
  case AMDGPU::BI__builtin_amdgcn_s_prefetch_data:
    return emitBuiltinWithOneOverloadedType<2>(
        *this, E, Intrinsic::amdgcn_s_prefetch_data);
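  // logb(x) is the unbiased exponent of x. Expand it via llvm.frexp, which
  // factors x = m * 2^ex with 0.5 <= |m| < 1, so logb(x) == ex - 1. The
  // selects below patch up the edge cases: |x| == inf yields +inf and
  // x == 0 yields -inf, while NaN propagates through the fabs operand.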
  case Builtin::BIlogbf:
  case Builtin::BI__builtin_logbf: {
    Value *Src0 = EmitScalarExpr(E->getArg(0));
    Function *FrExpFunc = CGM.getIntrinsic(
        Intrinsic::frexp, {Src0->getType(), Builder.getInt32Ty()});
    CallInst *FrExp = Builder.CreateCall(FrExpFunc, Src0);
    Value *Exp = Builder.CreateExtractValue(FrExp, 1);
    Value *Add =
        Builder.CreateAdd(Exp, ConstantInt::getSigned(Exp->getType(), -1), "",
                          /*HasNUW=*/false, /*HasNSW=*/true);
    Value *SIToFP = Builder.CreateSIToFP(Add, Builder.getFloatTy());
    Value *Fabs =
        emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs);
    Value *FCmpONE = Builder.CreateFCmpONE(
        Fabs, ConstantFP::getInfinity(Builder.getFloatTy()));
    Value *Sel1 = Builder.CreateSelect(FCmpONE, SIToFP, Fabs);
    Value *FCmpOEQ =
        Builder.CreateFCmpOEQ(Src0, ConstantFP::getZero(Builder.getFloatTy()));
    Value *Sel2 = Builder.CreateSelect(
        FCmpOEQ,
        ConstantFP::getInfinity(Builder.getFloatTy(), /*Negative=*/true),
        Sel1);
    return Sel2;
  }
  case Builtin::BIlogb:
  case Builtin::BI__builtin_logb: {
    Value *Src0 = EmitScalarExpr(E->getArg(0));
    Function *FrExpFunc = CGM.getIntrinsic(
        Intrinsic::frexp, {Src0->getType(), Builder.getInt32Ty()});
    CallInst *FrExp = Builder.CreateCall(FrExpFunc, Src0);
    Value *Exp = Builder.CreateExtractValue(FrExp, 1);
    Value *Add =
        Builder.CreateAdd(Exp, ConstantInt::getSigned(Exp->getType(), -1), "",
                          /*HasNUW=*/false, /*HasNSW=*/true);
    Value *SIToFP = Builder.CreateSIToFP(Add, Builder.getDoubleTy());
    Value *Fabs =
        emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs);
    Value *FCmpONE = Builder.CreateFCmpONE(
        Fabs, ConstantFP::getInfinity(Builder.getDoubleTy()));
    Value *Sel1 = Builder.CreateSelect(FCmpONE, SIToFP, Fabs);
    Value *FCmpOEQ = Builder.CreateFCmpOEQ(
        Src0, ConstantFP::getZero(Builder.getDoubleTy()));
    Value *Sel2 = Builder.CreateSelect(
        FCmpOEQ,
        ConstantFP::getInfinity(Builder.getDoubleTy(), /*Negative=*/true),
        Sel1);
    return Sel2;
  }
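  // FLT_RADIX is 2 on these targets, so scalbn(x, n) == ldexp(x, n)
  // == x * 2^n; both map directly onto llvm.ldexp (or its strictfp
  // counterpart, llvm.experimental.constrained.ldexp).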
  case Builtin::BIscalbnf:
  case Builtin::BI__builtin_scalbnf:
  case Builtin::BIscalbn:
  case Builtin::BI__builtin_scalbn:
    return emitBinaryExpMaybeConstrainedFPBuiltin(
        *this, E, Intrinsic::ldexp, Intrinsic::experimental_constrained_ldexp);
  default:
    return nullptr;
  }
}