//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPU.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/NativeFormatting.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-argument-reg-usage-info"

INITIALIZE_PASS(AMDGPUArgumentUsageInfoWrapperLegacy, DEBUG_TYPE,
                "Argument Register Usage Information Storage", false, true)

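// Print this descriptor as either "Reg <reg>" or "Stack offset <offset>",
// followed by the mask (in hex) when one applies, or "<not set>" if the
// descriptor has not been assigned.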
void ArgDescriptor::print(raw_ostream &OS,
                          const TargetRegisterInfo *TRI) const {
  if (!isSet()) {
    OS << "<not set>\n";
    return;
  }

  if (isRegister())
    OS << "Reg " << printReg(getRegister(), TRI);
  else
    OS << "Stack offset " << getStackOffset();

  if (isMasked()) {
    OS << " & ";
    llvm::write_hex(OS, Mask, llvm::HexPrintStyle::PrefixLower);
  }

  OS << '\n';
}

char AMDGPUArgumentUsageInfoWrapperLegacy::ID = 0;

const AMDGPUFunctionArgInfo AMDGPUArgumentUsageInfo::ExternFunctionInfo{};

// Hardcoded registers from fixed function ABI
const AMDGPUFunctionArgInfo AMDGPUArgumentUsageInfo::FixedABIFunctionInfo
    = AMDGPUFunctionArgInfo::fixedABILayout();

// TODO: Print preload kernargs?
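// Dump the recorded ArgDescriptor fields for every function in the map, one
// block per function.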
void AMDGPUArgumentUsageInfo::print(raw_ostream &OS, const Module *M) const {
  for (const auto &FI : ArgInfoMap) {
    OS << "Arguments for " << FI.first->getName() << '\n'
       << " PrivateSegmentBuffer: " << FI.second.PrivateSegmentBuffer
       << " DispatchPtr: " << FI.second.DispatchPtr
       << " QueuePtr: " << FI.second.QueuePtr
       << " KernargSegmentPtr: " << FI.second.KernargSegmentPtr
       << " DispatchID: " << FI.second.DispatchID
       << " FlatScratchInit: " << FI.second.FlatScratchInit
       << " PrivateSegmentSize: " << FI.second.PrivateSegmentSize
       << " WorkGroupIDX: " << FI.second.WorkGroupIDX
       << " WorkGroupIDY: " << FI.second.WorkGroupIDY
       << " WorkGroupIDZ: " << FI.second.WorkGroupIDZ
       << " WorkGroupInfo: " << FI.second.WorkGroupInfo
       << " LDSKernelId: " << FI.second.LDSKernelId
       << " PrivateSegmentWaveByteOffset: "
       << FI.second.PrivateSegmentWaveByteOffset
       << " ImplicitBufferPtr: " << FI.second.ImplicitBufferPtr
       << " ImplicitArgPtr: " << FI.second.ImplicitArgPtr
       << " WorkItemIDX " << FI.second.WorkItemIDX
       << " WorkItemIDY " << FI.second.WorkItemIDY
       << " WorkItemIDZ " << FI.second.WorkItemIDZ
       << '\n';
  }
}

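// Invalidate the stored argument info unless this analysis (or the set of all
// analyses) was marked preserved.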
bool AMDGPUArgumentUsageInfo::invalidate(Module &M, const PreservedAnalyses &PA,
                                         ModuleAnalysisManager::Invalidator &) {
  auto PAC = PA.getChecker<AMDGPUArgumentUsageAnalysis>();
  return !PAC.preservedWhenStateless();
}

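// Return the ArgDescriptor (or nullptr if the value is not preloaded for this
// function), the register class, and the LLT for a given preloaded value kind.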
std::tuple<const ArgDescriptor *, const TargetRegisterClass *, LLT>
AMDGPUFunctionArgInfo::getPreloadedValue(
    AMDGPUFunctionArgInfo::PreloadedValue Value) const {
  switch (Value) {
  case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER: {
    return std::tuple(PrivateSegmentBuffer ? &PrivateSegmentBuffer : nullptr,
                      &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32));
  }
  case AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR:
    return std::tuple(ImplicitBufferPtr ? &ImplicitBufferPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_X:
    return std::tuple(WorkGroupIDX ? &WorkGroupIDX : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_Y:
    return std::tuple(WorkGroupIDY ? &WorkGroupIDY : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_Z:
    return std::tuple(WorkGroupIDZ ? &WorkGroupIDZ : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
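  // The cluster workgroup ID and max-ID values have no ArgDescriptor recorded
  // here; only the register class and type are reported for them.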
  case AMDGPUFunctionArgInfo::CLUSTER_WORKGROUP_ID_X:
  case AMDGPUFunctionArgInfo::CLUSTER_WORKGROUP_ID_Y:
  case AMDGPUFunctionArgInfo::CLUSTER_WORKGROUP_ID_Z:
  case AMDGPUFunctionArgInfo::CLUSTER_WORKGROUP_MAX_ID_X:
  case AMDGPUFunctionArgInfo::CLUSTER_WORKGROUP_MAX_ID_Y:
  case AMDGPUFunctionArgInfo::CLUSTER_WORKGROUP_MAX_ID_Z:
  case AMDGPUFunctionArgInfo::CLUSTER_WORKGROUP_MAX_FLAT_ID:
    return std::tuple(nullptr, &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::LDS_KERNEL_ID:
    return std::tuple(LDSKernelId ? &LDSKernelId : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    return std::tuple(
        PrivateSegmentWaveByteOffset ? &PrivateSegmentWaveByteOffset : nullptr,
        &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_SIZE:
    return {PrivateSegmentSize ? &PrivateSegmentSize : nullptr,
            &AMDGPU::SGPR_32RegClass, LLT::scalar(32)};
  case AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR:
    return std::tuple(KernargSegmentPtr ? &KernargSegmentPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR:
    return std::tuple(ImplicitArgPtr ? &ImplicitArgPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::DISPATCH_ID:
    return std::tuple(DispatchID ? &DispatchID : nullptr,
                      &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
  case AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT:
    return std::tuple(FlatScratchInit ? &FlatScratchInit : nullptr,
                      &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
  case AMDGPUFunctionArgInfo::DISPATCH_PTR:
    return std::tuple(DispatchPtr ? &DispatchPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::QUEUE_PTR:
    return std::tuple(QueuePtr ? &QueuePtr : nullptr, &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_X:
    return std::tuple(WorkItemIDX ? &WorkItemIDX : nullptr,
                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_Y:
    return std::tuple(WorkItemIDY ? &WorkItemIDY : nullptr,
                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_Z:
    return std::tuple(WorkItemIDZ ? &WorkItemIDZ : nullptr,
                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  }
  llvm_unreachable("unexpected preloaded value type");
}

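// Build the fixed-ABI argument layout: each implicit input is assigned a
// fixed SGPR (or, for the packed work-item IDs, VGPR31), independent of which
// inputs a callee actually uses.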
AMDGPUFunctionArgInfo AMDGPUFunctionArgInfo::fixedABILayout() {
  AMDGPUFunctionArgInfo AI;
  AI.PrivateSegmentBuffer
      = ArgDescriptor::createRegister(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3);
  AI.DispatchPtr = ArgDescriptor::createRegister(AMDGPU::SGPR4_SGPR5);
  AI.QueuePtr = ArgDescriptor::createRegister(AMDGPU::SGPR6_SGPR7);

  // Do not pass the kernarg segment pointer itself; only the incremented
  // version of it (the implicit argument pointer) is passed in its place.
  AI.ImplicitArgPtr = ArgDescriptor::createRegister(AMDGPU::SGPR8_SGPR9);
  AI.DispatchID = ArgDescriptor::createRegister(AMDGPU::SGPR10_SGPR11);

  // Skip FlatScratchInit/PrivateSegmentSize.
  AI.WorkGroupIDX = ArgDescriptor::createRegister(AMDGPU::SGPR12);
  AI.WorkGroupIDY = ArgDescriptor::createRegister(AMDGPU::SGPR13);
  AI.WorkGroupIDZ = ArgDescriptor::createRegister(AMDGPU::SGPR14);
  AI.LDSKernelId = ArgDescriptor::createRegister(AMDGPU::SGPR15);

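  // The three work-item IDs are packed into VGPR31:
  //   bits [9:0]   WorkItemIDX
  //   bits [19:10] WorkItemIDY
  //   bits [29:20] WorkItemIDZ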
  const unsigned Mask = 0x3ff;
  AI.WorkItemIDX = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask);
  AI.WorkItemIDY = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask << 10);
  AI.WorkItemIDZ = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask << 20);
  return AI;
}

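// Return the argument info recorded for \p F. If nothing was recorded, fall
// back to the fixed-ABI layout.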
const AMDGPUFunctionArgInfo &
AMDGPUArgumentUsageInfo::lookupFuncArgInfo(const Function &F) const {
  auto I = ArgInfoMap.find(&F);
  if (I == ArgInfoMap.end())
    return FixedABIFunctionInfo;
  return I->second;
}

AnalysisKey AMDGPUArgumentUsageAnalysis::Key;

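// The analysis only constructs an initially empty result; per-function
// argument info is recorded into it by later codegen stages.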
| 194 | AMDGPUArgumentUsageInfo |
| 195 | AMDGPUArgumentUsageAnalysis::run(Module &M, ModuleAnalysisManager &) { |
| 196 | return AMDGPUArgumentUsageInfo(); |
| 197 | } |
| 198 | |