1//===-- RISCVTargetParser.cpp - Parser for target features ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements a target parser to recognise hardware features
10// for RISC-V CPUs.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/TargetParser/RISCVTargetParser.h"
15#include "llvm/ADT/SmallVector.h"
16#include "llvm/ADT/StringSwitch.h"
17#include "llvm/TargetParser/RISCVISAInfo.h"
18
19namespace llvm {
20namespace RISCV {
21
// Unique kind for every known processor, generated from the PROC and
// TUNE_PROC records in RISCVTargetParserDef.inc as CK_<ENUM>.
enum CPUKind : unsigned {
#define PROC(ENUM, NAME, DEFAULT_MARCH, FAST_SCALAR_UNALIGN,                   \
             FAST_VECTOR_UNALIGN, MVENDORID, MARCHID, MIMPID)                  \
  CK_##ENUM,
#define TUNE_PROC(ENUM, NAME) CK_##ENUM,
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
};
29
// Static table of all known CPUs: name, default -march string, unaligned
// access capabilities, and the (mvendorid, marchid, mimpid) model triple.
// One entry per PROC record in RISCVTargetParserDef.inc.
constexpr CPUInfo RISCVCPUInfo[] = {
#define PROC(ENUM, NAME, DEFAULT_MARCH, FAST_SCALAR_UNALIGN,                   \
             FAST_VECTOR_UNALIGN, MVENDORID, MARCHID, MIMPID)                  \
  {                                                                            \
      NAME,                                                                    \
      DEFAULT_MARCH,                                                           \
      FAST_SCALAR_UNALIGN,                                                     \
      FAST_VECTOR_UNALIGN,                                                     \
      {MVENDORID, MARCHID, MIMPID},                                            \
  },
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
};
42
43static const CPUInfo *getCPUInfoByName(StringRef CPU) {
44 for (auto &C : RISCVCPUInfo)
45 if (C.Name == CPU)
46 return &C;
47 return nullptr;
48}
49
50bool hasFastScalarUnalignedAccess(StringRef CPU) {
51 const CPUInfo *Info = getCPUInfoByName(CPU);
52 return Info && Info->FastScalarUnalignedAccess;
53}
54
55bool hasFastVectorUnalignedAccess(StringRef CPU) {
56 const CPUInfo *Info = getCPUInfoByName(CPU);
57 return Info && Info->FastVectorUnalignedAccess;
58}
59
60bool hasValidCPUModel(StringRef CPU) { return getCPUModel(CPU).isValid(); }
61
62CPUModel getCPUModel(StringRef CPU) {
63 const CPUInfo *Info = getCPUInfoByName(CPU);
64 if (!Info)
65 return {.MVendorID: 0, .MArchID: 0, .MImpID: 0};
66 return Info->Model;
67}
68
69StringRef getCPUNameFromCPUModel(const CPUModel &Model) {
70 if (!Model.isValid())
71 return "";
72
73 for (auto &C : RISCVCPUInfo)
74 if (C.Model == Model)
75 return C.Name;
76 return "";
77}
78
79bool parseCPU(StringRef CPU, bool IsRV64) {
80 const CPUInfo *Info = getCPUInfoByName(CPU);
81
82 if (!Info)
83 return false;
84 return Info->is64Bit() == IsRV64;
85}
86
87bool parseTuneCPU(StringRef TuneCPU, bool IsRV64) {
88 std::optional<CPUKind> Kind =
89 llvm::StringSwitch<std::optional<CPUKind>>(TuneCPU)
90#define TUNE_PROC(ENUM, NAME) .Case(NAME, CK_##ENUM)
91 #include "llvm/TargetParser/RISCVTargetParserDef.inc"
92 .Default(Value: std::nullopt);
93
94 if (Kind.has_value())
95 return true;
96
97 // Fallback to parsing as a CPU.
98 return parseCPU(CPU: TuneCPU, IsRV64);
99}
100
101StringRef getMArchFromMcpu(StringRef CPU) {
102 const CPUInfo *Info = getCPUInfoByName(CPU);
103 if (!Info)
104 return "";
105 return Info->DefaultMarch;
106}
107
108void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64) {
109 for (const auto &C : RISCVCPUInfo) {
110 if (IsRV64 == C.is64Bit())
111 Values.emplace_back(Args: C.Name);
112 }
113}
114
115void fillValidTuneCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64) {
116 for (const auto &C : RISCVCPUInfo) {
117 if (IsRV64 == C.is64Bit())
118 Values.emplace_back(Args: C.Name);
119 }
120#define TUNE_PROC(ENUM, NAME) Values.emplace_back(StringRef(NAME));
121#include "llvm/TargetParser/RISCVTargetParserDef.inc"
122}
123
124// This function is currently used by IREE, so it's not dead code.
125void getFeaturesForCPU(StringRef CPU,
126 SmallVectorImpl<std::string> &EnabledFeatures,
127 bool NeedPlus) {
128 StringRef MarchFromCPU = llvm::RISCV::getMArchFromMcpu(CPU);
129 if (MarchFromCPU == "")
130 return;
131
132 EnabledFeatures.clear();
133 auto RII = RISCVISAInfo::parseArchString(
134 Arch: MarchFromCPU, /* EnableExperimentalExtension */ true);
135
136 if (llvm::errorToBool(Err: RII.takeError()))
137 return;
138
139 std::vector<std::string> FeatStrings =
140 (*RII)->toFeatures(/* AddAllExtensions */ false);
141 for (const auto &F : FeatStrings)
142 if (NeedPlus)
143 EnabledFeatures.push_back(Elt: F);
144 else
145 EnabledFeatures.push_back(Elt: F.substr(pos: 1));
146}
147
148} // namespace RISCV
149
150namespace RISCVVType {
// Encode VTYPE into the binary format used by the VSETVLI instruction which
152// is used by our MC layer representation.
153//
154// Bits | Name | Description
155// -----+------------+------------------------------------------------
156// 8 | altfmt | Alternative format for bf16/ofp8
157// 7 | vma | Vector mask agnostic
158// 6 | vta | Vector tail agnostic
159// 5:3 | vsew[2:0] | Standard element width (SEW) setting
160// 2:0 | vlmul[2:0] | Vector register group multiplier (LMUL) setting
161unsigned encodeVTYPE(VLMUL VLMul, unsigned SEW, bool TailAgnostic,
162 bool MaskAgnostic, bool AltFmt) {
163 assert(isValidSEW(SEW) && "Invalid SEW");
164 unsigned VLMulBits = static_cast<unsigned>(VLMul);
165 unsigned VSEWBits = encodeSEW(SEW);
166 unsigned VTypeI = (VSEWBits << 3) | (VLMulBits & 0x7);
167 if (TailAgnostic)
168 VTypeI |= 0x40;
169 if (MaskAgnostic)
170 VTypeI |= 0x80;
171 if (AltFmt)
172 VTypeI |= 0x100;
173
174 return VTypeI;
175}
176
177unsigned encodeXSfmmVType(unsigned SEW, unsigned Widen, bool AltFmt) {
178 assert(isValidSEW(SEW) && "Invalid SEW");
179 assert((Widen == 1 || Widen == 2 || Widen == 4) && "Invalid Widen");
180 unsigned VSEWBits = encodeSEW(SEW);
181 unsigned TWiden = Log2_32(Value: Widen) + 1;
182 unsigned VTypeI = (VSEWBits << 3) | AltFmt << 8 | TWiden << 9;
183 return VTypeI;
184}
185
186std::pair<unsigned, bool> decodeVLMUL(VLMUL VLMul) {
187 switch (VLMul) {
188 default:
189 llvm_unreachable("Unexpected LMUL value!");
190 case LMUL_1:
191 case LMUL_2:
192 case LMUL_4:
193 case LMUL_8:
194 return std::make_pair(x: 1 << static_cast<unsigned>(VLMul), y: false);
195 case LMUL_F2:
196 case LMUL_F4:
197 case LMUL_F8:
198 return std::make_pair(x: 1 << (8 - static_cast<unsigned>(VLMul)), y: true);
199 }
200}
201
202void printVType(unsigned VType, raw_ostream &OS) {
203 unsigned Sew = getSEW(VType);
204 OS << "e" << Sew;
205
206 bool AltFmt = RISCVVType::isAltFmt(VType);
207 if (AltFmt)
208 OS << "alt";
209
210 unsigned LMul;
211 bool Fractional;
212 std::tie(args&: LMul, args&: Fractional) = decodeVLMUL(VLMul: getVLMUL(VType));
213
214 if (Fractional)
215 OS << ", mf";
216 else
217 OS << ", m";
218 OS << LMul;
219
220 if (isTailAgnostic(VType))
221 OS << ", ta";
222 else
223 OS << ", tu";
224
225 if (isMaskAgnostic(VType))
226 OS << ", ma";
227 else
228 OS << ", mu";
229}
230
231void printXSfmmVType(unsigned VType, raw_ostream &OS) {
232 OS << "e" << getSEW(VType) << ", w" << getXSfmmWiden(VType);
233}
234
235unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul) {
236 unsigned LMul;
237 bool Fractional;
238 std::tie(args&: LMul, args&: Fractional) = decodeVLMUL(VLMul);
239
240 // Convert LMul to a fixed point value with 3 fractional bits.
241 LMul = Fractional ? (8 / LMul) : (LMul * 8);
242
243 assert(SEW >= 8 && "Unexpected SEW value");
244 return (SEW * 8) / LMul;
245}
246
247std::optional<VLMUL> getSameRatioLMUL(unsigned Ratio, unsigned EEW) {
248 unsigned EMULFixedPoint = (EEW * 8) / Ratio;
249 bool Fractional = EMULFixedPoint < 8;
250 unsigned EMUL = Fractional ? 8 / EMULFixedPoint : EMULFixedPoint / 8;
251 if (!isValidLMUL(LMUL: EMUL, Fractional))
252 return std::nullopt;
253 return RISCVVType::encodeLMUL(LMUL: EMUL, Fractional);
254}
255
256} // namespace RISCVVType
257
258} // namespace llvm
259