//===-- RISCVTargetParser.cpp - Parser for target features ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a target parser to recognise hardware features
// for RISC-V CPUs.
//
//===----------------------------------------------------------------------===//

#include "llvm/TargetParser/RISCVTargetParser.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/TargetParser/RISCVISAInfo.h"

namespace llvm {
namespace RISCV {

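// RISCVTargetParserDef.inc defines the CPU list as PROC/TUNE_PROC X-macros;
// they are expanded once here to form the CPUKind enum and once more below to
// populate the CPUInfo table.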
enum CPUKind : unsigned {
#define PROC(ENUM, NAME, DEFAULT_MARCH, FAST_SCALAR_UNALIGN,                  \
             FAST_VECTOR_UNALIGN, MVENDORID, MARCHID, MIMPID)                 \
  CK_##ENUM,
#define TUNE_PROC(ENUM, NAME) CK_##ENUM,
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
};

constexpr CPUInfo RISCVCPUInfo[] = {
#define PROC(ENUM, NAME, DEFAULT_MARCH, FAST_SCALAR_UNALIGN,                  \
             FAST_VECTOR_UNALIGN, MVENDORID, MARCHID, MIMPID)                 \
  {                                                                           \
      NAME,                                                                   \
      DEFAULT_MARCH,                                                          \
      FAST_SCALAR_UNALIGN,                                                    \
      FAST_VECTOR_UNALIGN,                                                    \
      {MVENDORID, MARCHID, MIMPID},                                           \
  },
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
};

static const CPUInfo *getCPUInfoByName(StringRef CPU) {
  for (auto &C : RISCVCPUInfo)
    if (C.Name == CPU)
      return &C;
  return nullptr;
}

bool hasFastScalarUnalignedAccess(StringRef CPU) {
  const CPUInfo *Info = getCPUInfoByName(CPU);
  return Info && Info->FastScalarUnalignedAccess;
}

bool hasFastVectorUnalignedAccess(StringRef CPU) {
  const CPUInfo *Info = getCPUInfoByName(CPU);
  return Info && Info->FastVectorUnalignedAccess;
}

bool hasValidCPUModel(StringRef CPU) { return getCPUModel(CPU).isValid(); }

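// Look up the (MVendorID, MArchID, MImpID) triple recorded for a named CPU.
// These fields mirror the RISC-V mvendorid/marchid/mimpid machine CSRs; an
// unknown CPU name yields an all-zero model.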
CPUModel getCPUModel(StringRef CPU) {
  const CPUInfo *Info = getCPUInfoByName(CPU);
  if (!Info)
    return {0, 0, 0};
  return Info->Model;
}

StringRef getCPUNameFromCPUModel(const CPUModel &Model) {
  if (!Model.isValid())
    return "";

  for (auto &C : RISCVCPUInfo)
    if (C.Model == Model)
      return C.Name;
  return "";
}

bool parseCPU(StringRef CPU, bool IsRV64) {
  const CPUInfo *Info = getCPUInfoByName(CPU);

  if (!Info)
    return false;
  return Info->is64Bit() == IsRV64;
}

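// Tune CPU names include every name accepted by parseCPU plus the tune-only
// entries declared with TUNE_PROC in RISCVTargetParserDef.inc.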
bool parseTuneCPU(StringRef TuneCPU, bool IsRV64) {
  std::optional<CPUKind> Kind =
      llvm::StringSwitch<std::optional<CPUKind>>(TuneCPU)
#define TUNE_PROC(ENUM, NAME) .Case(NAME, CK_##ENUM)
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
          .Default(std::nullopt);

  if (Kind.has_value())
    return true;

  // Fallback to parsing as a CPU.
  return parseCPU(TuneCPU, IsRV64);
}

StringRef getMArchFromMcpu(StringRef CPU) {
  const CPUInfo *Info = getCPUInfoByName(CPU);
  if (!Info)
    return "";
  return Info->DefaultMarch;
}

void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64) {
  for (const auto &C : RISCVCPUInfo) {
    if (IsRV64 == C.is64Bit())
      Values.emplace_back(C.Name);
  }
}

void fillValidTuneCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64) {
  for (const auto &C : RISCVCPUInfo) {
    if (IsRV64 == C.is64Bit())
      Values.emplace_back(C.Name);
  }
#define TUNE_PROC(ENUM, NAME) Values.emplace_back(StringRef(NAME));
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
}

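// Expand a CPU name into the feature strings implied by its default march.
// RISCVISAInfo::toFeatures() produces strings of the form "+ext"; when
// NeedPlus is false the leading '+' is dropped so callers get bare extension
// names.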
// This function is currently used by IREE, so it's not dead code.
void getFeaturesForCPU(StringRef CPU,
                       SmallVectorImpl<std::string> &EnabledFeatures,
                       bool NeedPlus) {
  StringRef MarchFromCPU = llvm::RISCV::getMArchFromMcpu(CPU);
  if (MarchFromCPU == "")
    return;

  EnabledFeatures.clear();
  auto RII = RISCVISAInfo::parseArchString(
      MarchFromCPU, /* EnableExperimentalExtension */ true);

  if (llvm::errorToBool(RII.takeError()))
    return;

  std::vector<std::string> FeatStrings =
      (*RII)->toFeatures(/* AddAllExtensions */ false);
  for (const auto &F : FeatStrings)
    if (NeedPlus)
      EnabledFeatures.push_back(F);
    else
      EnabledFeatures.push_back(F.substr(1));
}

} // namespace RISCV

namespace RISCVVType {
// Encode VTYPE into the binary format used by the VSETVLI instruction, which
// is used by our MC layer representation.
//
// Bits | Name       | Description
// -----+------------+------------------------------------------------
// 7    | vma        | Vector mask agnostic
// 6    | vta        | Vector tail agnostic
// 5:3  | vsew[2:0]  | Standard element width (SEW) setting
// 2:0  | vlmul[2:0] | Vector register group multiplier (LMUL) setting
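//
// For example, SEW=32 (vsew=0b010) with LMUL=1 (vlmul=0b000), tail agnostic,
// and mask undisturbed encodes as 0b01010000 = 0x50.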
unsigned encodeVTYPE(VLMUL VLMul, unsigned SEW, bool TailAgnostic,
                     bool MaskAgnostic) {
  assert(isValidSEW(SEW) && "Invalid SEW");
  unsigned VLMulBits = static_cast<unsigned>(VLMul);
  unsigned VSEWBits = encodeSEW(SEW);
  unsigned VTypeI = (VSEWBits << 3) | (VLMulBits & 0x7);
  if (TailAgnostic)
    VTypeI |= 0x40;
  if (MaskAgnostic)
    VTypeI |= 0x80;

  return VTypeI;
}

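// XSfmm vtype layout, as produced below: bits 5:3 hold vsew, bit 8 holds
// altfmt, and bits 10:9 hold twiden = log2(widen) + 1, so widen factors of
// 1/2/4 encode as 1/2/3.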
unsigned encodeXSfmmVType(unsigned SEW, unsigned Widen, bool AltFmt) {
  assert(isValidSEW(SEW) && "Invalid SEW");
  assert((Widen == 1 || Widen == 2 || Widen == 4) && "Invalid Widen");
  unsigned VSEWBits = encodeSEW(SEW);
  unsigned TWiden = Log2_32(Widen) + 1;
  unsigned VTypeI = (VSEWBits << 3) | AltFmt << 8 | TWiden << 9;
  return VTypeI;
}

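// Decode VLMUL into (multiplier, fractional): LMUL_1..LMUL_8 give integer
// multipliers 1, 2, 4, 8, while LMUL_F2..LMUL_F8 give fractional multipliers
// 1/2, 1/4, 1/8 (the returned value is the denominator).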
std::pair<unsigned, bool> decodeVLMUL(VLMUL VLMul) {
  switch (VLMul) {
  default:
    llvm_unreachable("Unexpected LMUL value!");
  case LMUL_1:
  case LMUL_2:
  case LMUL_4:
  case LMUL_8:
    return std::make_pair(1 << static_cast<unsigned>(VLMul), false);
  case LMUL_F2:
  case LMUL_F4:
  case LMUL_F8:
    return std::make_pair(1 << (8 - static_cast<unsigned>(VLMul)), true);
  }
}

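// Print a VTYPE value in the textual form used by vsetvli operands; for
// example, 0xD1 (SEW=32, LMUL=2, tail agnostic, mask agnostic) prints as
// "e32, m2, ta, ma".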
void printVType(unsigned VType, raw_ostream &OS) {
  unsigned Sew = getSEW(VType);
  OS << "e" << Sew;

  unsigned LMul;
  bool Fractional;
  std::tie(LMul, Fractional) = decodeVLMUL(getVLMUL(VType));

  if (Fractional)
    OS << ", mf";
  else
    OS << ", m";
  OS << LMul;

  if (isTailAgnostic(VType))
    OS << ", ta";
  else
    OS << ", tu";

  if (isMaskAgnostic(VType))
    OS << ", ma";
  else
    OS << ", mu";
}

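// Compute SEW/LMUL using a fixed-point LMUL with 3 fractional bits. For
// example, SEW=32 with LMUL=1/2 gives a fixed-point LMUL of 4, so the ratio
// is (32 * 8) / 4 = 64.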
unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul) {
  unsigned LMul;
  bool Fractional;
  std::tie(LMul, Fractional) = decodeVLMUL(VLMul);

  // Convert LMul to a fixed point value with 3 fractional bits.
  LMul = Fractional ? (8 / LMul) : (LMul * 8);

  assert(SEW >= 8 && "Unexpected SEW value");
  return (SEW * 8) / LMul;
}

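// Find the LMUL that keeps SEW/LMUL constant when the element width changes
// to EEW. For example, SEW=32 with LMUL=1 (ratio 32) and EEW=16 yields
// LMUL=1/2; std::nullopt is returned if no valid LMUL preserves the ratio.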
std::optional<VLMUL> getSameRatioLMUL(unsigned SEW, VLMUL VLMul, unsigned EEW) {
  unsigned Ratio = RISCVVType::getSEWLMULRatio(SEW, VLMul);
  unsigned EMULFixedPoint = (EEW * 8) / Ratio;
  bool Fractional = EMULFixedPoint < 8;
  unsigned EMUL = Fractional ? 8 / EMULFixedPoint : EMULFixedPoint / 8;
  if (!isValidLMUL(EMUL, Fractional))
    return std::nullopt;
  return RISCVVType::encodeLMUL(EMUL, Fractional);
}

} // namespace RISCVVType

} // namespace llvm