1 | //===-- Intrinsics.cpp - Intrinsic Function Handling ------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements functions required for supporting intrinsic functions. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "llvm/IR/Intrinsics.h" |
14 | #include "llvm/ADT/StringExtras.h" |
15 | #include "llvm/ADT/StringTable.h" |
16 | #include "llvm/IR/ConstantRange.h" |
17 | #include "llvm/IR/Function.h" |
18 | #include "llvm/IR/IntrinsicsAArch64.h" |
19 | #include "llvm/IR/IntrinsicsAMDGPU.h" |
20 | #include "llvm/IR/IntrinsicsARM.h" |
21 | #include "llvm/IR/IntrinsicsBPF.h" |
22 | #include "llvm/IR/IntrinsicsHexagon.h" |
23 | #include "llvm/IR/IntrinsicsLoongArch.h" |
24 | #include "llvm/IR/IntrinsicsMips.h" |
25 | #include "llvm/IR/IntrinsicsNVPTX.h" |
26 | #include "llvm/IR/IntrinsicsPowerPC.h" |
27 | #include "llvm/IR/IntrinsicsR600.h" |
28 | #include "llvm/IR/IntrinsicsRISCV.h" |
29 | #include "llvm/IR/IntrinsicsS390.h" |
30 | #include "llvm/IR/IntrinsicsVE.h" |
31 | #include "llvm/IR/IntrinsicsX86.h" |
32 | #include "llvm/IR/IntrinsicsXCore.h" |
33 | #include "llvm/IR/Module.h" |
34 | #include "llvm/IR/Type.h" |
35 | |
36 | using namespace llvm; |
37 | |
38 | /// Table of string intrinsic names indexed by enum value. |
39 | #define GET_INTRINSIC_NAME_TABLE |
40 | #include "llvm/IR/IntrinsicImpl.inc" |
41 | #undef GET_INTRINSIC_NAME_TABLE |
42 | |
43 | StringRef Intrinsic::getBaseName(ID id) { |
44 | assert(id < num_intrinsics && "Invalid intrinsic ID!" ); |
45 | return IntrinsicNameTable[IntrinsicNameOffsetTable[id]]; |
46 | } |
47 | |
/// Return the full name of a non-overloaded intrinsic. Overloaded intrinsics
/// require per-instantiation type mangling, so they must go through the
/// getName(ID, ArrayRef<Type *>, Module *, FunctionType *) overload instead.
StringRef Intrinsic::getName(ID id) {
  assert(id < num_intrinsics && "Invalid intrinsic ID!" );
  assert(!Intrinsic::isOverloaded(id) &&
         "This version of getName does not support overloading" );
  // For non-overloaded intrinsics the base name is the complete name.
  return getBaseName(id);
}
54 | |
55 | /// Returns a stable mangling for the type specified for use in the name |
56 | /// mangling scheme used by 'any' types in intrinsic signatures. The mangling |
57 | /// of named types is simply their name. Manglings for unnamed types consist |
58 | /// of a prefix ('p' for pointers, 'a' for arrays, 'f_' for functions) |
59 | /// combined with the mangling of their component types. A vararg function |
60 | /// type will have a suffix of 'vararg'. Since function types can contain |
61 | /// other function types, we close a function type mangling with suffix 'f' |
62 | /// which can't be confused with it's prefix. This ensures we don't have |
63 | /// collisions between two unrelated function types. Otherwise, you might |
64 | /// parse ffXX as f(fXX) or f(fX)X. (X is a placeholder for any other type.) |
65 | /// The HasUnnamedType boolean is set if an unnamed type was encountered, |
66 | /// indicating that extra care must be taken to ensure a unique name. |
/// Returns a stable mangling for the type specified for use in the name
/// mangling scheme used by 'any' types in intrinsic signatures. The mangling
/// of named types is simply their name. Manglings for unnamed types consist
/// of a prefix ('p' for pointers, 'a' for arrays, 'f_' for functions)
/// combined with the mangling of their component types. A vararg function
/// type will have a suffix of 'vararg'. Since function types can contain
/// other function types, we close a function type mangling with suffix 'f'
/// which can't be confused with it's prefix. This ensures we don't have
/// collisions between two unrelated function types. Otherwise, you might
/// parse ffXX as f(fXX) or f(fX)X. (X is a placeholder for any other type.)
/// The HasUnnamedType boolean is set if an unnamed type was encountered,
/// indicating that extra care must be taken to ensure a unique name.
static std::string getMangledTypeStr(Type *Ty, bool &HasUnnamedType) {
  std::string Result;
  if (PointerType *PTyp = dyn_cast<PointerType>(Val: Ty)) {
    // Pointers mangle only their address space (opaque pointers carry no
    // pointee type), e.g. "p0".
    Result += "p" + utostr(X: PTyp->getAddressSpace());
  } else if (ArrayType *ATyp = dyn_cast<ArrayType>(Val: Ty)) {
    // Arrays: 'a' + element count + element mangling.
    Result += "a" + utostr(X: ATyp->getNumElements()) +
              getMangledTypeStr(Ty: ATyp->getElementType(), HasUnnamedType);
  } else if (StructType *STyp = dyn_cast<StructType>(Val: Ty)) {
    if (!STyp->isLiteral()) {
      // Identified structs mangle by name; an anonymous identified struct
      // forces the caller to uniquify the final name via the Module.
      Result += "s_" ;
      if (STyp->hasName())
        Result += STyp->getName();
      else
        HasUnnamedType = true;
    } else {
      // Literal structs mangle each element in order.
      Result += "sl_" ;
      for (auto *Elem : STyp->elements())
        Result += getMangledTypeStr(Ty: Elem, HasUnnamedType);
    }
    // Ensure nested structs are distinguishable.
    Result += "s" ;
  } else if (FunctionType *FT = dyn_cast<FunctionType>(Val: Ty)) {
    Result += "f_" + getMangledTypeStr(Ty: FT->getReturnType(), HasUnnamedType);
    for (size_t i = 0; i < FT->getNumParams(); i++)
      Result += getMangledTypeStr(Ty: FT->getParamType(i), HasUnnamedType);
    if (FT->isVarArg())
      Result += "vararg" ;
    // Ensure nested function types are distinguishable.
    Result += "f" ;
  } else if (VectorType *VTy = dyn_cast<VectorType>(Val: Ty)) {
    // Vectors: optional "nx" scalable prefix, then 'v' + min element count +
    // element mangling, e.g. "nxv4i32".
    ElementCount EC = VTy->getElementCount();
    if (EC.isScalable())
      Result += "nx" ;
    Result += "v" + utostr(X: EC.getKnownMinValue()) +
              getMangledTypeStr(Ty: VTy->getElementType(), HasUnnamedType);
  } else if (TargetExtType *TETy = dyn_cast<TargetExtType>(Val: Ty)) {
    // Target extension types: 't' + name + "_<type param>"* + "_<int param>"*.
    Result += "t" ;
    Result += TETy->getName();
    for (Type *ParamTy : TETy->type_params())
      Result += "_" + getMangledTypeStr(Ty: ParamTy, HasUnnamedType);
    for (unsigned IntParam : TETy->int_params())
      Result += "_" + utostr(X: IntParam);
    // Ensure nested target extension types are distinguishable.
    Result += "t" ;
  } else if (Ty) {
    // Primitive types use fixed spellings.
    switch (Ty->getTypeID()) {
    default:
      llvm_unreachable("Unhandled type" );
    case Type::VoidTyID:
      Result += "isVoid" ;
      break;
    case Type::MetadataTyID:
      Result += "Metadata" ;
      break;
    case Type::HalfTyID:
      Result += "f16" ;
      break;
    case Type::BFloatTyID:
      Result += "bf16" ;
      break;
    case Type::FloatTyID:
      Result += "f32" ;
      break;
    case Type::DoubleTyID:
      Result += "f64" ;
      break;
    case Type::X86_FP80TyID:
      Result += "f80" ;
      break;
    case Type::FP128TyID:
      Result += "f128" ;
      break;
    case Type::PPC_FP128TyID:
      Result += "ppcf128" ;
      break;
    case Type::X86_AMXTyID:
      Result += "x86amx" ;
      break;
    case Type::IntegerTyID:
      Result += "i" + utostr(X: cast<IntegerType>(Val: Ty)->getBitWidth());
      break;
    }
  }
  return Result;
}
152 | |
/// Shared implementation for building a (possibly mangled) intrinsic name:
/// the base name plus one ".<mangled type>" suffix per overloaded type in
/// \p Tys. If any overloaded type was an unnamed struct, the name alone is
/// not unique and the Module is asked to produce a uniqued name instead.
/// \p EarlyModuleCheck asserts up front that pointer overloads come with a
/// Module (only the public getName entry point requests this).
static std::string getIntrinsicNameImpl(Intrinsic::ID Id, ArrayRef<Type *> Tys,
                                        Module *M, FunctionType *FT,
                                        bool EarlyModuleCheck) {

  assert(Id < Intrinsic::num_intrinsics && "Invalid intrinsic ID!" );
  assert((Tys.empty() || Intrinsic::isOverloaded(Id)) &&
         "This version of getName is for overloaded intrinsics only" );
  // EarlyModuleCheck is only used inside the assert below; silence unused
  // warnings in release (NDEBUG) builds.
  (void)EarlyModuleCheck;
  assert((!EarlyModuleCheck || M ||
          !any_of(Tys, [](Type *T) { return isa<PointerType>(T); })) &&
         "Intrinsic overloading on pointer types need to provide a Module" );
  bool HasUnnamedType = false;
  std::string Result(Intrinsic::getBaseName(id: Id));
  for (Type *Ty : Tys)
    Result += "." + getMangledTypeStr(Ty, HasUnnamedType);
  if (HasUnnamedType) {
    assert(M && "unnamed types need a module" );
    // Derive the prototype if the caller didn't supply one; if they did, it
    // must agree with the one implied by (Id, Tys).
    if (!FT)
      FT = Intrinsic::getType(Context&: M->getContext(), id: Id, Tys);
    else
      assert((FT == Intrinsic::getType(M->getContext(), Id, Tys)) &&
             "Provided FunctionType must match arguments" );
    return M->getUniqueIntrinsicName(BaseName: Result, Id, Proto: FT);
  }
  return Result;
}
179 | |
180 | std::string Intrinsic::getName(ID Id, ArrayRef<Type *> Tys, Module *M, |
181 | FunctionType *FT) { |
182 | assert(M && "We need to have a Module" ); |
183 | return getIntrinsicNameImpl(Id, Tys, M, FT, EarlyModuleCheck: true); |
184 | } |
185 | |
186 | std::string Intrinsic::getNameNoUnnamedTypes(ID Id, ArrayRef<Type *> Tys) { |
187 | return getIntrinsicNameImpl(Id, Tys, M: nullptr, FT: nullptr, EarlyModuleCheck: false); |
188 | } |
189 | |
/// IIT_Info - These are enumerators that describe the entries returned by the
/// getIntrinsicInfoTableEntries function.
///
/// Defined in Intrinsics.td.
// NOTE: The enumerator values are emitted by TableGen and must stay in sync
// with the byte encoding stored in IIT_Table / IIT_LongEncodingTable below.
enum IIT_Info {
#define GET_INTRINSIC_IITINFO
#include "llvm/IR/IntrinsicImpl.inc"
#undef GET_INTRINSIC_IITINFO
};
199 | |
/// Decode one type entry from the compressed type-info stream \p Infos
/// (starting at \p NextElt, which is advanced past everything consumed),
/// appending the resulting IITDescriptor(s) to \p OutputTable. Compound
/// entries (vectors, structs) recurse to decode their component types.
/// \p LastInfo is the previously decoded opcode; IIT_SCALABLE_VEC uses it to
/// mark the vector opcode that immediately follows it as scalable.
static void
DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
              IIT_Info LastInfo,
              SmallVectorImpl<Intrinsic::IITDescriptor> &OutputTable) {
  using namespace Intrinsic;

  bool IsScalableVector = (LastInfo == IIT_SCALABLE_VEC);

  IIT_Info Info = IIT_Info(Infos[NextElt++]);
  // Base element count for the IIT_STRUCT2..IIT_STRUCT9 fallthrough chain
  // below; each fallthrough level adds one.
  unsigned StructElts = 2;

  switch (Info) {
  case IIT_Done:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Void, Field: 0));
    return;
  case IIT_VARARG:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::VarArg, Field: 0));
    return;
  case IIT_MMX:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::MMX, Field: 0));
    return;
  case IIT_AMX:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::AMX, Field: 0));
    return;
  case IIT_TOKEN:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Token, Field: 0));
    return;
  case IIT_METADATA:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Metadata, Field: 0));
    return;
  case IIT_F16:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Half, Field: 0));
    return;
  case IIT_BF16:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::BFloat, Field: 0));
    return;
  case IIT_F32:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Float, Field: 0));
    return;
  case IIT_F64:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Double, Field: 0));
    return;
  case IIT_F128:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Quad, Field: 0));
    return;
  case IIT_PPCF128:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::PPCQuad, Field: 0));
    return;
  case IIT_I1:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 1));
    return;
  case IIT_I2:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 2));
    return;
  case IIT_I4:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 4));
    return;
  case IIT_AARCH64_SVCOUNT:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::AArch64Svcount, Field: 0));
    return;
  case IIT_I8:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 8));
    return;
  case IIT_I16:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 16));
    return;
  case IIT_I32:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 32));
    return;
  case IIT_I64:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 64));
    return;
  case IIT_I128:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 128));
    return;
  // Vector opcodes encode only the element count; the element type follows in
  // the stream, so recurse to decode it.
  case IIT_V1:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 1, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V2:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 2, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V3:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 3, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V4:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 4, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V6:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 6, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V8:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 8, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V10:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 10, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V16:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 16, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V32:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 32, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V64:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 64, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V128:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 128, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V256:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 256, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V512:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 512, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V1024:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 1024, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V2048:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 2048, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V4096:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 4096, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  // WebAssembly reference types are represented as pointers in reserved
  // address spaces (10 = externref, 20 = funcref).
  case IIT_EXTERNREF:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Pointer, Field: 10));
    return;
  case IIT_FUNCREF:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Pointer, Field: 20));
    return;
  case IIT_PTR:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Pointer, Field: 0));
    return;
  case IIT_ANYPTR: // [ANYPTR addrspace]
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::Pointer, Field: Infos[NextElt++]));
    return;
  case IIT_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Argument, Field: ArgInfo));
    return;
  }
  case IIT_EXTEND_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::ExtendArgument, Field: ArgInfo));
    return;
  }
  case IIT_TRUNC_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::TruncArgument, Field: ArgInfo));
    return;
  }
  case IIT_ONE_NTH_ELTS_VEC_ARG: {
    unsigned short ArgNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    unsigned short N = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::OneNthEltsVecArgument, Hi: N, Lo: ArgNo));
    return;
  }
  case IIT_SAME_VEC_WIDTH_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::SameVecWidthArgument, Field: ArgInfo));
    return;
  }
  case IIT_VEC_OF_ANYPTRS_TO_ELT: {
    unsigned short ArgNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    unsigned short RefNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::VecOfAnyPtrsToElt, Hi: ArgNo, Lo: RefNo));
    return;
  }
  case IIT_EMPTYSTRUCT:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Struct, Field: 0));
    return;
  // Each fallthrough adds one to the base count of 2 set above, so
  // IIT_STRUCTn yields StructElts == n.
  case IIT_STRUCT9:
    ++StructElts;
    [[fallthrough]];
  case IIT_STRUCT8:
    ++StructElts;
    [[fallthrough]];
  case IIT_STRUCT7:
    ++StructElts;
    [[fallthrough]];
  case IIT_STRUCT6:
    ++StructElts;
    [[fallthrough]];
  case IIT_STRUCT5:
    ++StructElts;
    [[fallthrough]];
  case IIT_STRUCT4:
    ++StructElts;
    [[fallthrough]];
  case IIT_STRUCT3:
    ++StructElts;
    [[fallthrough]];
  case IIT_STRUCT2: {
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::Struct, Field: StructElts));

    for (unsigned i = 0; i != StructElts; ++i)
      DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  }
  case IIT_SUBDIVIDE2_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::Subdivide2Argument, Field: ArgInfo));
    return;
  }
  case IIT_SUBDIVIDE4_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::Subdivide4Argument, Field: ArgInfo));
    return;
  }
  case IIT_VEC_ELEMENT: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::VecElementArgument, Field: ArgInfo));
    return;
  }
  case IIT_SCALABLE_VEC: {
    // Emits nothing itself; the recursive call sees LastInfo ==
    // IIT_SCALABLE_VEC and marks the following vector as scalable.
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  }
  case IIT_VEC_OF_BITCASTS_TO_INT: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::VecOfBitcastsToInt, Field: ArgInfo));
    return;
  }
  }
  llvm_unreachable("unhandled" );
}
452 | |
453 | #define GET_INTRINSIC_GENERATOR_GLOBAL |
454 | #include "llvm/IR/IntrinsicImpl.inc" |
455 | #undef GET_INTRINSIC_GENERATOR_GLOBAL |
456 | |
457 | void Intrinsic::getIntrinsicInfoTableEntries( |
458 | ID id, SmallVectorImpl<IITDescriptor> &T) { |
459 | static_assert(sizeof(IIT_Table[0]) == 2, |
460 | "Expect 16-bit entries in IIT_Table" ); |
461 | // Check to see if the intrinsic's type was expressible by the table. |
462 | uint16_t TableVal = IIT_Table[id - 1]; |
463 | |
464 | // Decode the TableVal into an array of IITValues. |
465 | SmallVector<unsigned char> IITValues; |
466 | ArrayRef<unsigned char> IITEntries; |
467 | unsigned NextElt = 0; |
468 | if (TableVal >> 15) { |
469 | // This is an offset into the IIT_LongEncodingTable. |
470 | IITEntries = IIT_LongEncodingTable; |
471 | |
472 | // Strip sentinel bit. |
473 | NextElt = TableVal & 0x7fff; |
474 | } else { |
475 | // If the entry was encoded into a single word in the table itself, decode |
476 | // it from an array of nibbles to an array of bytes. |
477 | do { |
478 | IITValues.push_back(Elt: TableVal & 0xF); |
479 | TableVal >>= 4; |
480 | } while (TableVal); |
481 | |
482 | IITEntries = IITValues; |
483 | NextElt = 0; |
484 | } |
485 | |
486 | // Okay, decode the table into the output vector of IITDescriptors. |
487 | DecodeIITType(NextElt, Infos: IITEntries, LastInfo: IIT_Done, OutputTable&: T); |
488 | while (NextElt != IITEntries.size() && IITEntries[NextElt] != 0) |
489 | DecodeIITType(NextElt, Infos: IITEntries, LastInfo: IIT_Done, OutputTable&: T); |
490 | } |
491 | |
/// Materialize the next concrete Type from the descriptor stream \p Infos
/// (which is advanced past everything consumed), resolving Argument-style
/// descriptors against the caller-provided overload types \p Tys. Compound
/// descriptors (vectors, structs) recurse for their component types.
static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
                             ArrayRef<Type *> Tys, LLVMContext &Context) {
  using namespace Intrinsic;

  // Consume one descriptor.
  IITDescriptor D = Infos.front();
  Infos = Infos.slice(N: 1);

  switch (D.Kind) {
  case IITDescriptor::Void:
    return Type::getVoidTy(C&: Context);
  case IITDescriptor::VarArg:
    // VarArg is represented as a trailing void parameter; Intrinsic::getType
    // strips it and sets the FunctionType's vararg flag instead.
    return Type::getVoidTy(C&: Context);
  case IITDescriptor::MMX:
    // MMX is modeled as <1 x i64> rather than a dedicated type.
    return llvm::FixedVectorType::get(ElementType: llvm::IntegerType::get(C&: Context, NumBits: 64), NumElts: 1);
  case IITDescriptor::AMX:
    return Type::getX86_AMXTy(C&: Context);
  case IITDescriptor::Token:
    return Type::getTokenTy(C&: Context);
  case IITDescriptor::Metadata:
    return Type::getMetadataTy(C&: Context);
  case IITDescriptor::Half:
    return Type::getHalfTy(C&: Context);
  case IITDescriptor::BFloat:
    return Type::getBFloatTy(C&: Context);
  case IITDescriptor::Float:
    return Type::getFloatTy(C&: Context);
  case IITDescriptor::Double:
    return Type::getDoubleTy(C&: Context);
  case IITDescriptor::Quad:
    return Type::getFP128Ty(C&: Context);
  case IITDescriptor::PPCQuad:
    return Type::getPPC_FP128Ty(C&: Context);
  case IITDescriptor::AArch64Svcount:
    return TargetExtType::get(Context, Name: "aarch64.svcount" );

  case IITDescriptor::Integer:
    return IntegerType::get(C&: Context, NumBits: D.Integer_Width);
  case IITDescriptor::Vector:
    // The element type is the next descriptor in the stream.
    return VectorType::get(ElementType: DecodeFixedType(Infos, Tys, Context),
                           EC: D.Vector_Width);
  case IITDescriptor::Pointer:
    return PointerType::get(C&: Context, AddressSpace: D.Pointer_AddressSpace);
  case IITDescriptor::Struct: {
    SmallVector<Type *, 8> Elts;
    for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
      Elts.push_back(Elt: DecodeFixedType(Infos, Tys, Context));
    return StructType::get(Context, Elements: Elts);
  }
  case IITDescriptor::Argument:
    // Directly one of the caller-supplied overload types.
    return Tys[D.getArgumentNumber()];
  case IITDescriptor::ExtendArgument: {
    // Double the bit width of the referenced overload type (elementwise for
    // vectors).
    Type *Ty = Tys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(Val: Ty))
      return VectorType::getExtendedElementVectorType(VTy);

    return IntegerType::get(C&: Context, NumBits: 2 * cast<IntegerType>(Val: Ty)->getBitWidth());
  }
  case IITDescriptor::TruncArgument: {
    // Halve the bit width of the referenced overload type (elementwise for
    // vectors).
    Type *Ty = Tys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(Val: Ty))
      return VectorType::getTruncatedElementVectorType(VTy);

    IntegerType *ITy = cast<IntegerType>(Val: Ty);
    assert(ITy->getBitWidth() % 2 == 0);
    return IntegerType::get(C&: Context, NumBits: ITy->getBitWidth() / 2);
  }
  case IITDescriptor::Subdivide2Argument:
  case IITDescriptor::Subdivide4Argument: {
    Type *Ty = Tys[D.getArgumentNumber()];
    VectorType *VTy = dyn_cast<VectorType>(Val: Ty);
    assert(VTy && "Expected an argument of Vector Type" );
    int SubDivs = D.Kind == IITDescriptor::Subdivide2Argument ? 1 : 2;
    return VectorType::getSubdividedVectorType(VTy, NumSubdivs: SubDivs);
  }
  case IITDescriptor::OneNthEltsVecArgument:
    return VectorType::getOneNthElementsVectorType(
        VTy: cast<VectorType>(Val: Tys[D.getRefArgNumber()]), Denominator: D.getVectorDivisor());
  case IITDescriptor::SameVecWidthArgument: {
    // New element type (next descriptor) with the element count of the
    // referenced overload type; scalar if the reference is scalar.
    Type *EltTy = DecodeFixedType(Infos, Tys, Context);
    Type *Ty = Tys[D.getArgumentNumber()];
    if (auto *VTy = dyn_cast<VectorType>(Val: Ty))
      return VectorType::get(ElementType: EltTy, EC: VTy->getElementCount());
    return EltTy;
  }
  case IITDescriptor::VecElementArgument: {
    Type *Ty = Tys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(Val: Ty))
      return VTy->getElementType();
    llvm_unreachable("Expected an argument of Vector Type" );
  }
  case IITDescriptor::VecOfBitcastsToInt: {
    Type *Ty = Tys[D.getArgumentNumber()];
    VectorType *VTy = dyn_cast<VectorType>(Val: Ty);
    assert(VTy && "Expected an argument of Vector Type" );
    return VectorType::getInteger(VTy);
  }
  case IITDescriptor::VecOfAnyPtrsToElt:
    // Return the overloaded type (which determines the pointers address space)
    return Tys[D.getOverloadArgNumber()];
  }
  llvm_unreachable("unhandled" );
}
594 | |
595 | FunctionType *Intrinsic::getType(LLVMContext &Context, ID id, |
596 | ArrayRef<Type *> Tys) { |
597 | SmallVector<IITDescriptor, 8> Table; |
598 | getIntrinsicInfoTableEntries(id, T&: Table); |
599 | |
600 | ArrayRef<IITDescriptor> TableRef = Table; |
601 | Type *ResultTy = DecodeFixedType(Infos&: TableRef, Tys, Context); |
602 | |
603 | SmallVector<Type *, 8> ArgTys; |
604 | while (!TableRef.empty()) |
605 | ArgTys.push_back(Elt: DecodeFixedType(Infos&: TableRef, Tys, Context)); |
606 | |
607 | // DecodeFixedType returns Void for IITDescriptor::Void and |
608 | // IITDescriptor::VarArg If we see void type as the type of the last argument, |
609 | // it is vararg intrinsic |
610 | if (!ArgTys.empty() && ArgTys.back()->isVoidTy()) { |
611 | ArgTys.pop_back(); |
612 | return FunctionType::get(Result: ResultTy, Params: ArgTys, isVarArg: true); |
613 | } |
614 | return FunctionType::get(Result: ResultTy, Params: ArgTys, isVarArg: false); |
615 | } |
616 | |
/// Return true if intrinsic \p id has 'any'-typed operands or results and
/// therefore requires name mangling per instantiation.
bool Intrinsic::isOverloaded(ID id) {
  // The entire body (a table lookup) is generated by TableGen.
#define GET_INTRINSIC_OVERLOAD_TABLE
#include "llvm/IR/IntrinsicImpl.inc"
#undef GET_INTRINSIC_OVERLOAD_TABLE
}
622 | |
623 | /// Table of per-target intrinsic name tables. |
624 | #define GET_INTRINSIC_TARGET_DATA |
625 | #include "llvm/IR/IntrinsicImpl.inc" |
626 | #undef GET_INTRINSIC_TARGET_DATA |
627 | |
628 | bool Intrinsic::isTargetIntrinsic(Intrinsic::ID IID) { |
629 | return IID > TargetInfos[0].Count; |
630 | } |
631 | |
632 | /// Looks up Name in NameTable via binary search. NameTable must be sorted |
633 | /// and all entries must start with "llvm.". If NameTable contains an exact |
634 | /// match for Name or a prefix of Name followed by a dot, its index in |
635 | /// NameTable is returned. Otherwise, -1 is returned. |
/// Looks up Name in NameTable via binary search. NameTable must be sorted
/// and all entries must start with "llvm.". If NameTable contains an exact
/// match for Name or a prefix of Name followed by a dot, its index in
/// NameTable is returned. Otherwise, -1 is returned.
static int lookupLLVMIntrinsicByName(ArrayRef<unsigned> NameOffsetTable,
                                     StringRef Name, StringRef Target = "" ) {
  assert(Name.starts_with("llvm." ) && "Unexpected intrinsic prefix" );
  assert(Name.drop_front(5).starts_with(Target) && "Unexpected target" );

  // Do successive binary searches of the dotted name components. For
  // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
  // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
  // "llvm.gc.experimental.statepoint", and then we will stop as the range is
  // size 1. During the search, we can skip the prefix that we already know is
  // identical. By using strncmp we consider names with differing suffixes to
  // be part of the equal range.
  size_t CmpEnd = 4; // Skip the "llvm" component.
  if (!Target.empty())
    CmpEnd += 1 + Target.size(); // skip the .target component.

  const unsigned *Low = NameOffsetTable.begin();
  const unsigned *High = NameOffsetTable.end();
  // LastLow remembers the start of the last non-empty range, so a candidate
  // survives even if a later component narrows [Low, High) to empty.
  const unsigned *LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    size_t CmpStart = CmpEnd;
    CmpEnd = Name.find(C: '.', From: CmpStart + 1);
    CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
    auto Cmp = [CmpStart, CmpEnd](auto LHS, auto RHS) {
      // `equal_range` requires the comparison to work with either side being an
      // offset or the value. Detect which kind each side is to set up the
      // compared strings.
      StringRef LHSStr;
      if constexpr (std::is_integral_v<decltype(LHS)>) {
        LHSStr = IntrinsicNameTable[LHS];
      } else {
        LHSStr = LHS;
      }
      StringRef RHSStr;
      if constexpr (std::is_integral_v<decltype(RHS)>) {
        RHSStr = IntrinsicNameTable[RHS];
      } else {
        RHSStr = RHS;
      }
      return strncmp(s1: LHSStr.data() + CmpStart, s2: RHSStr.data() + CmpStart,
                     n: CmpEnd - CmpStart) < 0;
    };
    LastLow = Low;
    std::tie(args&: Low, args&: High) = std::equal_range(first: Low, last: High, val: Name.data(), comp: Cmp);
  }
  // If the final range is still non-empty, its first element is the best
  // candidate; otherwise fall back to the previously remembered one.
  if (High - Low > 0)
    LastLow = Low;

  if (LastLow == NameOffsetTable.end())
    return -1;
  // Accept only an exact match or a dotted-prefix match (overload suffixes).
  StringRef NameFound = IntrinsicNameTable[*LastLow];
  if (Name == NameFound ||
      (Name.starts_with(Prefix: NameFound) && Name[NameFound.size()] == '.'))
    return LastLow - NameOffsetTable.begin();
  return -1;
}
692 | |
693 | /// Find the segment of \c IntrinsicNameOffsetTable for intrinsics with the same |
694 | /// target as \c Name, or the generic table if \c Name is not target specific. |
695 | /// |
696 | /// Returns the relevant slice of \c IntrinsicNameOffsetTable and the target |
697 | /// name. |
/// Find the segment of \c IntrinsicNameOffsetTable for intrinsics with the same
/// target as \c Name, or the generic table if \c Name is not target specific.
///
/// Returns the relevant slice of \c IntrinsicNameOffsetTable and the target
/// name.
static std::pair<ArrayRef<unsigned>, StringRef>
findTargetSubtable(StringRef Name) {
  assert(Name.starts_with("llvm." ));

  ArrayRef<IntrinsicTargetInfo> Targets(TargetInfos);
  // Drop "llvm." and take the first dotted component. That will be the target
  // if this is target specific.
  StringRef Target = Name.drop_front(N: 5).split(Separator: '.').first;
  // TargetInfos is sorted by name, so binary-search for the first entry not
  // less than Target.
  auto It = partition_point(
      Range&: Targets, P: [=](const IntrinsicTargetInfo &TI) { return TI.Name < Target; });
  // We've either found the target or just fall back to the generic set, which
  // is always first.
  const auto &TI = It != Targets.end() && It->Name == Target ? *It : Targets[0];
  // Offsets are relative to entry 1: index 0 is reserved for not_intrinsic.
  return {ArrayRef(&IntrinsicNameOffsetTable[1] + TI.Offset, TI.Count),
          TI.Name};
}
714 | |
715 | /// This does the actual lookup of an intrinsic ID which matches the given |
716 | /// function name. |
/// This does the actual lookup of an intrinsic ID which matches the given
/// function name. Returns not_intrinsic when \p Name does not name (a mangled
/// instantiation of) a known intrinsic.
Intrinsic::ID Intrinsic::lookupIntrinsicID(StringRef Name) {
  // Narrow the search to the per-target (or generic) slice of the table.
  auto [NameOffsetTable, Target] = findTargetSubtable(Name);
  int Idx = lookupLLVMIntrinsicByName(NameOffsetTable, Name, Target);
  if (Idx == -1)
    return Intrinsic::not_intrinsic;

  // Intrinsic IDs correspond to the location in IntrinsicNameTable, but we have
  // an index into a sub-table.
  int Adjust = NameOffsetTable.data() - IntrinsicNameOffsetTable;
  Intrinsic::ID ID = static_cast<Intrinsic::ID>(Idx + Adjust);

  // If the intrinsic is not overloaded, require an exact match. If it is
  // overloaded, require either exact or prefix match.
  const auto MatchSize = IntrinsicNameTable[NameOffsetTable[Idx]].size();
  assert(Name.size() >= MatchSize && "Expected either exact or prefix match" );
  bool IsExactMatch = Name.size() == MatchSize;
  return IsExactMatch || Intrinsic::isOverloaded(id: ID) ? ID
                                                      : Intrinsic::not_intrinsic;
}
736 | |
737 | /// This defines the "Intrinsic::getAttributes(ID id)" method. |
738 | #define GET_INTRINSIC_ATTRIBUTES |
739 | #include "llvm/IR/IntrinsicImpl.inc" |
740 | #undef GET_INTRINSIC_ATTRIBUTES |
741 | |
742 | AttributeSet Intrinsic::getFnAttributes(LLVMContext &C, ID id) { |
743 | if (id == 0) |
744 | return AttributeSet(); |
745 | uint16_t PackedID = IntrinsicsToAttributesMap[id - 1]; |
746 | uint8_t FnAttrID = PackedID >> 8; |
747 | return getIntrinsicFnAttributeSet(C, ID: FnAttrID); |
748 | } |
749 | |
750 | Function *Intrinsic::getOrInsertDeclaration(Module *M, ID id, |
751 | ArrayRef<Type *> Tys) { |
752 | // There can never be multiple globals with the same name of different types, |
753 | // because intrinsics must be a specific type. |
754 | auto *FT = getType(Context&: M->getContext(), id, Tys); |
755 | return cast<Function>( |
756 | Val: M->getOrInsertFunction( |
757 | Name: Tys.empty() ? getName(id) : getName(Id: id, Tys, M, FT), T: FT) |
758 | .getCallee()); |
759 | } |
760 | |
761 | Function *Intrinsic::getDeclarationIfExists(const Module *M, ID id) { |
762 | return M->getFunction(Name: getName(id)); |
763 | } |
764 | |
765 | Function *Intrinsic::getDeclarationIfExists(Module *M, ID id, |
766 | ArrayRef<Type *> Tys, |
767 | FunctionType *FT) { |
768 | return M->getFunction(Name: getName(Id: id, Tys, M, FT)); |
769 | } |
770 | |
771 | // This defines the "Intrinsic::getIntrinsicForClangBuiltin()" method. |
772 | #define GET_LLVM_INTRINSIC_FOR_CLANG_BUILTIN |
773 | #include "llvm/IR/IntrinsicImpl.inc" |
774 | #undef GET_LLVM_INTRINSIC_FOR_CLANG_BUILTIN |
775 | |
776 | // This defines the "Intrinsic::getIntrinsicForMSBuiltin()" method. |
777 | #define GET_LLVM_INTRINSIC_FOR_MS_BUILTIN |
778 | #include "llvm/IR/IntrinsicImpl.inc" |
779 | #undef GET_LLVM_INTRINSIC_FOR_MS_BUILTIN |
780 | |
/// Return true if \p QID is one of the constrained floating-point intrinsics
/// enumerated in ConstrainedOps.def.
bool Intrinsic::isConstrainedFPIntrinsic(ID QID) {
  switch (QID) {
  // Expands into one 'case' label per constrained intrinsic listed in the
  // .def file; they all fall through to the 'return true' below.
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
#undef INSTRUCTION
    return true;
  default:
    return false;
  }
}
792 | |
/// Return true if the constrained intrinsic \p QID takes an explicit
/// rounding-mode operand (ROUND_MODE == 1 in ConstrainedOps.def); false for
/// all other IDs, including non-constrained intrinsics.
bool Intrinsic::hasConstrainedFPRoundingModeOperand(Intrinsic::ID QID) {
  switch (QID) {
  // Expands into one case per constrained intrinsic, each returning whether
  // its .def entry declares a rounding-mode operand.
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return ROUND_MODE == 1;
#include "llvm/IR/ConstrainedOps.def"
#undef INSTRUCTION
  default:
    return false;
  }
}
804 | |
// Pairs a type with the descriptor slice it still needs to be matched
// against; used to re-run matches that forward-reference a later argument.
using DeferredIntrinsicMatchPair =
    std::pair<Type *, ArrayRef<Intrinsic::IITDescriptor>>;
807 | |
/// Match \p Ty against the next descriptor(s) in \p Infos, consuming them.
///
/// Returns true on MISMATCH and false on success (note the inverted sense).
/// Overloaded argument types discovered along the way are appended to
/// \p ArgTys. A match that forward-references a not-yet-collected argument
/// type is queued on \p DeferredChecks (with the un-consumed descriptor
/// slice) and re-run later with \p IsDeferredCheck set.
static bool
matchIntrinsicType(Type *Ty, ArrayRef<Intrinsic::IITDescriptor> &Infos,
                   SmallVectorImpl<Type *> &ArgTys,
                   SmallVectorImpl<DeferredIntrinsicMatchPair> &DeferredChecks,
                   bool IsDeferredCheck) {
  using namespace Intrinsic;

  // If we ran out of descriptors, there are too many arguments.
  if (Infos.empty())
    return true;

  // Do this before slicing off the 'front' part
  auto InfosRef = Infos;
  auto DeferCheck = [&DeferredChecks, &InfosRef](Type *T) {
    DeferredChecks.emplace_back(Args&: T, Args&: InfosRef);
    return false;
  };

  IITDescriptor D = Infos.front();
  Infos = Infos.slice(N: 1);

  switch (D.Kind) {
  case IITDescriptor::Void:
    return !Ty->isVoidTy();
  case IITDescriptor::VarArg:
    return true;
  case IITDescriptor::MMX: {
    // MMX is represented as a <1 x i64> fixed vector.
    FixedVectorType *VT = dyn_cast<FixedVectorType>(Val: Ty);
    return !VT || VT->getNumElements() != 1 ||
           !VT->getElementType()->isIntegerTy(Bitwidth: 64);
  }
  case IITDescriptor::AMX:
    return !Ty->isX86_AMXTy();
  case IITDescriptor::Token:
    return !Ty->isTokenTy();
  case IITDescriptor::Metadata:
    return !Ty->isMetadataTy();
  case IITDescriptor::Half:
    return !Ty->isHalfTy();
  case IITDescriptor::BFloat:
    return !Ty->isBFloatTy();
  case IITDescriptor::Float:
    return !Ty->isFloatTy();
  case IITDescriptor::Double:
    return !Ty->isDoubleTy();
  case IITDescriptor::Quad:
    return !Ty->isFP128Ty();
  case IITDescriptor::PPCQuad:
    return !Ty->isPPC_FP128Ty();
  case IITDescriptor::Integer:
    return !Ty->isIntegerTy(Bitwidth: D.Integer_Width);
  case IITDescriptor::AArch64Svcount:
    return !isa<TargetExtType>(Val: Ty) ||
           cast<TargetExtType>(Val: Ty)->getName() != "aarch64.svcount" ;
  case IITDescriptor::Vector: {
    // The element type's descriptor(s) follow; match them recursively.
    VectorType *VT = dyn_cast<VectorType>(Val: Ty);
    return !VT || VT->getElementCount() != D.Vector_Width ||
           matchIntrinsicType(Ty: VT->getElementType(), Infos, ArgTys,
                              DeferredChecks, IsDeferredCheck);
  }
  case IITDescriptor::Pointer: {
    PointerType *PT = dyn_cast<PointerType>(Val: Ty);
    return !PT || PT->getAddressSpace() != D.Pointer_AddressSpace;
  }

  case IITDescriptor::Struct: {
    StructType *ST = dyn_cast<StructType>(Val: Ty);
    if (!ST || !ST->isLiteral() || ST->isPacked() ||
        ST->getNumElements() != D.Struct_NumElements)
      return true;

    // One descriptor (sequence) per struct element, consumed in order.
    for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
      if (matchIntrinsicType(Ty: ST->getElementType(N: i), Infos, ArgTys,
                             DeferredChecks, IsDeferredCheck))
        return true;
    return false;
  }

  case IITDescriptor::Argument:
    // If this is the second occurrence of an argument,
    // verify that the later instance matches the previous instance.
    if (D.getArgumentNumber() < ArgTys.size())
      return Ty != ArgTys[D.getArgumentNumber()];

    // Forward reference to an argument type we have not collected yet:
    // defer (a deferred check re-hitting this path is a table error).
    if (D.getArgumentNumber() > ArgTys.size() ||
        D.getArgumentKind() == IITDescriptor::AK_MatchType)
      return IsDeferredCheck || DeferCheck(Ty);

    assert(D.getArgumentNumber() == ArgTys.size() && !IsDeferredCheck &&
           "Table consistency error" );
    ArgTys.push_back(Elt: Ty);

    switch (D.getArgumentKind()) {
    case IITDescriptor::AK_Any:
      return false; // Success
    case IITDescriptor::AK_AnyInteger:
      return !Ty->isIntOrIntVectorTy();
    case IITDescriptor::AK_AnyFloat:
      return !Ty->isFPOrFPVectorTy();
    case IITDescriptor::AK_AnyVector:
      return !isa<VectorType>(Val: Ty);
    case IITDescriptor::AK_AnyPointer:
      return !isa<PointerType>(Val: Ty);
    default:
      break;
    }
    llvm_unreachable("all argument kinds not covered" );

  case IITDescriptor::ExtendArgument: {
    // If this is a forward reference, defer the check for later.
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    // Ty must be the referenced argument type with doubled element width.
    Type *NewTy = ArgTys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(Val: NewTy))
      NewTy = VectorType::getExtendedElementVectorType(VTy);
    else if (IntegerType *ITy = dyn_cast<IntegerType>(Val: NewTy))
      NewTy = IntegerType::get(C&: ITy->getContext(), NumBits: 2 * ITy->getBitWidth());
    else
      return true;

    return Ty != NewTy;
  }
  case IITDescriptor::TruncArgument: {
    // If this is a forward reference, defer the check for later.
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    // Ty must be the referenced argument type with halved element width.
    Type *NewTy = ArgTys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(Val: NewTy))
      NewTy = VectorType::getTruncatedElementVectorType(VTy);
    else if (IntegerType *ITy = dyn_cast<IntegerType>(Val: NewTy))
      NewTy = IntegerType::get(C&: ITy->getContext(), NumBits: ITy->getBitWidth() / 2);
    else
      return true;

    return Ty != NewTy;
  }
  case IITDescriptor::OneNthEltsVecArgument:
    // If this is a forward reference, defer the check for later.
    if (D.getRefArgNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);
    return !isa<VectorType>(Val: ArgTys[D.getRefArgNumber()]) ||
           VectorType::getOneNthElementsVectorType(
               VTy: cast<VectorType>(Val: ArgTys[D.getRefArgNumber()]),
               Denominator: D.getVectorDivisor()) != Ty;
  case IITDescriptor::SameVecWidthArgument: {
    if (D.getArgumentNumber() >= ArgTys.size()) {
      // Defer check and subsequent check for the vector element type.
      Infos = Infos.slice(N: 1);
      return IsDeferredCheck || DeferCheck(Ty);
    }
    auto *ReferenceType = dyn_cast<VectorType>(Val: ArgTys[D.getArgumentNumber()]);
    auto *ThisArgType = dyn_cast<VectorType>(Val: Ty);
    // Both must be vectors of the same number of elements or neither.
    if ((ReferenceType != nullptr) != (ThisArgType != nullptr))
      return true;
    Type *EltTy = Ty;
    if (ThisArgType) {
      if (ReferenceType->getElementCount() != ThisArgType->getElementCount())
        return true;
      EltTy = ThisArgType->getElementType();
    }
    // The next descriptor describes the (scalar) element type.
    return matchIntrinsicType(Ty: EltTy, Infos, ArgTys, DeferredChecks,
                              IsDeferredCheck);
  }
  case IITDescriptor::VecOfAnyPtrsToElt: {
    unsigned RefArgNumber = D.getRefArgNumber();
    if (RefArgNumber >= ArgTys.size()) {
      if (IsDeferredCheck)
        return true;
      // If forward referencing, already add the pointer-vector type and
      // defer the checks for later.
      ArgTys.push_back(Elt: Ty);
      return DeferCheck(Ty);
    }

    if (!IsDeferredCheck) {
      assert(D.getOverloadArgNumber() == ArgTys.size() &&
             "Table consistency error" );
      ArgTys.push_back(Elt: Ty);
    }

    // Verify the overloaded type "matches" the Ref type.
    // i.e. Ty is a vector with the same width as Ref.
    // Composed of pointers to the same element type as Ref.
    auto *ReferenceType = dyn_cast<VectorType>(Val: ArgTys[RefArgNumber]);
    auto *ThisArgVecTy = dyn_cast<VectorType>(Val: Ty);
    if (!ThisArgVecTy || !ReferenceType ||
        (ReferenceType->getElementCount() != ThisArgVecTy->getElementCount()))
      return true;
    return !ThisArgVecTy->getElementType()->isPointerTy();
  }
  case IITDescriptor::VecElementArgument: {
    // Ty must be the element type of the referenced vector argument.
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck ? true : DeferCheck(Ty);
    auto *ReferenceType = dyn_cast<VectorType>(Val: ArgTys[D.getArgumentNumber()]);
    return !ReferenceType || Ty != ReferenceType->getElementType();
  }
  case IITDescriptor::Subdivide2Argument:
  case IITDescriptor::Subdivide4Argument: {
    // If this is a forward reference, defer the check for later.
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    Type *NewTy = ArgTys[D.getArgumentNumber()];
    if (auto *VTy = dyn_cast<VectorType>(Val: NewTy)) {
      // Subdivide2 halves the element width once, Subdivide4 twice.
      int SubDivs = D.Kind == IITDescriptor::Subdivide2Argument ? 1 : 2;
      NewTy = VectorType::getSubdividedVectorType(VTy, NumSubdivs: SubDivs);
      return Ty != NewTy;
    }
    return true;
  }
  case IITDescriptor::VecOfBitcastsToInt: {
    // Ty must be the integer-element counterpart of the referenced vector.
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);
    auto *ReferenceType = dyn_cast<VectorType>(Val: ArgTys[D.getArgumentNumber()]);
    auto *ThisArgVecTy = dyn_cast<VectorType>(Val: Ty);
    if (!ThisArgVecTy || !ReferenceType)
      return true;
    return ThisArgVecTy != VectorType::getInteger(VTy: ReferenceType);
  }
  }
  llvm_unreachable("unhandled" );
}
1033 | |
1034 | Intrinsic::MatchIntrinsicTypesResult |
1035 | Intrinsic::matchIntrinsicSignature(FunctionType *FTy, |
1036 | ArrayRef<Intrinsic::IITDescriptor> &Infos, |
1037 | SmallVectorImpl<Type *> &ArgTys) { |
1038 | SmallVector<DeferredIntrinsicMatchPair, 2> DeferredChecks; |
1039 | if (matchIntrinsicType(Ty: FTy->getReturnType(), Infos, ArgTys, DeferredChecks, |
1040 | IsDeferredCheck: false)) |
1041 | return MatchIntrinsicTypes_NoMatchRet; |
1042 | |
1043 | unsigned NumDeferredReturnChecks = DeferredChecks.size(); |
1044 | |
1045 | for (auto *Ty : FTy->params()) |
1046 | if (matchIntrinsicType(Ty, Infos, ArgTys, DeferredChecks, IsDeferredCheck: false)) |
1047 | return MatchIntrinsicTypes_NoMatchArg; |
1048 | |
1049 | for (unsigned I = 0, E = DeferredChecks.size(); I != E; ++I) { |
1050 | DeferredIntrinsicMatchPair &Check = DeferredChecks[I]; |
1051 | if (matchIntrinsicType(Ty: Check.first, Infos&: Check.second, ArgTys, DeferredChecks, |
1052 | IsDeferredCheck: true)) |
1053 | return I < NumDeferredReturnChecks ? MatchIntrinsicTypes_NoMatchRet |
1054 | : MatchIntrinsicTypes_NoMatchArg; |
1055 | } |
1056 | |
1057 | return MatchIntrinsicTypes_Match; |
1058 | } |
1059 | |
1060 | bool Intrinsic::matchIntrinsicVarArg( |
1061 | bool isVarArg, ArrayRef<Intrinsic::IITDescriptor> &Infos) { |
1062 | // If there are no descriptors left, then it can't be a vararg. |
1063 | if (Infos.empty()) |
1064 | return isVarArg; |
1065 | |
1066 | // There should be only one descriptor remaining at this point. |
1067 | if (Infos.size() != 1) |
1068 | return true; |
1069 | |
1070 | // Check and verify the descriptor. |
1071 | IITDescriptor D = Infos.front(); |
1072 | Infos = Infos.slice(N: 1); |
1073 | if (D.Kind == IITDescriptor::VarArg) |
1074 | return !isVarArg; |
1075 | |
1076 | return true; |
1077 | } |
1078 | |
1079 | bool Intrinsic::getIntrinsicSignature(Intrinsic::ID ID, FunctionType *FT, |
1080 | SmallVectorImpl<Type *> &ArgTys) { |
1081 | if (!ID) |
1082 | return false; |
1083 | |
1084 | SmallVector<Intrinsic::IITDescriptor, 8> Table; |
1085 | getIntrinsicInfoTableEntries(id: ID, T&: Table); |
1086 | ArrayRef<Intrinsic::IITDescriptor> TableRef = Table; |
1087 | |
1088 | if (Intrinsic::matchIntrinsicSignature(FTy: FT, Infos&: TableRef, ArgTys) != |
1089 | Intrinsic::MatchIntrinsicTypesResult::MatchIntrinsicTypes_Match) { |
1090 | return false; |
1091 | } |
1092 | if (Intrinsic::matchIntrinsicVarArg(isVarArg: FT->isVarArg(), Infos&: TableRef)) |
1093 | return false; |
1094 | return true; |
1095 | } |
1096 | |
1097 | bool Intrinsic::getIntrinsicSignature(Function *F, |
1098 | SmallVectorImpl<Type *> &ArgTys) { |
1099 | return getIntrinsicSignature(ID: F->getIntrinsicID(), FT: F->getFunctionType(), |
1100 | ArgTys); |
1101 | } |
1102 | |
1103 | std::optional<Function *> Intrinsic::remangleIntrinsicFunction(Function *F) { |
1104 | SmallVector<Type *, 4> ArgTys; |
1105 | if (!getIntrinsicSignature(F, ArgTys)) |
1106 | return std::nullopt; |
1107 | |
1108 | Intrinsic::ID ID = F->getIntrinsicID(); |
1109 | StringRef Name = F->getName(); |
1110 | std::string WantedName = |
1111 | Intrinsic::getName(Id: ID, Tys: ArgTys, M: F->getParent(), FT: F->getFunctionType()); |
1112 | if (Name == WantedName) |
1113 | return std::nullopt; |
1114 | |
1115 | Function *NewDecl = [&] { |
1116 | if (auto *ExistingGV = F->getParent()->getNamedValue(Name: WantedName)) { |
1117 | if (auto *ExistingF = dyn_cast<Function>(Val: ExistingGV)) |
1118 | if (ExistingF->getFunctionType() == F->getFunctionType()) |
1119 | return ExistingF; |
1120 | |
1121 | // The name already exists, but is not a function or has the wrong |
1122 | // prototype. Make place for the new one by renaming the old version. |
1123 | // Either this old version will be removed later on or the module is |
1124 | // invalid and we'll get an error. |
1125 | ExistingGV->setName(WantedName + ".renamed" ); |
1126 | } |
1127 | return Intrinsic::getOrInsertDeclaration(M: F->getParent(), id: ID, Tys: ArgTys); |
1128 | }(); |
1129 | |
1130 | NewDecl->setCallingConv(F->getCallingConv()); |
1131 | assert(NewDecl->getFunctionType() == F->getFunctionType() && |
1132 | "Shouldn't change the signature" ); |
1133 | return NewDecl; |
1134 | } |
1135 | |