1//===-- Intrinsics.cpp - Intrinsic Function Handling ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements functions required for supporting intrinsic functions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/IR/Intrinsics.h"
14#include "llvm/ADT/StringExtras.h"
15#include "llvm/ADT/StringTable.h"
16#include "llvm/IR/ConstantRange.h"
17#include "llvm/IR/Function.h"
18#include "llvm/IR/IntrinsicsAArch64.h"
19#include "llvm/IR/IntrinsicsAMDGPU.h"
20#include "llvm/IR/IntrinsicsARM.h"
21#include "llvm/IR/IntrinsicsBPF.h"
22#include "llvm/IR/IntrinsicsHexagon.h"
23#include "llvm/IR/IntrinsicsLoongArch.h"
24#include "llvm/IR/IntrinsicsMips.h"
25#include "llvm/IR/IntrinsicsNVPTX.h"
26#include "llvm/IR/IntrinsicsPowerPC.h"
27#include "llvm/IR/IntrinsicsR600.h"
28#include "llvm/IR/IntrinsicsRISCV.h"
29#include "llvm/IR/IntrinsicsS390.h"
30#include "llvm/IR/IntrinsicsSPIRV.h"
31#include "llvm/IR/IntrinsicsVE.h"
32#include "llvm/IR/IntrinsicsX86.h"
33#include "llvm/IR/IntrinsicsXCore.h"
34#include "llvm/IR/Module.h"
35#include "llvm/IR/NVVMIntrinsicUtils.h"
36#include "llvm/IR/Type.h"
37
38using namespace llvm;
39
40/// Table of string intrinsic names indexed by enum value.
41#define GET_INTRINSIC_NAME_TABLE
42#include "llvm/IR/IntrinsicImpl.inc"
43
44StringRef Intrinsic::getBaseName(ID id) {
45 assert(id < num_intrinsics && "Invalid intrinsic ID!");
46 return IntrinsicNameTable[IntrinsicNameOffsetTable[id]];
47}
48
49StringRef Intrinsic::getName(ID id) {
50 assert(id < num_intrinsics && "Invalid intrinsic ID!");
51 assert(!Intrinsic::isOverloaded(id) &&
52 "This version of getName does not support overloading");
53 return getBaseName(id);
54}
55
56/// Returns a stable mangling for the type specified for use in the name
57/// mangling scheme used by 'any' types in intrinsic signatures. The mangling
58/// of named types is simply their name. Manglings for unnamed types consist
59/// of a prefix ('p' for pointers, 'a' for arrays, 'f_' for functions)
60/// combined with the mangling of their component types. A vararg function
61/// type will have a suffix of 'vararg'. Since function types can contain
62/// other function types, we close a function type mangling with suffix 'f'
/// which can't be confused with its prefix. This ensures we don't have
64/// collisions between two unrelated function types. Otherwise, you might
65/// parse ffXX as f(fXX) or f(fX)X. (X is a placeholder for any other type.)
66/// The HasUnnamedType boolean is set if an unnamed type was encountered,
67/// indicating that extra care must be taken to ensure a unique name.
/// Produce the mangled-name fragment for \p Ty (see the scheme described
/// above). Sets \p HasUnnamedType when an unnamed (literal-less) struct is
/// encountered, signalling that the caller must unique the final name.
static std::string getMangledTypeStr(Type *Ty, bool &HasUnnamedType) {
  std::string Result;
  if (PointerType *PTyp = dyn_cast<PointerType>(Val: Ty)) {
    // Pointers mangle only their address space; no pointee type is encoded.
    Result += "p" + utostr(X: PTyp->getAddressSpace());
  } else if (ArrayType *ATyp = dyn_cast<ArrayType>(Val: Ty)) {
    // 'a<count><element mangling>'.
    Result += "a" + utostr(X: ATyp->getNumElements()) +
              getMangledTypeStr(Ty: ATyp->getElementType(), HasUnnamedType);
  } else if (StructType *STyp = dyn_cast<StructType>(Val: Ty)) {
    if (!STyp->isLiteral()) {
      // Named structs mangle as 's_<name>'; an unnamed struct contributes
      // nothing here and instead flags the caller to unique the result.
      Result += "s_";
      if (STyp->hasName())
        Result += STyp->getName();
      else
        HasUnnamedType = true;
    } else {
      // Literal structs mangle as 'sl_' followed by each element's mangling.
      Result += "sl_";
      for (auto *Elem : STyp->elements())
        Result += getMangledTypeStr(Ty: Elem, HasUnnamedType);
    }
    // Ensure nested structs are distinguishable.
    Result += "s";
  } else if (FunctionType *FT = dyn_cast<FunctionType>(Val: Ty)) {
    // 'f_<ret><params...>[vararg]f' — the closing 'f' keeps nested function
    // types unambiguous (see the comment block above this function).
    Result += "f_" + getMangledTypeStr(Ty: FT->getReturnType(), HasUnnamedType);
    for (size_t i = 0; i < FT->getNumParams(); i++)
      Result += getMangledTypeStr(Ty: FT->getParamType(i), HasUnnamedType);
    if (FT->isVarArg())
      Result += "vararg";
    // Ensure nested function types are distinguishable.
    Result += "f";
  } else if (VectorType *VTy = dyn_cast<VectorType>(Val: Ty)) {
    // '[nx]v<minElts><element mangling>'; 'nx' marks scalable vectors.
    ElementCount EC = VTy->getElementCount();
    if (EC.isScalable())
      Result += "nx";
    Result += "v" + utostr(X: EC.getKnownMinValue()) +
              getMangledTypeStr(Ty: VTy->getElementType(), HasUnnamedType);
  } else if (TargetExtType *TETy = dyn_cast<TargetExtType>(Val: Ty)) {
    // 't<name>[_<type param>...][_<int param>...]t'.
    Result += "t";
    Result += TETy->getName();
    for (Type *ParamTy : TETy->type_params())
      Result += "_" + getMangledTypeStr(Ty: ParamTy, HasUnnamedType);
    for (unsigned IntParam : TETy->int_params())
      Result += "_" + utostr(X: IntParam);
    // Ensure nested target extension types are distinguishable.
    Result += "t";
  } else if (Ty) {
    // Primitive types use fixed spellings.
    switch (Ty->getTypeID()) {
    default:
      llvm_unreachable("Unhandled type");
    case Type::VoidTyID:
      Result += "isVoid";
      break;
    case Type::MetadataTyID:
      Result += "Metadata";
      break;
    case Type::HalfTyID:
      Result += "f16";
      break;
    case Type::BFloatTyID:
      Result += "bf16";
      break;
    case Type::FloatTyID:
      Result += "f32";
      break;
    case Type::DoubleTyID:
      Result += "f64";
      break;
    case Type::X86_FP80TyID:
      Result += "f80";
      break;
    case Type::FP128TyID:
      Result += "f128";
      break;
    case Type::PPC_FP128TyID:
      Result += "ppcf128";
      break;
    case Type::X86_AMXTyID:
      Result += "x86amx";
      break;
    case Type::IntegerTyID:
      Result += "i" + utostr(X: cast<IntegerType>(Val: Ty)->getBitWidth());
      break;
    // NOTE(review): ByteTyID/ByteType is not part of upstream LLVM; this
    // appears to be a downstream extension mangled as 'b<width>'.
    case Type::ByteTyID:
      Result += "b" + utostr(X: cast<ByteType>(Val: Ty)->getBitWidth());
      break;
    }
  }
  return Result;
}
156
/// Build the record name for intrinsic \p Id: the base name plus a
/// '.'-separated mangled suffix for every overload type. When any overload
/// type contains an unnamed struct, the name must additionally be uniqued
/// through \p M (which then must be non-null). \p FT, if provided, is only
/// sanity-checked against the type computed from the overload list.
static std::string getIntrinsicNameImpl(Intrinsic::ID Id,
                                        ArrayRef<Type *> OverloadTys, Module *M,
                                        FunctionType *FT,
                                        bool EarlyModuleCheck) {

  assert(Id < Intrinsic::num_intrinsics && "Invalid intrinsic ID!");
  assert((OverloadTys.empty() || Intrinsic::isOverloaded(Id)) &&
         "This version of getName is for overloaded intrinsics only");
  // EarlyModuleCheck is only consulted inside asserts; silence -Wunused in
  // release builds.
  (void)EarlyModuleCheck;
  assert((!EarlyModuleCheck || M ||
          !any_of(OverloadTys, llvm::IsaPred<PointerType>)) &&
         "Intrinsic overloading on pointer types need to provide a Module");
  bool HasUnnamedType = false;
  std::string Result(Intrinsic::getBaseName(id: Id));
  for (Type *Ty : OverloadTys)
    Result += "." + getMangledTypeStr(Ty, HasUnnamedType);
  if (HasUnnamedType) {
    // An unnamed struct appeared somewhere in the suffix; defer to the
    // module to append a unique discriminator for this exact prototype.
    assert(M && "unnamed types need a module");
    if (!FT)
      FT = Intrinsic::getType(Context&: M->getContext(), id: Id, OverloadTys);
    else
      assert(FT == Intrinsic::getType(M->getContext(), Id, OverloadTys) &&
             "Provided FunctionType must match arguments");
    return M->getUniqueIntrinsicName(BaseName: Result, Id, Proto: FT);
  }
  return Result;
}
184
185std::string Intrinsic::getName(ID Id, ArrayRef<Type *> OverloadTys, Module *M,
186 FunctionType *FT) {
187 assert(M && "We need to have a Module");
188 return getIntrinsicNameImpl(Id, OverloadTys, M, FT, EarlyModuleCheck: true);
189}
190
191std::string Intrinsic::getNameNoUnnamedTypes(ID Id,
192 ArrayRef<Type *> OverloadTys) {
193 return getIntrinsicNameImpl(Id, OverloadTys, M: nullptr, FT: nullptr, EarlyModuleCheck: false);
194}
195
196/// IIT_Info - These are enumerators that describe the entries returned by the
197/// getIntrinsicInfoTableEntries function.
198///
199/// Defined in Intrinsics.td.
200enum IIT_Info {
201#define GET_INTRINSIC_IITINFO
202#include "llvm/IR/IntrinsicImpl.inc"
203};
204
/// Decode one entry of the generated intrinsic type-info byte stream starting
/// at Infos[NextElt] (advancing NextElt) and append the corresponding
/// IITDescriptor(s) to \p OutputTable. Compound encodings (vectors, structs)
/// recurse to decode their element types. \p LastInfo is the opcode decoded
/// immediately before this one; it is used to detect the IIT_SCALABLE_VEC
/// prefix, which marks the vector opcode that follows it as scalable.
static void
DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
              IIT_Info LastInfo,
              SmallVectorImpl<Intrinsic::IITDescriptor> &OutputTable) {
  using namespace Intrinsic;

  bool IsScalableVector = LastInfo == IIT_SCALABLE_VEC;

  IIT_Info Info = IIT_Info(Infos[NextElt++]);

  switch (Info) {
  // Simple, self-contained type opcodes: one descriptor, no operands.
  case IIT_Done:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Void, Field: 0));
    return;
  case IIT_VARARG:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::VarArg, Field: 0));
    return;
  case IIT_MMX:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::MMX, Field: 0));
    return;
  case IIT_AMX:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::AMX, Field: 0));
    return;
  case IIT_TOKEN:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Token, Field: 0));
    return;
  case IIT_METADATA:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Metadata, Field: 0));
    return;
  case IIT_F16:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Half, Field: 0));
    return;
  case IIT_BF16:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::BFloat, Field: 0));
    return;
  case IIT_F32:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Float, Field: 0));
    return;
  case IIT_F64:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Double, Field: 0));
    return;
  case IIT_F128:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Quad, Field: 0));
    return;
  case IIT_PPCF128:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::PPCQuad, Field: 0));
    return;
  // Fixed-width integers encode the bit width in the descriptor field.
  case IIT_I1:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 1));
    return;
  case IIT_I2:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 2));
    return;
  case IIT_I4:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 4));
    return;
  case IIT_AARCH64_SVCOUNT:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::AArch64Svcount, Field: 0));
    return;
  case IIT_I8:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 8));
    return;
  case IIT_I16:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 16));
    return;
  case IIT_I32:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 32));
    return;
  case IIT_I64:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 64));
    return;
  case IIT_I128:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Integer, Field: 128));
    return;
  // Vector opcodes: push the vector descriptor, then recurse once to decode
  // the element type that immediately follows in the stream.
  case IIT_V1:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 1, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V2:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 2, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V3:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 3, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V4:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 4, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V6:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 6, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V8:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 8, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V10:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 10, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V16:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 16, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V32:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 32, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V64:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 64, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V128:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 128, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V256:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 256, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V512:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 512, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V1024:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 1024, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V2048:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 2048, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  case IIT_V4096:
    OutputTable.push_back(Elt: IITDescriptor::getVector(Width: 4096, IsScalable: IsScalableVector));
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  // Pointers: the field carries the address space. The 10/20 values are the
  // fixed address spaces used for WebAssembly externref/funcref.
  case IIT_EXTERNREF:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Pointer, Field: 10));
    return;
  case IIT_FUNCREF:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Pointer, Field: 20));
    return;
  case IIT_PTR:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Pointer, Field: 0));
    return;
  case IIT_PTR_AS: // pointer with address space.
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::Pointer, Field: Infos[NextElt++]));
    return;
  // Overload / argument-referencing opcodes: an optional trailing byte names
  // the overload argument index (0 if the stream ends here).
  case IIT_ANY: {
    unsigned OverloadInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::Overloaded, Field: OverloadInfo));
    return;
  }
  case IIT_EXTEND_ARG: {
    unsigned OverloadIndex = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::Extend, Field: OverloadIndex));
    return;
  }
  case IIT_TRUNC_ARG: {
    unsigned OverloadIndex = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::Trunc, Field: OverloadIndex));
    return;
  }
  case IIT_ONE_NTH_ELTS_VEC_ARG: {
    // Two operands: the referenced overload index and the divisor N, packed
    // into the hi/lo halves of the descriptor field.
    unsigned short OverloadIndex =
        (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    unsigned short N = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::OneNthEltsVec,
                                          /*Hi=*/N, /*Lo=*/OverloadIndex));
    return;
  }
  case IIT_SAME_VEC_WIDTH_ARG: {
    unsigned OverloadIndex = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::SameVecWidth, Field: OverloadIndex));
    return;
  }
  case IIT_VEC_OF_ANYPTRS_TO_ELT: {
    unsigned short OverloadIndex =
        (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    unsigned short RefOverloadIndex =
        (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::VecOfAnyPtrsToElt,
                                          /*Hi=*/RefOverloadIndex,
                                          /*Lo=*/OverloadIndex));
    return;
  }
  case IIT_EMPTYSTRUCT:
    OutputTable.push_back(Elt: IITDescriptor::get(K: IITDescriptor::Struct, Field: 0));
    return;
  case IIT_STRUCT: {
    // The encoded byte stores (#elements - 2); recurse once per element.
    unsigned StructElts = Infos[NextElt++] + 2;

    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::Struct, Field: StructElts));

    for (unsigned i = 0; i != StructElts; ++i)
      DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  }
  case IIT_SUBDIVIDE2_ARG: {
    unsigned OverloadIndex = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::Subdivide2, Field: OverloadIndex));
    return;
  }
  case IIT_SUBDIVIDE4_ARG: {
    unsigned OverloadIndex = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::Subdivide4, Field: OverloadIndex));
    return;
  }
  case IIT_VEC_ELEMENT: {
    unsigned OverloadIndex = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::VecElement, Field: OverloadIndex));
    return;
  }
  case IIT_SCALABLE_VEC: {
    // Pure prefix: emits nothing itself; the recursive call sees this opcode
    // as LastInfo and marks the following vector as scalable.
    DecodeIITType(NextElt, Infos, LastInfo: Info, OutputTable);
    return;
  }
  case IIT_VEC_OF_BITCASTS_TO_INT: {
    unsigned OverloadIndex = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        Elt: IITDescriptor::get(K: IITDescriptor::VecOfBitcastsToInt, Field: OverloadIndex));
    return;
  }
  }
  llvm_unreachable("unhandled");
}
442
443#define GET_INTRINSIC_GENERATOR_GLOBAL
444#include "llvm/IR/IntrinsicImpl.inc"
445
/// Expand the packed IIT table entry for intrinsic \p id into a sequence of
/// IITDescriptors in \p T. Short encodings are stored inline as nibbles in
/// the fixed table; long ones are offsets into IIT_LongEncodingTable.
void Intrinsic::getIntrinsicInfoTableEntries(
    ID id, SmallVectorImpl<IITDescriptor> &T) {
  // Note that `FixedEncodingTy` is defined in IntrinsicImpl.inc and can be
  // uint16_t or uint32_t based on the value of `Use16BitFixedEncoding` in
  // IntrinsicEmitter.cpp.
  constexpr unsigned FixedEncodingBits = sizeof(FixedEncodingTy) * CHAR_BIT;
  constexpr unsigned MSBPosition = FixedEncodingBits - 1;
  // Mask with all bits 1 except the most significant bit.
  constexpr unsigned Mask = (1U << MSBPosition) - 1;

  // IIT_Table has no entry for not_intrinsic (ID 0), hence the -1.
  FixedEncodingTy TableVal = IIT_Table[id - 1];

  // Array to hold the inlined fixed encoding values expanded from nibbles to
  // bytes. Its size can be at most FixedEncodingBits / 4, i.e., the number
  // of nibbles that can fit in `FixedEncodingTy`.
  unsigned char IITValues[FixedEncodingBits / 4];

  ArrayRef<unsigned char> IITEntries;
  unsigned NextElt = 0;
  // Check to see if the intrinsic's type was inlined in the fixed encoding
  // table.
  if (TableVal >> MSBPosition) {
    // This is an offset into the IIT_LongEncodingTable.
    IITEntries = IIT_LongEncodingTable;

    // Strip sentinel bit.
    NextElt = TableVal & Mask;
  } else {
    // If the entry was encoded into a single word in the table itself, decode
    // it from an array of nibbles to an array of bytes.
    do {
      IITValues[NextElt++] = TableVal & 0xF;
      TableVal >>= 4;
    } while (TableVal);

    IITEntries = ArrayRef(IITValues).take_front(N: NextElt);
    NextElt = 0;
  }

  // Okay, decode the table into the output vector of IITDescriptors.
  DecodeIITType(NextElt, Infos: IITEntries, LastInfo: IIT_Done, OutputTable&: T);
  while (NextElt != IITEntries.size() && IITEntries[NextElt] != 0)
    DecodeIITType(NextElt, Infos: IITEntries, LastInfo: IIT_Done, OutputTable&: T);
}
490
/// Materialize the next IITDescriptor from the front of \p Infos as a
/// concrete IR Type, consuming descriptors as it goes (compound kinds
/// recurse). Overload- and argument-dependent kinds are resolved against the
/// concrete types in \p OverloadTys.
static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
                             ArrayRef<Type *> OverloadTys,
                             LLVMContext &Context) {
  using namespace Intrinsic;

  // Pop the next descriptor off the front of the list.
  IITDescriptor D = Infos.front();
  Infos = Infos.slice(N: 1);

  switch (D.Kind) {
  case IITDescriptor::Void:
    return Type::getVoidTy(C&: Context);
  case IITDescriptor::VarArg:
    // VarArg is represented as a trailing void argument; getType strips it.
    return Type::getVoidTy(C&: Context);
  case IITDescriptor::MMX:
    return llvm::FixedVectorType::get(ElementType: llvm::IntegerType::get(C&: Context, NumBits: 64), NumElts: 1);
  case IITDescriptor::AMX:
    return Type::getX86_AMXTy(C&: Context);
  case IITDescriptor::Token:
    return Type::getTokenTy(C&: Context);
  case IITDescriptor::Metadata:
    return Type::getMetadataTy(C&: Context);
  case IITDescriptor::Half:
    return Type::getHalfTy(C&: Context);
  case IITDescriptor::BFloat:
    return Type::getBFloatTy(C&: Context);
  case IITDescriptor::Float:
    return Type::getFloatTy(C&: Context);
  case IITDescriptor::Double:
    return Type::getDoubleTy(C&: Context);
  case IITDescriptor::Quad:
    return Type::getFP128Ty(C&: Context);
  case IITDescriptor::PPCQuad:
    return Type::getPPC_FP128Ty(C&: Context);
  case IITDescriptor::AArch64Svcount:
    return TargetExtType::get(Context, Name: "aarch64.svcount");

  case IITDescriptor::Integer:
    return IntegerType::get(C&: Context, NumBits: D.IntegerWidth);
  case IITDescriptor::Vector:
    // Element type follows in the descriptor stream.
    return VectorType::get(ElementType: DecodeFixedType(Infos, OverloadTys, Context),
                           EC: D.VectorWidth);
  case IITDescriptor::Pointer:
    return PointerType::get(C&: Context, AddressSpace: D.PointerAddressSpace);
  case IITDescriptor::Struct: {
    // Struct element types follow in the descriptor stream.
    SmallVector<Type *, 8> Elts;
    for (unsigned i = 0, e = D.StructNumElements; i != e; ++i)
      Elts.push_back(Elt: DecodeFixedType(Infos, OverloadTys, Context));
    return StructType::get(Context, Elements: Elts);
  }
  // For any overload kind or partially dependent type, substitute it with the
  // corresponding concrete type from OverloadTys.
  case IITDescriptor::Overloaded:
  case IITDescriptor::VecOfAnyPtrsToElt:
    return OverloadTys[D.getOverloadIndex()];
  case IITDescriptor::Extend: {
    // Double the (element) bit width of the referenced overload type.
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    if (VectorType *VTy = dyn_cast<VectorType>(Val: Ty))
      return VectorType::getExtendedElementVectorType(VTy);

    return IntegerType::get(C&: Context, NumBits: 2 * cast<IntegerType>(Val: Ty)->getBitWidth());
  }
  case IITDescriptor::Trunc: {
    // Halve the (element) bit width of the referenced overload type.
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    if (VectorType *VTy = dyn_cast<VectorType>(Val: Ty))
      return VectorType::getTruncatedElementVectorType(VTy);

    IntegerType *ITy = cast<IntegerType>(Val: Ty);
    assert(ITy->getBitWidth() % 2 == 0);
    return IntegerType::get(C&: Context, NumBits: ITy->getBitWidth() / 2);
  }
  case IITDescriptor::Subdivide2:
  case IITDescriptor::Subdivide4: {
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    VectorType *VTy = dyn_cast<VectorType>(Val: Ty);
    assert(VTy && "Expected overload type to be a Vector Type");
    int SubDivs = D.Kind == IITDescriptor::Subdivide2 ? 1 : 2;
    return VectorType::getSubdividedVectorType(VTy, NumSubdivs: SubDivs);
  }
  case IITDescriptor::OneNthEltsVec:
    return VectorType::getOneNthElementsVectorType(
        VTy: cast<VectorType>(Val: OverloadTys[D.getOverloadIndex()]),
        Denominator: D.getVectorDivisor());
  case IITDescriptor::SameVecWidth: {
    // Same element count as the referenced type, element type from stream.
    Type *EltTy = DecodeFixedType(Infos, OverloadTys, Context);
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    if (auto *VTy = dyn_cast<VectorType>(Val: Ty))
      return VectorType::get(ElementType: EltTy, EC: VTy->getElementCount());
    return EltTy;
  }
  case IITDescriptor::VecElement: {
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    if (VectorType *VTy = dyn_cast<VectorType>(Val: Ty))
      return VTy->getElementType();
    llvm_unreachable("Expected overload type to be a Vector Type");
  }
  case IITDescriptor::VecOfBitcastsToInt: {
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    VectorType *VTy = dyn_cast<VectorType>(Val: Ty);
    assert(VTy && "Expected overload type to be a Vector Type");
    return VectorType::getInteger(VTy);
  }
  }
  llvm_unreachable("unhandled");
}
595
596FunctionType *Intrinsic::getType(LLVMContext &Context, ID id,
597 ArrayRef<Type *> OverloadTys) {
598 SmallVector<IITDescriptor, 8> Table;
599 getIntrinsicInfoTableEntries(id, T&: Table);
600
601 ArrayRef<IITDescriptor> TableRef = Table;
602 Type *ResultTy = DecodeFixedType(Infos&: TableRef, OverloadTys, Context);
603
604 SmallVector<Type *, 8> ArgTys;
605 while (!TableRef.empty())
606 ArgTys.push_back(Elt: DecodeFixedType(Infos&: TableRef, OverloadTys, Context));
607
608 // VarArg intrinsics encode a void type as the last argument type. Detect that
609 // and then drop the void argument.
610 bool IsVarArg = false;
611 if (!ArgTys.empty() && ArgTys.back()->isVoidTy()) {
612 ArgTys.pop_back();
613 IsVarArg = true;
614 }
615 return FunctionType::get(Result: ResultTy, Params: ArgTys, isVarArg: IsVarArg);
616}
617
/// Returns true if the intrinsic takes overloaded (type-mangled) arguments.
/// The body is a generated lookup table from IntrinsicImpl.inc.
bool Intrinsic::isOverloaded(ID id) {
#define GET_INTRINSIC_OVERLOAD_TABLE
#include "llvm/IR/IntrinsicImpl.inc"
}
622
/// Returns true if the intrinsic's arguments get pretty-printed.
/// The body is a generated lookup table from IntrinsicImpl.inc.
bool Intrinsic::hasPrettyPrintedArgs(ID id){
#define GET_INTRINSIC_PRETTY_PRINT_TABLE
#include "llvm/IR/IntrinsicImpl.inc"
}
627
628/// Table of per-target intrinsic name tables.
629#define GET_INTRINSIC_TARGET_DATA
630#include "llvm/IR/IntrinsicImpl.inc"
631
632bool Intrinsic::isTargetIntrinsic(Intrinsic::ID IID) {
633 return IID > TargetInfos[0].Count;
634}
635
636/// Looks up Name in NameTable via binary search. NameTable must be sorted
637/// and all entries must start with "llvm.". If NameTable contains an exact
638/// match for Name or a prefix of Name followed by a dot, its index in
639/// NameTable is returned. Otherwise, -1 is returned.
/// Binary-search \p NameOffsetTable (offsets into IntrinsicNameTable) for
/// \p Name. Returns the index of an exact match, or of an entry that is a
/// '.'-terminated prefix of \p Name (the overloaded-suffix case); -1 if
/// neither exists. \p Target, when non-empty, is the already-verified target
/// component of the name and is skipped during comparison.
static int lookupLLVMIntrinsicByName(ArrayRef<unsigned> NameOffsetTable,
                                     StringRef Name, StringRef Target = "") {
  assert(Name.starts_with("llvm.") && "Unexpected intrinsic prefix");
  assert(Name.drop_front(5).starts_with(Target) && "Unexpected target");

  // Do successive binary searches of the dotted name components. For
  // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
  // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
  // "llvm.gc.experimental.statepoint", and then we will stop as the range is
  // size 1. During the search, we can skip the prefix that we already know is
  // identical. By using strncmp we consider names with differing suffixes to
  // be part of the equal range.
  size_t CmpEnd = 4; // Skip the "llvm" component.
  if (!Target.empty())
    CmpEnd += 1 + Target.size(); // skip the .target component.

  const unsigned *Low = NameOffsetTable.begin();
  const unsigned *High = NameOffsetTable.end();
  // LastLow remembers the start of the last non-empty range, so a prefix
  // match can still be reported after the range shrinks to empty.
  const unsigned *LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    // Extend the compared region to the next dotted component of Name.
    size_t CmpStart = CmpEnd;
    CmpEnd = Name.find(C: '.', From: CmpStart + 1);
    CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
    auto Cmp = [CmpStart, CmpEnd](auto LHS, auto RHS) {
      // `equal_range` requires the comparison to work with either side being an
      // offset or the value. Detect which kind each side is to set up the
      // compared strings.
      const char *LHSStr;
      if constexpr (std::is_integral_v<decltype(LHS)>)
        LHSStr = IntrinsicNameTable.getCString(O: LHS);
      else
        LHSStr = LHS;

      const char *RHSStr;
      if constexpr (std::is_integral_v<decltype(RHS)>)
        RHSStr = IntrinsicNameTable.getCString(O: RHS);
      else
        RHSStr = RHS;

      return strncmp(s1: LHSStr + CmpStart, s2: RHSStr + CmpStart, n: CmpEnd - CmpStart) <
             0;
    };
    LastLow = Low;
    std::tie(args&: Low, args&: High) = std::equal_range(first: Low, last: High, val: Name.data(), comp: Cmp);
  }
  // If the final range is still non-empty, its first element is the candidate.
  if (High - Low > 0)
    LastLow = Low;

  if (LastLow == NameOffsetTable.end())
    return -1;
  // Accept either an exact match or a prefix of Name ending at a '.'.
  StringRef NameFound = IntrinsicNameTable[*LastLow];
  if (Name == NameFound ||
      (Name.starts_with(Prefix: NameFound) && Name[NameFound.size()] == '.'))
    return LastLow - NameOffsetTable.begin();
  return -1;
}
696
697/// Find the segment of \c IntrinsicNameOffsetTable for intrinsics with the same
698/// target as \c Name, or the generic table if \c Name is not target specific.
699///
700/// Returns the relevant slice of \c IntrinsicNameOffsetTable and the target
701/// name.
/// Find the segment of \c IntrinsicNameOffsetTable for intrinsics with the
/// same target as \c Name, or the generic table if \c Name is not target
/// specific.
///
/// Returns the relevant slice of \c IntrinsicNameOffsetTable and the target
/// name.
static std::pair<ArrayRef<unsigned>, StringRef>
findTargetSubtable(StringRef Name) {
  assert(Name.starts_with("llvm."));

  ArrayRef<IntrinsicTargetInfo> Targets(TargetInfos);
  // Drop "llvm." and take the first dotted component. That will be the target
  // if this is target specific.
  StringRef Target = Name.drop_front(N: 5).split(Separator: '.').first;
  // TargetInfos is sorted by name; partition_point is a binary search for the
  // first entry not less than Target.
  auto It = partition_point(
      Range&: Targets, P: [=](const IntrinsicTargetInfo &TI) { return TI.Name < Target; });
  // We've either found the target or just fall back to the generic set, which
  // is always first.
  const auto &TI = It != Targets.end() && It->Name == Target ? *It : Targets[0];
  // &IntrinsicNameOffsetTable[1]: index 0 presumably corresponds to
  // not_intrinsic and is skipped — consistent with lookupIntrinsicID's
  // index adjustment.
  return {ArrayRef(&IntrinsicNameOffsetTable[1] + TI.Offset, TI.Count),
          TI.Name};
}
718
719/// This does the actual lookup of an intrinsic ID which matches the given
720/// function name.
/// This does the actual lookup of an intrinsic ID which matches the given
/// function name. Returns not_intrinsic when there is no match, or when the
/// match is only a prefix of a non-overloadable intrinsic name.
Intrinsic::ID Intrinsic::lookupIntrinsicID(StringRef Name) {
  auto [NameOffsetTable, Target] = findTargetSubtable(Name);
  int Idx = lookupLLVMIntrinsicByName(NameOffsetTable, Name, Target);
  if (Idx == -1)
    return Intrinsic::not_intrinsic;

  // Intrinsic IDs correspond to the location in IntrinsicNameTable, but we have
  // an index into a sub-table.
  int Adjust = NameOffsetTable.data() - IntrinsicNameOffsetTable;
  Intrinsic::ID ID = static_cast<Intrinsic::ID>(Idx + Adjust);

  // If the intrinsic is not overloaded, require an exact match. If it is
  // overloaded, require either exact or prefix match.
  const auto MatchSize = IntrinsicNameTable[NameOffsetTable[Idx]].size();
  assert(Name.size() >= MatchSize && "Expected either exact or prefix match");
  bool IsExactMatch = Name.size() == MatchSize;
  return IsExactMatch || Intrinsic::isOverloaded(id: ID) ? ID
                                                      : Intrinsic::not_intrinsic;
}
740
741/// This defines the "Intrinsic::getAttributes(ID id)" method.
742#define GET_INTRINSIC_ATTRIBUTES
743#include "llvm/IR/IntrinsicImpl.inc"
744
745static Function *
746getOrInsertIntrinsicDeclarationImpl(Module *M, Intrinsic::ID id,
747 ArrayRef<Type *> OverloadTys,
748 FunctionType *FT) {
749 std::string Name = OverloadTys.empty()
750 ? Intrinsic::getName(id).str()
751 : Intrinsic::getName(Id: id, OverloadTys, M, FT);
752 Function *F = cast<Function>(Val: M->getOrInsertFunction(Name, T: FT).getCallee());
753 if (F->getFunctionType() == FT)
754 return F;
755
756 // It's possible that a declaration for this intrinsic already exists with an
757 // incorrect signature, if the signature has changed, but this particular
758 // declaration has not been auto-upgraded yet. In that case, rename the
759 // invalid declaration and insert a new one with the correct signature. The
760 // invalid declaration will get upgraded later.
761 F->setName(F->getName() + ".invalid");
762 return cast<Function>(Val: M->getOrInsertFunction(Name, T: FT).getCallee());
763}
764
765Function *Intrinsic::getOrInsertDeclaration(Module *M, ID id,
766 ArrayRef<Type *> OverloadTys) {
767 // There can never be multiple globals with the same name of different types,
768 // because intrinsics must be a specific type.
769 FunctionType *FT = getType(Context&: M->getContext(), id, OverloadTys);
770 return getOrInsertIntrinsicDeclarationImpl(M, id, OverloadTys, FT);
771}
772
/// Get or create the declaration of intrinsic \p id whose overload types are
/// inferred from the desired return type \p RetTy and argument types
/// \p ArgTys by matching against the intrinsic's signature metadata.
Function *Intrinsic::getOrInsertDeclaration(Module *M, ID id, Type *RetTy,
                                            ArrayRef<Type *> ArgTys) {
  // If the intrinsic is not overloaded, use the non-overloaded version.
  if (!Intrinsic::isOverloaded(id))
    return getOrInsertDeclaration(M, id);

  // Get the intrinsic signature metadata.
  SmallVector<Intrinsic::IITDescriptor, 8> Table;
  getIntrinsicInfoTableEntries(id, T&: Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;

  FunctionType *FTy = FunctionType::get(Result: RetTy, Params: ArgTys, /*isVarArg=*/false);

  // Automatically determine the overloaded types.
  SmallVector<Type *, 4> OverloadTys;
  [[maybe_unused]] Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FTy, Infos&: TableRef, OverloadTys);
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match &&
         "intrinsic signature mismatch");

  // If intrinsic requires vararg, recreate the FunctionType accordingly.
  if (!matchIntrinsicVarArg(/*isVarArg=*/true, Infos&: TableRef))
    FTy = FunctionType::get(Result: RetTy, Params: ArgTys, /*isVarArg=*/true);

  assert(TableRef.empty() && "Unprocessed descriptors remain");

  return getOrInsertIntrinsicDeclarationImpl(M, id, OverloadTys, FT: FTy);
}
801
802Function *Intrinsic::getDeclarationIfExists(const Module *M, ID id) {
803 return M->getFunction(Name: getName(id));
804}
805
806Function *Intrinsic::getDeclarationIfExists(Module *M, ID id,
807 ArrayRef<Type *> OverloadTys,
808 FunctionType *FT) {
809 return M->getFunction(Name: getName(Id: id, OverloadTys, M, FT));
810}
811
812// This defines the "Intrinsic::getIntrinsicForClangBuiltin()" method.
813#define GET_LLVM_INTRINSIC_FOR_CLANG_BUILTIN
814#include "llvm/IR/IntrinsicImpl.inc"
815
816// This defines the "Intrinsic::getIntrinsicForMSBuiltin()" method.
817#define GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
818#include "llvm/IR/IntrinsicImpl.inc"
819
/// Returns true if \p QID is one of the llvm.experimental.constrained.*
/// floating-point intrinsics listed in ConstrainedOps.def.
bool Intrinsic::isConstrainedFPIntrinsic(ID QID) {
  switch (QID) {
// Expand one case label per intrinsic enumerated in ConstrainedOps.def.
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
#undef INSTRUCTION
    return true;
  default:
    return false;
  }
}
831
/// Returns true if the constrained FP intrinsic \p QID takes an explicit
/// rounding-mode operand, i.e. its ROUND_MODE flag in ConstrainedOps.def
/// is 1. Returns false for all non-constrained intrinsics.
bool Intrinsic::hasConstrainedFPRoundingModeOperand(Intrinsic::ID QID) {
  switch (QID) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC: \
    return ROUND_MODE == 1;
#include "llvm/IR/ConstrainedOps.def"
#undef INSTRUCTION
  default:
    return false;
  }
}
843
/// A type whose check had to be postponed, paired with the descriptor slice
/// (starting at the deferring descriptor) against which to replay the check.
using DeferredIntrinsicMatchPair =
    std::pair<Type *, ArrayRef<Intrinsic::IITDescriptor>>;

/// Match one type \p Ty against the next descriptor(s) in \p Infos.
///
/// Returns true on MISMATCH and false on success (note the inverted sense —
/// every `return !X` below means "mismatch unless X holds"). Descriptors
/// that are processed are consumed from \p Infos; newly bound overload types
/// are appended to \p OverloadTys. A descriptor referring to an overload
/// index not bound yet (a forward reference) is queued on \p DeferredChecks
/// and replayed later with \p IsDeferredCheck set, at which point a
/// still-unbound index is a mismatch.
static bool
matchIntrinsicType(Type *Ty, ArrayRef<Intrinsic::IITDescriptor> &Infos,
                   SmallVectorImpl<Type *> &OverloadTys,
                   SmallVectorImpl<DeferredIntrinsicMatchPair> &DeferredChecks,
                   bool IsDeferredCheck) {
  using namespace Intrinsic;

  // If we ran out of descriptors, there are too many arguments.
  if (Infos.empty())
    return true;

  // Do this before slicing off the 'front' part
  // (so a deferred re-check replays from exactly this position).
  auto InfosRef = Infos;
  auto DeferCheck = [&DeferredChecks, &InfosRef](Type *T) {
    DeferredChecks.emplace_back(Args&: T, Args&: InfosRef);
    return false;
  };

  IITDescriptor D = Infos.front();
  Infos = Infos.slice(N: 1);

  switch (D.Kind) {
  // Fixed (non-overloaded) kinds: a mismatch is a plain type inequality.
  case IITDescriptor::Void:
    return !Ty->isVoidTy();
  case IITDescriptor::VarArg:
    return true;
  case IITDescriptor::MMX: {
    // MMX values are represented as <1 x i64> fixed vectors.
    FixedVectorType *VT = dyn_cast<FixedVectorType>(Val: Ty);
    return !VT || VT->getNumElements() != 1 ||
           !VT->getElementType()->isIntegerTy(Bitwidth: 64);
  }
  case IITDescriptor::AMX:
    return !Ty->isX86_AMXTy();
  case IITDescriptor::Token:
    return !Ty->isTokenTy();
  case IITDescriptor::Metadata:
    return !Ty->isMetadataTy();
  case IITDescriptor::Half:
    return !Ty->isHalfTy();
  case IITDescriptor::BFloat:
    return !Ty->isBFloatTy();
  case IITDescriptor::Float:
    return !Ty->isFloatTy();
  case IITDescriptor::Double:
    return !Ty->isDoubleTy();
  case IITDescriptor::Quad:
    return !Ty->isFP128Ty();
  case IITDescriptor::PPCQuad:
    return !Ty->isPPC_FP128Ty();
  case IITDescriptor::Integer:
    return !Ty->isIntegerTy(Bitwidth: D.IntegerWidth);
  case IITDescriptor::AArch64Svcount:
    // Modeled as the target extension type "aarch64.svcount".
    return !isa<TargetExtType>(Val: Ty) ||
           cast<TargetExtType>(Val: Ty)->getName() != "aarch64.svcount";
  case IITDescriptor::Vector: {
    // Element count must match, then recurse on the element type; the
    // recursion consumes the element's own descriptor(s) from Infos.
    VectorType *VT = dyn_cast<VectorType>(Val: Ty);
    return !VT || VT->getElementCount() != D.VectorWidth ||
           matchIntrinsicType(Ty: VT->getElementType(), Infos, OverloadTys,
                              DeferredChecks, IsDeferredCheck);
  }
  case IITDescriptor::Pointer: {
    // With opaque pointers only the address space is checked.
    PointerType *PT = dyn_cast<PointerType>(Val: Ty);
    return !PT || PT->getAddressSpace() != D.PointerAddressSpace;
  }

  case IITDescriptor::Struct: {
    // Only literal, non-packed structs with the expected number of elements
    // can match; each element is matched recursively in order.
    StructType *ST = dyn_cast<StructType>(Val: Ty);
    if (!ST || !ST->isLiteral() || ST->isPacked() ||
        ST->getNumElements() != D.StructNumElements)
      return true;

    for (unsigned i = 0, e = D.StructNumElements; i != e; ++i)
      if (matchIntrinsicType(Ty: ST->getElementType(N: i), Infos, OverloadTys,
                             DeferredChecks, IsDeferredCheck))
        return true;
    return false;
  }

  case IITDescriptor::Overloaded:
    // If this is the second occurrence of an argument,
    // verify that the later instance matches the previous instance.
    if (D.getOverloadIndex() < OverloadTys.size())
      return Ty != OverloadTys[D.getOverloadIndex()];

    // Forward reference (or an explicit match-only kind): queue for replay.
    if (D.getOverloadIndex() > OverloadTys.size() ||
        D.getOverloadKind() == IITDescriptor::AK_MatchType)
      return IsDeferredCheck || DeferCheck(Ty);

    // First occurrence at the next free slot: bind the overload type, then
    // verify it satisfies the kind's constraint.
    assert(D.getOverloadIndex() == OverloadTys.size() && !IsDeferredCheck &&
           "Table consistency error");
    OverloadTys.push_back(Elt: Ty);

    switch (D.getOverloadKind()) {
    case IITDescriptor::AK_Any:
      return false; // Success
    case IITDescriptor::AK_AnyInteger:
      return !Ty->isIntOrIntVectorTy();
    case IITDescriptor::AK_AnyFloat:
      return !Ty->isFPOrFPVectorTy();
    case IITDescriptor::AK_AnyVector:
      return !isa<VectorType>(Val: Ty);
    case IITDescriptor::AK_AnyPointer:
      return !isa<PointerType>(Val: Ty);
    default:
      break;
    }
    llvm_unreachable("all argument kinds not covered");

  case IITDescriptor::Extend: {
    // Ty must be the referenced overload type with its (element) integer
    // width doubled.
    // If this is a forward reference, defer the check for later.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    Type *NewTy = OverloadTys[D.getOverloadIndex()];
    if (VectorType *VTy = dyn_cast<VectorType>(Val: NewTy))
      NewTy = VectorType::getExtendedElementVectorType(VTy);
    else if (IntegerType *ITy = dyn_cast<IntegerType>(Val: NewTy))
      NewTy = IntegerType::get(C&: ITy->getContext(), NumBits: 2 * ITy->getBitWidth());
    else
      return true;

    return Ty != NewTy;
  }
  case IITDescriptor::Trunc: {
    // Ty must be the referenced overload type with its (element) integer
    // width halved.
    // If this is a forward reference, defer the check for later.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    Type *NewTy = OverloadTys[D.getOverloadIndex()];
    if (VectorType *VTy = dyn_cast<VectorType>(Val: NewTy))
      NewTy = VectorType::getTruncatedElementVectorType(VTy);
    else if (IntegerType *ITy = dyn_cast<IntegerType>(Val: NewTy))
      NewTy = IntegerType::get(C&: ITy->getContext(), NumBits: ITy->getBitWidth() / 2);
    else
      return true;

    return Ty != NewTy;
  }
  case IITDescriptor::OneNthEltsVec: {
    // Ty must be the referenced vector overload type with its element count
    // divided by the descriptor's divisor.
    // If this is a forward reference, defer the check for later.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck || DeferCheck(Ty);
    auto *VTy = dyn_cast<VectorType>(Val: OverloadTys[D.getOverloadIndex()]);
    if (!VTy)
      return true;
    if (!VTy->getElementCount().isKnownMultipleOf(RHS: D.getVectorDivisor()))
      return true;
    return VectorType::getOneNthElementsVectorType(VTy, Denominator: D.getVectorDivisor()) !=
           Ty;
  }
  case IITDescriptor::SameVecWidth: {
    if (D.getOverloadIndex() >= OverloadTys.size()) {
      // Defer check and subsequent check for the vector element type.
      // (The element-type descriptor is skipped here; the deferred replay
      // starts from the snapshot taken before slicing, so it sees both.)
      Infos = Infos.slice(N: 1);
      return IsDeferredCheck || DeferCheck(Ty);
    }
    auto *ReferenceType =
        dyn_cast<VectorType>(Val: OverloadTys[D.getOverloadIndex()]);
    auto *ThisArgType = dyn_cast<VectorType>(Val: Ty);
    // Both must be vectors of the same number of elements or neither.
    if ((ReferenceType != nullptr) != (ThisArgType != nullptr))
      return true;
    Type *EltTy = Ty;
    if (ThisArgType) {
      if (ReferenceType->getElementCount() != ThisArgType->getElementCount())
        return true;
      EltTy = ThisArgType->getElementType();
    }
    // Match the (scalar or element) type against the next descriptor.
    return matchIntrinsicType(Ty: EltTy, Infos, OverloadTys, DeferredChecks,
                              IsDeferredCheck);
  }
  case IITDescriptor::VecOfAnyPtrsToElt: {
    unsigned RefOverloadIndex = D.getRefOverloadIndex();
    if (RefOverloadIndex >= OverloadTys.size()) {
      if (IsDeferredCheck)
        return true;
      // If forward referencing, already add the pointer-vector type and
      // defer the checks for later.
      OverloadTys.push_back(Elt: Ty);
      return DeferCheck(Ty);
    }

    if (!IsDeferredCheck) {
      assert(D.getOverloadIndex() == OverloadTys.size() &&
             "Table consistency error");
      OverloadTys.push_back(Elt: Ty);
    }

    // Verify the overloaded type "matches" the Ref type.
    // i.e. Ty is a vector with the same width as Ref.
    // Composed of pointers to the same element type as Ref.
    auto *ReferenceType = dyn_cast<VectorType>(Val: OverloadTys[RefOverloadIndex]);
    auto *ThisArgVecTy = dyn_cast<VectorType>(Val: Ty);
    if (!ThisArgVecTy || !ReferenceType ||
        (ReferenceType->getElementCount() != ThisArgVecTy->getElementCount()))
      return true;
    return !ThisArgVecTy->getElementType()->isPointerTy();
  }
  case IITDescriptor::VecElement: {
    // Ty must be the element type of the referenced vector overload type.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck ? true : DeferCheck(Ty);
    auto *ReferenceType =
        dyn_cast<VectorType>(Val: OverloadTys[D.getOverloadIndex()]);
    return !ReferenceType || Ty != ReferenceType->getElementType();
  }
  case IITDescriptor::Subdivide2:
  case IITDescriptor::Subdivide4: {
    // Ty must be the referenced vector overload type subdivided once (2x)
    // or twice (4x).
    // If this is a forward reference, defer the check for later.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    Type *NewTy = OverloadTys[D.getOverloadIndex()];
    if (auto *VTy = dyn_cast<VectorType>(Val: NewTy)) {
      int SubDivs = D.Kind == IITDescriptor::Subdivide2 ? 1 : 2;
      NewTy = VectorType::getSubdividedVectorType(VTy, NumSubdivs: SubDivs);
      return Ty != NewTy;
    }
    return true;
  }
  case IITDescriptor::VecOfBitcastsToInt: {
    // Ty must be the integer-element counterpart of the referenced vector
    // overload type.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck || DeferCheck(Ty);
    auto *ReferenceType =
        dyn_cast<VectorType>(Val: OverloadTys[D.getOverloadIndex()]);
    auto *ThisArgVecTy = dyn_cast<VectorType>(Val: Ty);
    if (!ThisArgVecTy || !ReferenceType)
      return true;
    return ThisArgVecTy != VectorType::getInteger(VTy: ReferenceType);
  }
  }
  llvm_unreachable("unhandled");
}
1079
1080Intrinsic::MatchIntrinsicTypesResult
1081Intrinsic::matchIntrinsicSignature(FunctionType *FTy,
1082 ArrayRef<Intrinsic::IITDescriptor> &Infos,
1083 SmallVectorImpl<Type *> &OverloadTys) {
1084 SmallVector<DeferredIntrinsicMatchPair, 2> DeferredChecks;
1085 if (matchIntrinsicType(Ty: FTy->getReturnType(), Infos, OverloadTys,
1086 DeferredChecks, IsDeferredCheck: false))
1087 return MatchIntrinsicTypes_NoMatchRet;
1088
1089 unsigned NumDeferredReturnChecks = DeferredChecks.size();
1090
1091 for (auto *Ty : FTy->params())
1092 if (matchIntrinsicType(Ty, Infos, OverloadTys, DeferredChecks, IsDeferredCheck: false))
1093 return MatchIntrinsicTypes_NoMatchArg;
1094
1095 for (unsigned I = 0, E = DeferredChecks.size(); I != E; ++I) {
1096 DeferredIntrinsicMatchPair &Check = DeferredChecks[I];
1097 if (matchIntrinsicType(Ty: Check.first, Infos&: Check.second, OverloadTys,
1098 DeferredChecks, IsDeferredCheck: true))
1099 return I < NumDeferredReturnChecks ? MatchIntrinsicTypes_NoMatchRet
1100 : MatchIntrinsicTypes_NoMatchArg;
1101 }
1102
1103 return MatchIntrinsicTypes_Match;
1104}
1105
1106bool Intrinsic::matchIntrinsicVarArg(
1107 bool isVarArg, ArrayRef<Intrinsic::IITDescriptor> &Infos) {
1108 // If there are no descriptors left, then it can't be a vararg.
1109 if (Infos.empty())
1110 return isVarArg;
1111
1112 // There should be only one descriptor remaining at this point.
1113 if (Infos.size() != 1)
1114 return true;
1115
1116 // Check and verify the descriptor.
1117 IITDescriptor D = Infos.front();
1118 Infos = Infos.slice(N: 1);
1119 if (D.Kind == IITDescriptor::VarArg)
1120 return !isVarArg;
1121
1122 return true;
1123}
1124
1125bool Intrinsic::getIntrinsicSignature(Intrinsic::ID ID, FunctionType *FT,
1126 SmallVectorImpl<Type *> &OverloadTys) {
1127 if (!ID)
1128 return false;
1129
1130 SmallVector<Intrinsic::IITDescriptor, 8> Table;
1131 getIntrinsicInfoTableEntries(id: ID, T&: Table);
1132 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
1133
1134 if (Intrinsic::matchIntrinsicSignature(FTy: FT, Infos&: TableRef, OverloadTys) !=
1135 Intrinsic::MatchIntrinsicTypesResult::MatchIntrinsicTypes_Match) {
1136 return false;
1137 }
1138 if (Intrinsic::matchIntrinsicVarArg(isVarArg: FT->isVarArg(), Infos&: TableRef))
1139 return false;
1140 return true;
1141}
1142
1143bool Intrinsic::getIntrinsicSignature(Function *F,
1144 SmallVectorImpl<Type *> &OverloadTys) {
1145 return getIntrinsicSignature(ID: F->getIntrinsicID(), FT: F->getFunctionType(),
1146 OverloadTys);
1147}
1148
1149std::optional<Function *> Intrinsic::remangleIntrinsicFunction(Function *F) {
1150 SmallVector<Type *, 4> OverloadTys;
1151 if (!getIntrinsicSignature(F, OverloadTys))
1152 return std::nullopt;
1153
1154 Intrinsic::ID ID = F->getIntrinsicID();
1155 StringRef Name = F->getName();
1156 std::string WantedName =
1157 Intrinsic::getName(Id: ID, OverloadTys, M: F->getParent(), FT: F->getFunctionType());
1158 if (Name == WantedName)
1159 return std::nullopt;
1160
1161 Function *NewDecl = [&] {
1162 if (auto *ExistingGV = F->getParent()->getNamedValue(Name: WantedName)) {
1163 if (auto *ExistingF = dyn_cast<Function>(Val: ExistingGV))
1164 if (ExistingF->getFunctionType() == F->getFunctionType())
1165 return ExistingF;
1166
1167 // The name already exists, but is not a function or has the wrong
1168 // prototype. Make place for the new one by renaming the old version.
1169 // Either this old version will be removed later on or the module is
1170 // invalid and we'll get an error.
1171 ExistingGV->setName(WantedName + ".renamed");
1172 }
1173 return Intrinsic::getOrInsertDeclaration(M: F->getParent(), id: ID, OverloadTys);
1174 }();
1175
1176 NewDecl->setCallingConv(F->getCallingConv());
1177 assert(NewDecl->getFunctionType() == F->getFunctionType() &&
1178 "Shouldn't change the signature");
1179 return NewDecl;
1180}
1181
1182struct InterleaveIntrinsic {
1183 Intrinsic::ID Interleave, Deinterleave;
1184};
1185
1186static InterleaveIntrinsic InterleaveIntrinsics[] = {
1187 {.Interleave: Intrinsic::vector_interleave2, .Deinterleave: Intrinsic::vector_deinterleave2},
1188 {.Interleave: Intrinsic::vector_interleave3, .Deinterleave: Intrinsic::vector_deinterleave3},
1189 {.Interleave: Intrinsic::vector_interleave4, .Deinterleave: Intrinsic::vector_deinterleave4},
1190 {.Interleave: Intrinsic::vector_interleave5, .Deinterleave: Intrinsic::vector_deinterleave5},
1191 {.Interleave: Intrinsic::vector_interleave6, .Deinterleave: Intrinsic::vector_deinterleave6},
1192 {.Interleave: Intrinsic::vector_interleave7, .Deinterleave: Intrinsic::vector_deinterleave7},
1193 {.Interleave: Intrinsic::vector_interleave8, .Deinterleave: Intrinsic::vector_deinterleave8},
1194};
1195
1196Intrinsic::ID Intrinsic::getInterleaveIntrinsicID(unsigned Factor) {
1197 assert(Factor >= 2 && Factor <= 8 && "Unexpected factor");
1198 return InterleaveIntrinsics[Factor - 2].Interleave;
1199}
1200
1201Intrinsic::ID Intrinsic::getDeinterleaveIntrinsicID(unsigned Factor) {
1202 assert(Factor >= 2 && Factor <= 8 && "Unexpected factor");
1203 return InterleaveIntrinsics[Factor - 2].Deinterleave;
1204}
1205
1206#define GET_INTRINSIC_PRETTY_PRINT_ARGUMENTS
1207#include "llvm/IR/IntrinsicImpl.inc"
1208