//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
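///
/// For example (illustrative): for the IR type { i32, { float, [2 x i8] } }
/// and the index list {1, 1}, the result is 2 -- one slot is counted for the
/// leading i32 and one for the float before the [2 x i8] element is reached.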
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto I : llvm::enumerate(STy->elements())) {
      Type *ET = I.value();
      if (Indices && *Indices == I.index())
        return ComputeLinearIndex(ET, Indices + 1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(ET, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping over one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the offset of the requested
      // element and recurse into it with the rest of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices + 1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

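/// Flatten an LLVM IR type into the list of non-aggregate leaf types that make
/// it up, optionally recording the starting offset of each leaf.
///
/// For example (illustrative, assuming a data layout with 4-byte i32 and
/// 2-byte i16): for { i32, [2 x i16] }, Types becomes {i32, i16, i16} and
/// Offsets becomes {0, 4, 6}.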
void llvm::ComputeValueTypes(const DataLayout &DL, Type *Ty,
                             SmallVectorImpl<Type *> &Types,
                             SmallVectorImpl<TypeSize> *Offsets,
                             TypeSize StartingOffset) {
  assert((Ty->isScalableTy() == StartingOffset.isScalable() ||
          StartingOffset.isZero()) &&
         "Offset/TypeSize mismatch!");
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (StructType::element_iterator EB = STy->element_begin(), EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      // Don't compute the element offset if we didn't get a StructLayout above.
      TypeSize EltOffset =
          SL ? SL->getElementOffset(EI - EB) : TypeSize::getZero();
      ComputeValueTypes(DL, *EI, Types, Offsets, StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    TypeSize EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueTypes(DL, EltTy, Types, Offsets,
                        StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  Types.push_back(Ty);
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
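/// A minimal usage sketch (illustrative; TLI, DL, and RetTy are assumed to be
/// in scope, e.g. inside a call-lowering routine):
///
///   SmallVector<EVT, 4> ValueVTs;
///   SmallVector<TypeSize, 4> Offsets;
///   ComputeValueVTs(TLI, DL, RetTy, ValueVTs, /*MemVTs=*/nullptr, &Offsets,
///                   TypeSize::getZero());
///   // ValueVTs[i] is the EVT of the i-th leaf value, Offsets[i] its offset.
///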
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<TypeSize> *Offsets,
                           TypeSize StartingOffset) {
  SmallVector<Type *> Types;
  ComputeValueTypes(DL, Ty, Types, Offsets, StartingOffset);
  ValueVTs.reserve(Types.size());
  if (MemVTs)
    MemVTs->reserve(Types.size());
  for (Type *Ty : Types) {
    ValueVTs.push_back(TLI.getValueType(DL, Ty));
    if (MemVTs)
      MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  }
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *FixedOffsets,
                           uint64_t StartingOffset) {
  TypeSize Offset = TypeSize::getFixed(StartingOffset);
  if (FixedOffsets) {
    SmallVector<TypeSize, 4> Offsets;
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, &Offsets, Offset);
    FixedOffsets->reserve(Offsets.size());
    for (TypeSize Offset : Offsets)
      FixedOffsets->push_back(Offset.getFixedValue());
  } else {
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, nullptr, Offset);
  }
}

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueLLTs,
                            SmallVectorImpl<TypeSize> *Offsets,
                            TypeSize StartingOffset) {
  SmallVector<Type *> ValTys;
  ComputeValueTypes(DL, &Ty, ValTys, Offsets, StartingOffset);
  ValueLLTs.reserve(ValTys.size());
  for (Type *ValTy : ValTys)
    ValueLLTs.push_back(getLLTForType(*ValTy, DL));
}

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueLLTs,
                            SmallVectorImpl<uint64_t> *FixedOffsets,
                            uint64_t FixedStartingOffset) {
  TypeSize StartingOffset = TypeSize::getFixed(FixedStartingOffset);
  if (FixedOffsets) {
    SmallVector<TypeSize, 4> Offsets;
    computeValueLLTs(DL, Ty, ValueLLTs, &Offsets, StartingOffset);
    FixedOffsets->reserve(Offsets.size());
    for (TypeSize Offset : Offsets)
      FixedOffsets->push_back(Offset.getFixedValue());
  } else {
    computeValueLLTs(DL, Ty, ValueLLTs, nullptr, StartingOffset);
  }
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ: return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT: return ISD::SETOGT;
  case FCmpInst::FCMP_OGE: return ISD::SETOGE;
  case FCmpInst::FCMP_OLT: return ISD::SETOLT;
  case FCmpInst::FCMP_OLE: return ISD::SETOLE;
  case FCmpInst::FCMP_ONE: return ISD::SETONE;
  case FCmpInst::FCMP_ORD: return ISD::SETO;
  case FCmpInst::FCMP_UNO: return ISD::SETUO;
  case FCmpInst::FCMP_UEQ: return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT: return ISD::SETUGT;
  case FCmpInst::FCMP_UGE: return ISD::SETUGE;
  case FCmpInst::FCMP_ULT: return ISD::SETULT;
  case FCmpInst::FCMP_ULE: return ISD::SETULE;
  case FCmpInst::FCMP_UNE: return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE: return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ: return ISD::SETEQ;
  case ICmpInst::ICMP_NE: return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

ICmpInst::Predicate llvm::getICmpCondCode(ISD::CondCode Pred) {
  switch (Pred) {
  case ISD::SETEQ:
    return ICmpInst::ICMP_EQ;
  case ISD::SETNE:
    return ICmpInst::ICMP_NE;
  case ISD::SETLE:
    return ICmpInst::ICMP_SLE;
  case ISD::SETULE:
    return ICmpInst::ICMP_ULE;
  case ISD::SETGE:
    return ICmpInst::ICMP_SGE;
  case ISD::SETUGE:
    return ICmpInst::ICMP_UGE;
  case ISD::SETLT:
    return ICmpInst::ICMP_SLT;
  case ISD::SETULT:
    return ICmpInst::ICMP_ULT;
  case ISD::SETGT:
    return ICmpInst::ICMP_SGT;
  case ISD::SETUGT:
    return ICmpInst::ICMP_UGT;
  default:
    llvm_unreachable("Invalid ISD integer condition code!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
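///
/// For example (illustrative): starting from
///   %agg = insertvalue {i32, i64} undef, i64 %x, 1
/// with ValLoc = {1}, the function returns %x and leaves ValLoc empty,
/// because the requested slot is exactly the inserted scalar.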
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr.
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits =
          std::min((uint64_t)DataBits,
                   I->getType()->getPrimitiveSizeInBits().getFixedValue());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // The value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in;
        // there is no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of
      // the previous aggregate. Combine the two paths to obtain the true
      // address of our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
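///
/// For example (illustrative, assuming the target reports the truncate as
/// free via allowTruncateForTailCall and differing sizes are allowed):
///   %ret64 = tail call i64 @callee()
///   %ret32 = trunc i64 %ret64 to i32
///   ret i32 %ret32
/// Here the ret only discards the top bits of the value produced by the call,
/// so the slot is still compatible with a tail call.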
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph
  // as possible, in the hope that it will intersect with the value produced
  // by the call. In the simple case with no "returned" attribute, the hope is
  // actually that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case
  // without a "returned" attribute, the search is blocked immediately and the
  // loop is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate type.
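///
/// For example (illustrative): with SubTypes = { {i32, {i64, float}} } and
/// Path = {0} (currently at the i32), one call advances to
/// SubTypes = { {i32, {i64, float}}, {i64, float} } and Path = {1, 0}, which
/// represents the i64.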
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of
  // the coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
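///
/// For example (illustrative), the call below is in tail call position:
///
///   define i32 @caller() {
///   entry:
///     %v = tail call i32 @callee()
///     ret i32 %v
///   }
///
/// whereas a call followed by additional non-trivial instructions before the
/// ret (for example a store of the result) is not.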
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM,
                                bool ReturnsFirstArg) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail &&
                Call.getCallingConv() != CallingConv::SwiftTail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If the call will have a chain, make sure no other instruction that will
  // have a chain interposes between it and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    // Pseudo probe intrinsics do not block tail call optimization either.
    if (BBI->isDebugOrPseudoInst())
      continue;
    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
    // call optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume ||
          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl ||
          II->getIntrinsicID() == Intrinsic::fake_use)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering(),
      ReturnsFirstArg);
}

bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getContext(), F->getAttributes().getRetAttrs());
  AttrBuilder CalleeAttrs(F->getContext(),
                          cast<CallInst>(I)->getAttributes().getRetAttrs());

  // The following attributes are completely benign as far as the calling
  // convention goes; they shouldn't affect whether the call is a tail call.
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull, Attribute::NoUndef,
                           Attribute::Range, Attribute::NoFPClass}) {
    CallerAttrs.removeAttribute(Attr);
    CalleeAttrs.removeAttribute(Attr);
  }

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI,
                                           bool ReturnsFirstArg) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  // If the return value is the first argument of the call, there is nothing
  // more to check.
  if (ReturnsFirstArg)
    return true;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned; it doesn't matter what the callee put there,
  // it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction; the
      // rest are essentially undef. The type doesn't really matter, but we
      // need *something*.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(llvm::reverse(RetPath));
    SmallVector<unsigned, 4> TmpCallPath(llvm::reverse(CallPath));

    // Finally, we can check whether the value produced by the tail call at
    // this index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

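// For example (illustrative), this returns true for:
//
//   define ptr @caller(ptr %p) {
//     %r = tail call ptr @callee(ptr %p, i32 1)
//     ret ptr %p
//   }
//
// because the value returned by the block is the call's first argument.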
bool llvm::funcReturnsFirstArgOfCall(const CallInst &CI) {
  const ReturnInst *Ret = dyn_cast<ReturnInst>(CI.getParent()->getTerminator());
  Value *RetVal = Ret ? Ret->getReturnValue() : nullptr;
  bool ReturnsFirstArg = false;
  if (RetVal && RetVal == CI.getArgOperand(0))
    ReturnsFirstArg = true;
  return ReturnsFirstArg;
}

static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur; don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    append_range(Worklist, Visiting->successors());
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH, so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH scope entry blocks.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);

  // Add any remaining blocks in the function to the unreachable set, which
  // might not otherwise have been identified as unreachable (such as infinite
  // loops).
  for (const MachineBasicBlock &MBB : MF)
    if (!EHScopeMembership.count(&MBB))
      collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MBB);

  return EHScopeMembership;
}