//===-- Operator.cpp - Implement the LLVM operators -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the non-inline methods for the LLVM Operator classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Operator.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"

#include "ConstantsContext.h"

namespace llvm {
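// Return true if this operator carries flags that can turn non-poison
// operands into a poison result: nuw/nsw on arithmetic and trunc, exact on
// division and right shifts, disjoint on or, nneg on zext/uitofp, no-wrap
// flags or inrange on GEPs, and the nnan/ninf fast-math flags.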
bool Operator::hasPoisonGeneratingFlags() const {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    auto *OBO = cast<OverflowingBinaryOperator>(this);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }
  case Instruction::Trunc: {
    if (auto *TI = dyn_cast<TruncInst>(this))
      return TI->hasNoUnsignedWrap() || TI->hasNoSignedWrap();
    return false;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    return cast<PossiblyExactOperator>(this)->isExact();
  case Instruction::Or:
    return cast<PossiblyDisjointInst>(this)->isDisjoint();
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GEPOperator>(this);
    // Note: inrange exists on constant expressions only.
    return GEP->getNoWrapFlags() != GEPNoWrapFlags::none() ||
           GEP->getInRange() != std::nullopt;
  }
  case Instruction::UIToFP:
  case Instruction::ZExt:
    if (auto *NNI = dyn_cast<PossiblyNonNegInst>(this))
      return NNI->hasNonNeg();
    return false;
  default:
    if (const auto *FP = dyn_cast<FPMathOperator>(this))
      return FP->hasNoNaNs() || FP->hasNoInfs();
    return false;
  }
}

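// Flags are not the only poison-generating annotations: return attributes
// such as nonnull or range, and metadata such as !range or !nonnull, can
// also make an instruction yield poison when violated.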
bool Operator::hasPoisonGeneratingAnnotations() const {
  if (hasPoisonGeneratingFlags())
    return true;
  auto *I = dyn_cast<Instruction>(this);
  return I && (I->hasPoisonGeneratingReturnAttributes() ||
               I->hasPoisonGeneratingMetadata());
}

Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}

std::optional<ConstantRange> GEPOperator::getInRange() const {
  if (auto *CE = dyn_cast<GetElementPtrConstantExpr>(this))
    return CE->getInRange();
  return std::nullopt;
}

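// Conservatively compute the largest alignment that is guaranteed to
// survive this GEP for any sufficiently aligned base pointer. As a
// hypothetical example, `gep i32, ptr %p, i64 %i` with an unknown %i moves
// %p by some multiple of the 4-byte stride, so at most a 4-byte alignment
// of %p is known to be preserved.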
Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // Compute the worst possible offset at every level of the GEP and
  // accumulate the minimum alignment into Result.

  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    uint64_t Offset;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, use 1: among all possible index values it
      // is the one that gives the worst alignment for the offset.
      const uint64_t ElemCount = OpC ? OpC->getZExtValue() : 1;
      Offset = GTI.getSequentialElementStride(DL) * ElemCount;
    }
    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}

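// Fold this GEP's indices into a single constant byte offset. For a
// hypothetical `gep {i32, i64}, ptr %p, i64 1, i32 1` under a typical
// 64-bit data layout (struct size 16, i64 field at offset 8), this adds
// 16 + 8 = 24 to Offset.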
bool GEPOperator::accumulateConstantOffset(
    const DataLayout &DL, APInt &Offset,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  assert(Offset.getBitWidth() ==
             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
  SmallVector<const Value *> Index(llvm::drop_begin(operand_values()));
  return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
                                               DL, Offset, ExternalAnalysis);
}

bool GEPOperator::accumulateConstantOffset(
    Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
    APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
  // Fast path for canonical getelementptr i8 form.
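  // (E.g. a hypothetical `getelementptr i8, ptr %p, i64 42` simply adds the
  // constant 42 to Offset, with no type walk.)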
  if (SourceType->isIntegerTy(8) && !ExternalAnalysis) {
    if (auto *CI = dyn_cast<ConstantInt>(Index.front())) {
      Offset += CI->getValue().sextOrTrunc(Offset.getBitWidth());
      return true;
    }
    return false;
  }

  bool UsedExternalAnalysis = false;
  auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
    Index = Index.sextOrTrunc(Offset.getBitWidth());
    APInt IndexedSize = APInt(Offset.getBitWidth(), Size);
    // For array or vector indices, scale the index by the size of the type.
    if (!UsedExternalAnalysis) {
      Offset += Index * IndexedSize;
    } else {
      // The external analysis can return a result higher or lower than the
      // value it represents, so we need to detect overflow/underflow.
      bool Overflow = false;
      APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
      if (Overflow)
        return false;
      Offset = Offset.sadd_ov(OffsetPlus, Overflow);
      if (Overflow)
        return false;
    }
    return true;
  };
  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
      SourceType, Index.begin());
  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0
      // = 0), bail out.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        if (!AccumulateOffset(
                APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
                1))
          return false;
        continue;
      }
      if (!AccumulateOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL)))
        return false;
      continue;
    }

    // The operand is not constant; check whether an external analysis was
    // provided. External analysis is not applicable to a struct type.
    if (!ExternalAnalysis || STy || ScalableType)
      return false;
    APInt AnalysisIndex;
    if (!ExternalAnalysis(*V, AnalysisIndex))
      return false;
    UsedExternalAnalysis = true;
    if (!AccumulateOffset(AnalysisIndex, GTI.getSequentialElementStride(DL)))
      return false;
  }
  return true;
}

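// Split this GEP into a constant byte offset plus a per-variable stride map.
// A hypothetical `gep [8 x i32], ptr %p, i64 0, i64 %i` yields
// ConstantOffset = 0 and VariableOffsets = { %i -> 4 }.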
bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    APInt IndexedSize = APInt(BitWidth, Size);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0
      // = 0), bail out.
      // TODO: If the runtime value is accessible at any point before DWARF
      // emission, then we could potentially keep a forward reference to it
      // in the debug value to be filled in later.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL));
      continue;
    }

    if (STy || ScalableType)
      return false;
    APInt IndexedSize = APInt(BitWidth, GTI.getSequentialElementStride(DL));
    // Insert an initial offset of 0 for V iff none exists already, then
    // increment the offset by IndexedSize.
    if (!IndexedSize.isZero()) {
      auto *It = VariableOffsets.insert({V, APInt(BitWidth, 0)}).first;
      It->second += IndexedSize;
    }
  }
  return true;
}

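// Print the flags in their textual IR spelling; when all flags are set they
// collapse to the single keyword `fast`.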
void FastMathFlags::print(raw_ostream &O) const {
  if (all())
    O << " fast";
  else {
    if (allowReassoc())
      O << " reassoc";
    if (noNaNs())
      O << " nnan";
    if (noInfs())
      O << " ninf";
    if (noSignedZeros())
      O << " nsz";
    if (allowReciprocal())
      O << " arcp";
    if (allowContract())
      O << " contract";
    if (approxFunc())
      O << " afn";
  }
}
} // namespace llvm