//===-- Operator.cpp - Implement the LLVM operators -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the non-inline methods for the LLVM Operator classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Operator.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"

#include "ConstantsContext.h"

namespace llvm {
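// Return true if this operator carries flags that can make it produce poison
// even when its inputs are well defined (e.g. nuw/nsw, exact, disjoint, nneg,
// samesign, nnan/ninf, or the GEP no-wrap flags and inrange attribute).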
bool Operator::hasPoisonGeneratingFlags() const {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    auto *OBO = cast<OverflowingBinaryOperator>(this);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }
  case Instruction::Trunc: {
    if (auto *TI = dyn_cast<TruncInst>(this))
      return TI->hasNoUnsignedWrap() || TI->hasNoSignedWrap();
    return false;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    return cast<PossiblyExactOperator>(this)->isExact();
  case Instruction::Or:
    return cast<PossiblyDisjointInst>(this)->isDisjoint();
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GEPOperator>(this);
    // Note: inrange exists on constant expressions only.
    return GEP->getNoWrapFlags() != GEPNoWrapFlags::none() ||
           GEP->getInRange() != std::nullopt;
  }
  case Instruction::UIToFP:
  case Instruction::ZExt:
    if (auto *NNI = dyn_cast<PossiblyNonNegInst>(this))
      return NNI->hasNonNeg();
    return false;
  case Instruction::ICmp:
    return cast<ICmpInst>(this)->hasSameSign();
  default:
    if (const auto *FP = dyn_cast<FPMathOperator>(this))
      return FP->hasNoNaNs() || FP->hasNoInfs();
    return false;
  }
}

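// Like hasPoisonGeneratingFlags(), but also accounts for poison-generating
// return attributes and metadata attached to the underlying instruction.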
bool Operator::hasPoisonGeneratingAnnotations() const {
  if (hasPoisonGeneratingFlags())
    return true;
  auto *I = dyn_cast<Instruction>(this);
  return I && (I->hasPoisonGeneratingReturnAttributes() ||
               I->hasPoisonGeneratingMetadata());
}

Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}

std::optional<ConstantRange> GEPOperator::getInRange() const {
  if (auto *CE = dyn_cast<GetElementPtrConstantExpr>(this))
    return CE->getInRange();
  return std::nullopt;
}

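// Determine the largest alignment that is guaranteed to be preserved across
// this GEP, assuming worst-case values for any indices that are not constant.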
Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // Compute the worst possible offset for every level of the GEP and
  // accumulate the minimum alignment into Result.

  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    uint64_t Offset;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, we take 1 because it is the index that
      // yields the worst alignment for the offset.
      const uint64_t ElemCount = OpC ? OpC->getZExtValue() : 1;
      Offset = GTI.getSequentialElementStride(DL) * ElemCount;
    }
    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}

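// Accumulate the constant offset of this GEP into Offset by forwarding the
// source element type and the index operands to the static overload below.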
bool GEPOperator::accumulateConstantOffset(
    const DataLayout &DL, APInt &Offset,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  assert(Offset.getBitWidth() ==
             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
  SmallVector<const Value *> Index(llvm::drop_begin(operand_values()));
  return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
                                               DL, Offset, ExternalAnalysis);
}

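// Walk an index list over SourceType and add the resulting constant offset to
// Offset. Returns false if the offset cannot be determined, e.g. for scalable
// types, for non-constant indices without an ExternalAnalysis callback, or
// when an offset derived from ExternalAnalysis would overflow.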
bool GEPOperator::accumulateConstantOffset(
    Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
    APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
  // Fast path for canonical getelementptr i8 form.
  if (SourceType->isIntegerTy(8) && !Index.empty() && !ExternalAnalysis) {
    auto *CI = dyn_cast<ConstantInt>(Index.front());
    if (CI && CI->getType()->isIntegerTy()) {
      Offset += CI->getValue().sextOrTrunc(Offset.getBitWidth());
      return true;
    }
    return false;
  }

  bool UsedExternalAnalysis = false;
  auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
    Index = Index.sextOrTrunc(Offset.getBitWidth());
    // Truncate if type size exceeds index space.
    APInt IndexedSize(Offset.getBitWidth(), Size, /*isSigned=*/false,
                      /*implicitTrunc=*/true);
    // For array or vector indices, scale the index by the size of the type.
    if (!UsedExternalAnalysis) {
      Offset += Index * IndexedSize;
    } else {
      // The external analysis can return a value higher/lower than the one the
      // operand actually represents, so we need to detect overflow/underflow.
      bool Overflow = false;
      APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
      if (Overflow)
        return false;
      Offset = Offset.sadd_ov(OffsetPlus, Overflow);
      if (Overflow)
        return false;
    }
    return true;
  };
  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
      SourceType, Index.begin());
  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    auto *ConstOffset = dyn_cast<ConstantInt>(V);
    if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        if (!AccumulateOffset(
                APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
                1))
          return false;
        continue;
      }
      if (!AccumulateOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL)))
        return false;
      continue;
    }

    // The operand is not constant; check whether an external analysis was
    // provided. External analysis is not applicable to a struct type.
    if (!ExternalAnalysis || STy || ScalableType)
      return false;
    APInt AnalysisIndex;
    if (!ExternalAnalysis(*V, AnalysisIndex))
      return false;
    UsedExternalAnalysis = true;
    if (!AccumulateOffset(AnalysisIndex, GTI.getSequentialElementStride(DL)))
      return false;
  }
  return true;
}

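// Decompose this GEP into a constant byte offset plus, for each non-constant
// index, an entry in VariableOffsets mapping the index value to the stride it
// is scaled by.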
bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    SmallMapVector<Value *, APInt, 4> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    // Truncate if type size exceeds index space.
    APInt IndexedSize(BitWidth, Size, /*isSigned=*/false,
                      /*implicitTrunc=*/true);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    auto *ConstOffset = dyn_cast<ConstantInt>(V);
    if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      // TODO: If the runtime value is accessible at any point before DWARF
      // emission, then we could potentially keep a forward reference to it
      // in the debug value to be filled in later.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL));
      continue;
    }

    if (STy || ScalableType)
      return false;
    // Truncate if type size exceeds index space.
    APInt IndexedSize(BitWidth, GTI.getSequentialElementStride(DL),
                      /*isSigned=*/false, /*implicitTrunc=*/true);
    // Insert an initial offset of 0 for V if none exists already, then
    // increment the offset by IndexedSize.
    if (!IndexedSize.isZero()) {
      auto *It = VariableOffsets.insert({V, APInt(BitWidth, 0)}).first;
      It->second += IndexedSize;
    }
  }
  return true;
}

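// Print the fast-math flags in IR assembly syntax, collapsing to the single
// "fast" keyword when all flags are set.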
void FastMathFlags::print(raw_ostream &O) const {
  if (all())
    O << " fast";
  else {
    if (allowReassoc())
      O << " reassoc";
    if (noNaNs())
      O << " nnan";
    if (noInfs())
      O << " ninf";
    if (noSignedZeros())
      O << " nsz";
    if (allowReciprocal())
      O << " arcp";
    if (allowContract())
      O << " contract";
    if (approxFunc())
      O << " afn";
  }
}
} // namespace llvm