//===- FunctionComparator.cpp - Function Comparator -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the FunctionComparator and GlobalNumberState classes
// which are used by the MergeFunctions pass for comparing functions.
//
//===----------------------------------------------------------------------===//
13
#include "llvm/Transforms/Utils/FunctionComparator.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>
47
48using namespace llvm;
49
50#define DEBUG_TYPE "functioncomparator"
51
52int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
53 if (L < R)
54 return -1;
55 if (L > R)
56 return 1;
57 return 0;
58}
59
60int FunctionComparator::cmpAligns(Align L, Align R) const {
61 if (L.value() < R.value())
62 return -1;
63 if (L.value() > R.value())
64 return 1;
65 return 0;
66}
67
68int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const {
69 if ((int)L < (int)R)
70 return -1;
71 if ((int)L > (int)R)
72 return 1;
73 return 0;
74}
75
76int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
77 if (int Res = cmpNumbers(L: L.getBitWidth(), R: R.getBitWidth()))
78 return Res;
79 if (L.ugt(RHS: R))
80 return 1;
81 if (R.ugt(RHS: L))
82 return -1;
83 return 0;
84}
85
86int FunctionComparator::cmpConstantRanges(const ConstantRange &L,
87 const ConstantRange &R) const {
88 if (int Res = cmpAPInts(L: L.getLower(), R: R.getLower()))
89 return Res;
90 return cmpAPInts(L: L.getUpper(), R: R.getUpper());
91}
92
93int FunctionComparator::cmpAPFloats(const APFloat &L, const APFloat &R) const {
94 // Floats are ordered first by semantics (i.e. float, double, half, etc.),
95 // then by value interpreted as a bitstring (aka APInt).
96 const fltSemantics &SL = L.getSemantics(), &SR = R.getSemantics();
97 if (int Res = cmpNumbers(L: APFloat::semanticsPrecision(SL),
98 R: APFloat::semanticsPrecision(SR)))
99 return Res;
100 if (int Res = cmpNumbers(L: APFloat::semanticsMaxExponent(SL),
101 R: APFloat::semanticsMaxExponent(SR)))
102 return Res;
103 if (int Res = cmpNumbers(L: APFloat::semanticsMinExponent(SL),
104 R: APFloat::semanticsMinExponent(SR)))
105 return Res;
106 if (int Res = cmpNumbers(L: APFloat::semanticsSizeInBits(SL),
107 R: APFloat::semanticsSizeInBits(SR)))
108 return Res;
109 return cmpAPInts(L: L.bitcastToAPInt(), R: R.bitcastToAPInt());
110}
111
112int FunctionComparator::cmpMem(StringRef L, StringRef R) const {
113 // Prevent heavy comparison, compare sizes first.
114 if (int Res = cmpNumbers(L: L.size(), R: R.size()))
115 return Res;
116
117 // Compare strings lexicographically only when it is necessary: only when
118 // strings are equal in size.
119 return std::clamp(val: L.compare(RHS: R), lo: -1, hi: 1);
120}
121
122int FunctionComparator::cmpAttrs(const AttributeList L,
123 const AttributeList R) const {
124 if (int Res = cmpNumbers(L: L.getNumAttrSets(), R: R.getNumAttrSets()))
125 return Res;
126
127 for (unsigned i : L.indexes()) {
128 AttributeSet LAS = L.getAttributes(Index: i);
129 AttributeSet RAS = R.getAttributes(Index: i);
130 AttributeSet::iterator LI = LAS.begin(), LE = LAS.end();
131 AttributeSet::iterator RI = RAS.begin(), RE = RAS.end();
132 for (; LI != LE && RI != RE; ++LI, ++RI) {
133 Attribute LA = *LI;
134 Attribute RA = *RI;
135 if (LA.isTypeAttribute() && RA.isTypeAttribute()) {
136 if (LA.getKindAsEnum() != RA.getKindAsEnum())
137 return cmpNumbers(L: LA.getKindAsEnum(), R: RA.getKindAsEnum());
138
139 Type *TyL = LA.getValueAsType();
140 Type *TyR = RA.getValueAsType();
141 if (TyL && TyR) {
142 if (int Res = cmpTypes(TyL, TyR))
143 return Res;
144 continue;
145 }
146
147 // Two pointers, at least one null, so the comparison result is
148 // independent of the value of a real pointer.
149 if (int Res = cmpNumbers(L: (uint64_t)TyL, R: (uint64_t)TyR))
150 return Res;
151 continue;
152 } else if (LA.isConstantRangeAttribute() &&
153 RA.isConstantRangeAttribute()) {
154 if (LA.getKindAsEnum() != RA.getKindAsEnum())
155 return cmpNumbers(L: LA.getKindAsEnum(), R: RA.getKindAsEnum());
156
157 if (int Res = cmpConstantRanges(L: LA.getRange(), R: RA.getRange()))
158 return Res;
159 continue;
160 } else if (LA.isConstantRangeListAttribute() &&
161 RA.isConstantRangeListAttribute()) {
162 if (LA.getKindAsEnum() != RA.getKindAsEnum())
163 return cmpNumbers(L: LA.getKindAsEnum(), R: RA.getKindAsEnum());
164
165 ArrayRef<ConstantRange> CRL = LA.getValueAsConstantRangeList();
166 ArrayRef<ConstantRange> CRR = RA.getValueAsConstantRangeList();
167 if (int Res = cmpNumbers(L: CRL.size(), R: CRR.size()))
168 return Res;
169
170 for (const auto &[L, R] : zip(t&: CRL, u&: CRR))
171 if (int Res = cmpConstantRanges(L, R))
172 return Res;
173 continue;
174 }
175 if (LA < RA)
176 return -1;
177 if (RA < LA)
178 return 1;
179 }
180 if (LI != LE)
181 return 1;
182 if (RI != RE)
183 return -1;
184 }
185 return 0;
186}
187
188int FunctionComparator::cmpMetadata(const Metadata *L,
189 const Metadata *R) const {
190 // TODO: the following routine coerce the metadata contents into constants
191 // or MDStrings before comparison.
192 // It ignores any other cases, so that the metadata nodes are considered
193 // equal even though this is not correct.
194 // We should structurally compare the metadata nodes to be perfect here.
195
196 auto *MDStringL = dyn_cast<MDString>(Val: L);
197 auto *MDStringR = dyn_cast<MDString>(Val: R);
198 if (MDStringL && MDStringR) {
199 if (MDStringL == MDStringR)
200 return 0;
201 return MDStringL->getString().compare(RHS: MDStringR->getString());
202 }
203 if (MDStringR)
204 return -1;
205 if (MDStringL)
206 return 1;
207
208 auto *CL = dyn_cast<ConstantAsMetadata>(Val: L);
209 auto *CR = dyn_cast<ConstantAsMetadata>(Val: R);
210 if (CL == CR)
211 return 0;
212 if (!CL)
213 return -1;
214 if (!CR)
215 return 1;
216 return cmpConstants(L: CL->getValue(), R: CR->getValue());
217}
218
219int FunctionComparator::cmpMDNode(const MDNode *L, const MDNode *R) const {
220 if (L == R)
221 return 0;
222 if (!L)
223 return -1;
224 if (!R)
225 return 1;
226 // TODO: Note that as this is metadata, it is possible to drop and/or merge
227 // this data when considering functions to merge. Thus this comparison would
228 // return 0 (i.e. equivalent), but merging would become more complicated
229 // because the ranges would need to be unioned. It is not likely that
230 // functions differ ONLY in this metadata if they are actually the same
231 // function semantically.
232 if (int Res = cmpNumbers(L: L->getNumOperands(), R: R->getNumOperands()))
233 return Res;
234 for (size_t I = 0; I < L->getNumOperands(); ++I)
235 if (int Res = cmpMetadata(L: L->getOperand(I), R: R->getOperand(I)))
236 return Res;
237 return 0;
238}
239
240int FunctionComparator::cmpInstMetadata(Instruction const *L,
241 Instruction const *R) const {
242 /// These metadata affects the other optimization passes by making assertions
243 /// or constraints.
244 /// Values that carry different expectations should be considered different.
245 SmallVector<std::pair<unsigned, MDNode *>> MDL, MDR;
246 L->getAllMetadataOtherThanDebugLoc(MDs&: MDL);
247 R->getAllMetadataOtherThanDebugLoc(MDs&: MDR);
248 if (MDL.size() > MDR.size())
249 return 1;
250 else if (MDL.size() < MDR.size())
251 return -1;
252 for (size_t I = 0, N = MDL.size(); I < N; ++I) {
253 auto const [KeyL, ML] = MDL[I];
254 auto const [KeyR, MR] = MDR[I];
255 if (int Res = cmpNumbers(L: KeyL, R: KeyR))
256 return Res;
257 if (int Res = cmpMDNode(L: ML, R: MR))
258 return Res;
259 }
260 return 0;
261}
262
263int FunctionComparator::cmpOperandBundlesSchema(const CallBase &LCS,
264 const CallBase &RCS) const {
265 assert(LCS.getOpcode() == RCS.getOpcode() && "Can't compare otherwise!");
266
267 if (int Res =
268 cmpNumbers(L: LCS.getNumOperandBundles(), R: RCS.getNumOperandBundles()))
269 return Res;
270
271 for (unsigned I = 0, E = LCS.getNumOperandBundles(); I != E; ++I) {
272 auto OBL = LCS.getOperandBundleAt(Index: I);
273 auto OBR = RCS.getOperandBundleAt(Index: I);
274
275 if (int Res = OBL.getTagName().compare(RHS: OBR.getTagName()))
276 return Res;
277
278 if (int Res = cmpNumbers(L: OBL.Inputs.size(), R: OBR.Inputs.size()))
279 return Res;
280 }
281
282 return 0;
283}
284
285/// Constants comparison:
286/// 1. Check whether type of L constant could be losslessly bitcasted to R
287/// type.
288/// 2. Compare constant contents.
289/// For more details see declaration comments.
290int FunctionComparator::cmpConstants(const Constant *L,
291 const Constant *R) const {
292 Type *TyL = L->getType();
293 Type *TyR = R->getType();
294
295 // Check whether types are bitcastable. This part is just re-factored
296 // Type::canLosslesslyBitCastTo method, but instead of returning true/false,
297 // we also pack into result which type is "less" for us.
298 int TypesRes = cmpTypes(TyL, TyR);
299 if (TypesRes != 0) {
300 // Types are different, but check whether we can bitcast them.
301 if (!TyL->isFirstClassType()) {
302 if (TyR->isFirstClassType())
303 return -1;
304 // Neither TyL nor TyR are values of first class type. Return the result
305 // of comparing the types
306 return TypesRes;
307 }
308 if (!TyR->isFirstClassType()) {
309 if (TyL->isFirstClassType())
310 return 1;
311 return TypesRes;
312 }
313
314 // Vector -> Vector conversions are always lossless if the two vector types
315 // have the same size, otherwise not.
316 unsigned TyLWidth = 0;
317 unsigned TyRWidth = 0;
318
319 if (auto *VecTyL = dyn_cast<VectorType>(Val: TyL))
320 TyLWidth = VecTyL->getPrimitiveSizeInBits().getFixedValue();
321 if (auto *VecTyR = dyn_cast<VectorType>(Val: TyR))
322 TyRWidth = VecTyR->getPrimitiveSizeInBits().getFixedValue();
323
324 if (TyLWidth != TyRWidth)
325 return cmpNumbers(L: TyLWidth, R: TyRWidth);
326
327 // Zero bit-width means neither TyL nor TyR are vectors.
328 if (!TyLWidth) {
329 PointerType *PTyL = dyn_cast<PointerType>(Val: TyL);
330 PointerType *PTyR = dyn_cast<PointerType>(Val: TyR);
331 if (PTyL && PTyR) {
332 unsigned AddrSpaceL = PTyL->getAddressSpace();
333 unsigned AddrSpaceR = PTyR->getAddressSpace();
334 if (int Res = cmpNumbers(L: AddrSpaceL, R: AddrSpaceR))
335 return Res;
336 }
337 if (PTyL)
338 return 1;
339 if (PTyR)
340 return -1;
341
342 // TyL and TyR aren't vectors, nor pointers. We don't know how to
343 // bitcast them.
344 return TypesRes;
345 }
346 }
347
348 // OK, types are bitcastable, now check constant contents.
349
350 if (L->isNullValue() && R->isNullValue())
351 return TypesRes;
352 if (L->isNullValue() && !R->isNullValue())
353 return 1;
354 if (!L->isNullValue() && R->isNullValue())
355 return -1;
356
357 auto GlobalValueL = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(Val: L));
358 auto GlobalValueR = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(Val: R));
359 if (GlobalValueL && GlobalValueR) {
360 return cmpGlobalValues(L: GlobalValueL, R: GlobalValueR);
361 }
362
363 if (int Res = cmpNumbers(L: L->getValueID(), R: R->getValueID()))
364 return Res;
365
366 if (const auto *SeqL = dyn_cast<ConstantDataSequential>(Val: L)) {
367 const auto *SeqR = cast<ConstantDataSequential>(Val: R);
368 // This handles ConstantDataArray and ConstantDataVector. Note that we
369 // compare the two raw data arrays, which might differ depending on the host
370 // endianness. This isn't a problem though, because the endiness of a module
371 // will affect the order of the constants, but this order is the same
372 // for a given input module and host platform.
373 return cmpMem(L: SeqL->getRawDataValues(), R: SeqR->getRawDataValues());
374 }
375
376 switch (L->getValueID()) {
377 case Value::UndefValueVal:
378 case Value::PoisonValueVal:
379 case Value::ConstantTokenNoneVal:
380 return TypesRes;
381 case Value::ConstantIntVal: {
382 const APInt &LInt = cast<ConstantInt>(Val: L)->getValue();
383 const APInt &RInt = cast<ConstantInt>(Val: R)->getValue();
384 return cmpAPInts(L: LInt, R: RInt);
385 }
386 case Value::ConstantFPVal: {
387 const APFloat &LAPF = cast<ConstantFP>(Val: L)->getValueAPF();
388 const APFloat &RAPF = cast<ConstantFP>(Val: R)->getValueAPF();
389 return cmpAPFloats(L: LAPF, R: RAPF);
390 }
391 case Value::ConstantArrayVal: {
392 const ConstantArray *LA = cast<ConstantArray>(Val: L);
393 const ConstantArray *RA = cast<ConstantArray>(Val: R);
394 uint64_t NumElementsL = cast<ArrayType>(Val: TyL)->getNumElements();
395 uint64_t NumElementsR = cast<ArrayType>(Val: TyR)->getNumElements();
396 if (int Res = cmpNumbers(L: NumElementsL, R: NumElementsR))
397 return Res;
398 for (uint64_t i = 0; i < NumElementsL; ++i) {
399 if (int Res = cmpConstants(L: cast<Constant>(Val: LA->getOperand(i_nocapture: i)),
400 R: cast<Constant>(Val: RA->getOperand(i_nocapture: i))))
401 return Res;
402 }
403 return 0;
404 }
405 case Value::ConstantStructVal: {
406 const ConstantStruct *LS = cast<ConstantStruct>(Val: L);
407 const ConstantStruct *RS = cast<ConstantStruct>(Val: R);
408 unsigned NumElementsL = cast<StructType>(Val: TyL)->getNumElements();
409 unsigned NumElementsR = cast<StructType>(Val: TyR)->getNumElements();
410 if (int Res = cmpNumbers(L: NumElementsL, R: NumElementsR))
411 return Res;
412 for (unsigned i = 0; i != NumElementsL; ++i) {
413 if (int Res = cmpConstants(L: cast<Constant>(Val: LS->getOperand(i_nocapture: i)),
414 R: cast<Constant>(Val: RS->getOperand(i_nocapture: i))))
415 return Res;
416 }
417 return 0;
418 }
419 case Value::ConstantVectorVal: {
420 const ConstantVector *LV = cast<ConstantVector>(Val: L);
421 const ConstantVector *RV = cast<ConstantVector>(Val: R);
422 unsigned NumElementsL = cast<FixedVectorType>(Val: TyL)->getNumElements();
423 unsigned NumElementsR = cast<FixedVectorType>(Val: TyR)->getNumElements();
424 if (int Res = cmpNumbers(L: NumElementsL, R: NumElementsR))
425 return Res;
426 for (uint64_t i = 0; i < NumElementsL; ++i) {
427 if (int Res = cmpConstants(L: cast<Constant>(Val: LV->getOperand(i_nocapture: i)),
428 R: cast<Constant>(Val: RV->getOperand(i_nocapture: i))))
429 return Res;
430 }
431 return 0;
432 }
433 case Value::ConstantExprVal: {
434 const ConstantExpr *LE = cast<ConstantExpr>(Val: L);
435 const ConstantExpr *RE = cast<ConstantExpr>(Val: R);
436 if (int Res = cmpNumbers(L: LE->getOpcode(), R: RE->getOpcode()))
437 return Res;
438 unsigned NumOperandsL = LE->getNumOperands();
439 unsigned NumOperandsR = RE->getNumOperands();
440 if (int Res = cmpNumbers(L: NumOperandsL, R: NumOperandsR))
441 return Res;
442 for (unsigned i = 0; i < NumOperandsL; ++i) {
443 if (int Res = cmpConstants(L: cast<Constant>(Val: LE->getOperand(i_nocapture: i)),
444 R: cast<Constant>(Val: RE->getOperand(i_nocapture: i))))
445 return Res;
446 }
447 if (auto *GEPL = dyn_cast<GEPOperator>(Val: LE)) {
448 auto *GEPR = cast<GEPOperator>(Val: RE);
449 if (int Res = cmpTypes(TyL: GEPL->getSourceElementType(),
450 TyR: GEPR->getSourceElementType()))
451 return Res;
452 if (int Res = cmpNumbers(L: GEPL->getNoWrapFlags().getRaw(),
453 R: GEPR->getNoWrapFlags().getRaw()))
454 return Res;
455
456 std::optional<ConstantRange> InRangeL = GEPL->getInRange();
457 std::optional<ConstantRange> InRangeR = GEPR->getInRange();
458 if (InRangeL) {
459 if (!InRangeR)
460 return 1;
461 if (int Res = cmpConstantRanges(L: *InRangeL, R: *InRangeR))
462 return Res;
463 } else if (InRangeR) {
464 return -1;
465 }
466 }
467 if (auto *OBOL = dyn_cast<OverflowingBinaryOperator>(Val: LE)) {
468 auto *OBOR = cast<OverflowingBinaryOperator>(Val: RE);
469 if (int Res =
470 cmpNumbers(L: OBOL->hasNoUnsignedWrap(), R: OBOR->hasNoUnsignedWrap()))
471 return Res;
472 if (int Res =
473 cmpNumbers(L: OBOL->hasNoSignedWrap(), R: OBOR->hasNoSignedWrap()))
474 return Res;
475 }
476 return 0;
477 }
478 case Value::BlockAddressVal: {
479 const BlockAddress *LBA = cast<BlockAddress>(Val: L);
480 const BlockAddress *RBA = cast<BlockAddress>(Val: R);
481 if (int Res = cmpValues(L: LBA->getFunction(), R: RBA->getFunction()))
482 return Res;
483 if (LBA->getFunction() == RBA->getFunction()) {
484 // They are BBs in the same function. Order by which comes first in the
485 // BB order of the function. This order is deterministic.
486 Function *F = LBA->getFunction();
487 BasicBlock *LBB = LBA->getBasicBlock();
488 BasicBlock *RBB = RBA->getBasicBlock();
489 if (LBB == RBB)
490 return 0;
491 for (BasicBlock &BB : *F) {
492 if (&BB == LBB) {
493 assert(&BB != RBB);
494 return -1;
495 }
496 if (&BB == RBB)
497 return 1;
498 }
499 llvm_unreachable("Basic Block Address does not point to a basic block in "
500 "its function.");
501 return -1;
502 } else {
503 // cmpValues said the functions are the same. So because they aren't
504 // literally the same pointer, they must respectively be the left and
505 // right functions.
506 assert(LBA->getFunction() == FnL && RBA->getFunction() == FnR);
507 // cmpValues will tell us if these are equivalent BasicBlocks, in the
508 // context of their respective functions.
509 return cmpValues(L: LBA->getBasicBlock(), R: RBA->getBasicBlock());
510 }
511 }
512 case Value::DSOLocalEquivalentVal: {
513 // dso_local_equivalent is functionally equivalent to whatever it points to.
514 // This means the behavior of the IR should be the exact same as if the
515 // function was referenced directly rather than through a
516 // dso_local_equivalent.
517 const auto *LEquiv = cast<DSOLocalEquivalent>(Val: L);
518 const auto *REquiv = cast<DSOLocalEquivalent>(Val: R);
519 return cmpGlobalValues(L: LEquiv->getGlobalValue(), R: REquiv->getGlobalValue());
520 }
521 case Value::ConstantPtrAuthVal: {
522 // Handle authenticated pointer constants produced by ConstantPtrAuth::get.
523 const ConstantPtrAuth *LPA = cast<ConstantPtrAuth>(Val: L);
524 const ConstantPtrAuth *RPA = cast<ConstantPtrAuth>(Val: R);
525 if (int Res = cmpConstants(L: LPA->getPointer(), R: RPA->getPointer()))
526 return Res;
527 if (int Res = cmpConstants(L: LPA->getKey(), R: RPA->getKey()))
528 return Res;
529 if (int Res =
530 cmpConstants(L: LPA->getDiscriminator(), R: RPA->getDiscriminator()))
531 return Res;
532 return cmpConstants(L: LPA->getAddrDiscriminator(),
533 R: RPA->getAddrDiscriminator());
534 }
535 default: // Unknown constant, abort.
536 LLVM_DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n");
537 llvm_unreachable("Constant ValueID not recognized.");
538 return -1;
539 }
540}
541
542int FunctionComparator::cmpGlobalValues(GlobalValue *L, GlobalValue *R) const {
543 uint64_t LNumber = GlobalNumbers->getNumber(Global: L);
544 uint64_t RNumber = GlobalNumbers->getNumber(Global: R);
545 return cmpNumbers(L: LNumber, R: RNumber);
546}
547
548/// cmpType - compares two types,
549/// defines total ordering among the types set.
550/// See method declaration comments for more details.
551int FunctionComparator::cmpTypes(Type *TyL, Type *TyR) const {
552 PointerType *PTyL = dyn_cast<PointerType>(Val: TyL);
553 PointerType *PTyR = dyn_cast<PointerType>(Val: TyR);
554
555 const DataLayout &DL = FnL->getDataLayout();
556 if (PTyL && PTyL->getAddressSpace() == 0)
557 TyL = DL.getIntPtrType(TyL);
558 if (PTyR && PTyR->getAddressSpace() == 0)
559 TyR = DL.getIntPtrType(TyR);
560
561 if (TyL == TyR)
562 return 0;
563
564 if (int Res = cmpNumbers(L: TyL->getTypeID(), R: TyR->getTypeID()))
565 return Res;
566
567 switch (TyL->getTypeID()) {
568 default:
569 llvm_unreachable("Unknown type!");
570 case Type::IntegerTyID:
571 return cmpNumbers(L: cast<IntegerType>(Val: TyL)->getBitWidth(),
572 R: cast<IntegerType>(Val: TyR)->getBitWidth());
573 // TyL == TyR would have returned true earlier, because types are uniqued.
574 case Type::VoidTyID:
575 case Type::FloatTyID:
576 case Type::DoubleTyID:
577 case Type::X86_FP80TyID:
578 case Type::FP128TyID:
579 case Type::PPC_FP128TyID:
580 case Type::LabelTyID:
581 case Type::MetadataTyID:
582 case Type::TokenTyID:
583 return 0;
584
585 case Type::PointerTyID:
586 assert(PTyL && PTyR && "Both types must be pointers here.");
587 return cmpNumbers(L: PTyL->getAddressSpace(), R: PTyR->getAddressSpace());
588
589 case Type::StructTyID: {
590 StructType *STyL = cast<StructType>(Val: TyL);
591 StructType *STyR = cast<StructType>(Val: TyR);
592 if (STyL->getNumElements() != STyR->getNumElements())
593 return cmpNumbers(L: STyL->getNumElements(), R: STyR->getNumElements());
594
595 if (STyL->isPacked() != STyR->isPacked())
596 return cmpNumbers(L: STyL->isPacked(), R: STyR->isPacked());
597
598 for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) {
599 if (int Res = cmpTypes(TyL: STyL->getElementType(N: i), TyR: STyR->getElementType(N: i)))
600 return Res;
601 }
602 return 0;
603 }
604
605 case Type::FunctionTyID: {
606 FunctionType *FTyL = cast<FunctionType>(Val: TyL);
607 FunctionType *FTyR = cast<FunctionType>(Val: TyR);
608 if (FTyL->getNumParams() != FTyR->getNumParams())
609 return cmpNumbers(L: FTyL->getNumParams(), R: FTyR->getNumParams());
610
611 if (FTyL->isVarArg() != FTyR->isVarArg())
612 return cmpNumbers(L: FTyL->isVarArg(), R: FTyR->isVarArg());
613
614 if (int Res = cmpTypes(TyL: FTyL->getReturnType(), TyR: FTyR->getReturnType()))
615 return Res;
616
617 for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) {
618 if (int Res = cmpTypes(TyL: FTyL->getParamType(i), TyR: FTyR->getParamType(i)))
619 return Res;
620 }
621 return 0;
622 }
623
624 case Type::ArrayTyID: {
625 auto *STyL = cast<ArrayType>(Val: TyL);
626 auto *STyR = cast<ArrayType>(Val: TyR);
627 if (STyL->getNumElements() != STyR->getNumElements())
628 return cmpNumbers(L: STyL->getNumElements(), R: STyR->getNumElements());
629 return cmpTypes(TyL: STyL->getElementType(), TyR: STyR->getElementType());
630 }
631 case Type::FixedVectorTyID:
632 case Type::ScalableVectorTyID: {
633 auto *STyL = cast<VectorType>(Val: TyL);
634 auto *STyR = cast<VectorType>(Val: TyR);
635 if (STyL->getElementCount().isScalable() !=
636 STyR->getElementCount().isScalable())
637 return cmpNumbers(L: STyL->getElementCount().isScalable(),
638 R: STyR->getElementCount().isScalable());
639 if (STyL->getElementCount() != STyR->getElementCount())
640 return cmpNumbers(L: STyL->getElementCount().getKnownMinValue(),
641 R: STyR->getElementCount().getKnownMinValue());
642 return cmpTypes(TyL: STyL->getElementType(), TyR: STyR->getElementType());
643 }
644 }
645}
646
647// Determine whether the two operations are the same except that pointer-to-A
648// and pointer-to-B are equivalent. This should be kept in sync with
649// Instruction::isSameOperationAs.
650// Read method declaration comments for more details.
651int FunctionComparator::cmpOperations(const Instruction *L,
652 const Instruction *R,
653 bool &needToCmpOperands) const {
654 needToCmpOperands = true;
655 if (int Res = cmpValues(L, R))
656 return Res;
657
658 // Differences from Instruction::isSameOperationAs:
659 // * replace type comparison with calls to cmpTypes.
660 // * we test for I->getRawSubclassOptionalData (nuw/nsw/tail) at the top.
661 // * because of the above, we don't test for the tail bit on calls later on.
662 if (int Res = cmpNumbers(L: L->getOpcode(), R: R->getOpcode()))
663 return Res;
664
665 if (const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(Val: L)) {
666 needToCmpOperands = false;
667 const GetElementPtrInst *GEPR = cast<GetElementPtrInst>(Val: R);
668 if (int Res =
669 cmpValues(L: GEPL->getPointerOperand(), R: GEPR->getPointerOperand()))
670 return Res;
671 return cmpGEPs(GEPL, GEPR);
672 }
673
674 if (int Res = cmpNumbers(L: L->getNumOperands(), R: R->getNumOperands()))
675 return Res;
676
677 if (int Res = cmpTypes(TyL: L->getType(), TyR: R->getType()))
678 return Res;
679
680 if (int Res = cmpNumbers(L: L->getRawSubclassOptionalData(),
681 R: R->getRawSubclassOptionalData()))
682 return Res;
683
684 // We have two instructions of identical opcode and #operands. Check to see
685 // if all operands are the same type
686 for (unsigned i = 0, e = L->getNumOperands(); i != e; ++i) {
687 if (int Res =
688 cmpTypes(TyL: L->getOperand(i)->getType(), TyR: R->getOperand(i)->getType()))
689 return Res;
690 }
691
692 // Check special state that is a part of some instructions.
693 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val: L)) {
694 if (int Res = cmpTypes(TyL: AI->getAllocatedType(),
695 TyR: cast<AllocaInst>(Val: R)->getAllocatedType()))
696 return Res;
697 return cmpAligns(L: AI->getAlign(), R: cast<AllocaInst>(Val: R)->getAlign());
698 }
699 if (const LoadInst *LI = dyn_cast<LoadInst>(Val: L)) {
700 if (int Res = cmpNumbers(L: LI->isVolatile(), R: cast<LoadInst>(Val: R)->isVolatile()))
701 return Res;
702 if (int Res = cmpAligns(L: LI->getAlign(), R: cast<LoadInst>(Val: R)->getAlign()))
703 return Res;
704 if (int Res =
705 cmpOrderings(L: LI->getOrdering(), R: cast<LoadInst>(Val: R)->getOrdering()))
706 return Res;
707 if (int Res = cmpNumbers(L: LI->getSyncScopeID(),
708 R: cast<LoadInst>(Val: R)->getSyncScopeID()))
709 return Res;
710 return cmpInstMetadata(L, R);
711 }
712 if (const StoreInst *SI = dyn_cast<StoreInst>(Val: L)) {
713 if (int Res =
714 cmpNumbers(L: SI->isVolatile(), R: cast<StoreInst>(Val: R)->isVolatile()))
715 return Res;
716 if (int Res = cmpAligns(L: SI->getAlign(), R: cast<StoreInst>(Val: R)->getAlign()))
717 return Res;
718 if (int Res =
719 cmpOrderings(L: SI->getOrdering(), R: cast<StoreInst>(Val: R)->getOrdering()))
720 return Res;
721 return cmpNumbers(L: SI->getSyncScopeID(),
722 R: cast<StoreInst>(Val: R)->getSyncScopeID());
723 }
724 if (const CmpInst *CI = dyn_cast<CmpInst>(Val: L))
725 return cmpNumbers(L: CI->getPredicate(), R: cast<CmpInst>(Val: R)->getPredicate());
726 if (auto *CBL = dyn_cast<CallBase>(Val: L)) {
727 auto *CBR = cast<CallBase>(Val: R);
728 if (int Res = cmpNumbers(L: CBL->getCallingConv(), R: CBR->getCallingConv()))
729 return Res;
730 if (int Res = cmpAttrs(L: CBL->getAttributes(), R: CBR->getAttributes()))
731 return Res;
732 if (int Res = cmpOperandBundlesSchema(LCS: *CBL, RCS: *CBR))
733 return Res;
734 if (const CallInst *CI = dyn_cast<CallInst>(Val: L))
735 if (int Res = cmpNumbers(L: CI->getTailCallKind(),
736 R: cast<CallInst>(Val: R)->getTailCallKind()))
737 return Res;
738 return cmpMDNode(L: L->getMetadata(KindID: LLVMContext::MD_range),
739 R: R->getMetadata(KindID: LLVMContext::MD_range));
740 }
741 if (const SwitchInst *SI = dyn_cast<SwitchInst>(Val: L)) {
742 for (auto [LCase, RCase] : zip(t: SI->cases(), u: cast<SwitchInst>(Val: R)->cases()))
743 if (int Res = cmpConstants(L: LCase.getCaseValue(), R: RCase.getCaseValue()))
744 return Res;
745 return 0;
746 }
747 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Val: L)) {
748 ArrayRef<unsigned> LIndices = IVI->getIndices();
749 ArrayRef<unsigned> RIndices = cast<InsertValueInst>(Val: R)->getIndices();
750 if (int Res = cmpNumbers(L: LIndices.size(), R: RIndices.size()))
751 return Res;
752 for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
753 if (int Res = cmpNumbers(L: LIndices[i], R: RIndices[i]))
754 return Res;
755 }
756 return 0;
757 }
758 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Val: L)) {
759 ArrayRef<unsigned> LIndices = EVI->getIndices();
760 ArrayRef<unsigned> RIndices = cast<ExtractValueInst>(Val: R)->getIndices();
761 if (int Res = cmpNumbers(L: LIndices.size(), R: RIndices.size()))
762 return Res;
763 for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
764 if (int Res = cmpNumbers(L: LIndices[i], R: RIndices[i]))
765 return Res;
766 }
767 }
768 if (const FenceInst *FI = dyn_cast<FenceInst>(Val: L)) {
769 if (int Res =
770 cmpOrderings(L: FI->getOrdering(), R: cast<FenceInst>(Val: R)->getOrdering()))
771 return Res;
772 return cmpNumbers(L: FI->getSyncScopeID(),
773 R: cast<FenceInst>(Val: R)->getSyncScopeID());
774 }
775 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(Val: L)) {
776 if (int Res = cmpNumbers(L: CXI->isVolatile(),
777 R: cast<AtomicCmpXchgInst>(Val: R)->isVolatile()))
778 return Res;
779 if (int Res =
780 cmpNumbers(L: CXI->isWeak(), R: cast<AtomicCmpXchgInst>(Val: R)->isWeak()))
781 return Res;
782 if (int Res =
783 cmpOrderings(L: CXI->getSuccessOrdering(),
784 R: cast<AtomicCmpXchgInst>(Val: R)->getSuccessOrdering()))
785 return Res;
786 if (int Res =
787 cmpOrderings(L: CXI->getFailureOrdering(),
788 R: cast<AtomicCmpXchgInst>(Val: R)->getFailureOrdering()))
789 return Res;
790 return cmpNumbers(L: CXI->getSyncScopeID(),
791 R: cast<AtomicCmpXchgInst>(Val: R)->getSyncScopeID());
792 }
793 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(Val: L)) {
794 if (int Res = cmpNumbers(L: RMWI->getOperation(),
795 R: cast<AtomicRMWInst>(Val: R)->getOperation()))
796 return Res;
797 if (int Res = cmpNumbers(L: RMWI->isVolatile(),
798 R: cast<AtomicRMWInst>(Val: R)->isVolatile()))
799 return Res;
800 if (int Res = cmpOrderings(L: RMWI->getOrdering(),
801 R: cast<AtomicRMWInst>(Val: R)->getOrdering()))
802 return Res;
803 return cmpNumbers(L: RMWI->getSyncScopeID(),
804 R: cast<AtomicRMWInst>(Val: R)->getSyncScopeID());
805 }
806 if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Val: L)) {
807 ArrayRef<int> LMask = SVI->getShuffleMask();
808 ArrayRef<int> RMask = cast<ShuffleVectorInst>(Val: R)->getShuffleMask();
809 if (int Res = cmpNumbers(L: LMask.size(), R: RMask.size()))
810 return Res;
811 for (size_t i = 0, e = LMask.size(); i != e; ++i) {
812 if (int Res = cmpNumbers(L: LMask[i], R: RMask[i]))
813 return Res;
814 }
815 }
816 if (const PHINode *PNL = dyn_cast<PHINode>(Val: L)) {
817 const PHINode *PNR = cast<PHINode>(Val: R);
818 // Ensure that in addition to the incoming values being identical
819 // (checked by the caller of this function), the incoming blocks
820 // are also identical.
821 for (unsigned i = 0, e = PNL->getNumIncomingValues(); i != e; ++i) {
822 if (int Res =
823 cmpValues(L: PNL->getIncomingBlock(i), R: PNR->getIncomingBlock(i)))
824 return Res;
825 }
826 }
827 return 0;
828}
829
830// Determine whether two GEP operations perform the same underlying arithmetic.
831// Read method declaration comments for more details.
832int FunctionComparator::cmpGEPs(const GEPOperator *GEPL,
833 const GEPOperator *GEPR) const {
834 unsigned int ASL = GEPL->getPointerAddressSpace();
835 unsigned int ASR = GEPR->getPointerAddressSpace();
836
837 if (int Res = cmpNumbers(L: ASL, R: ASR))
838 return Res;
839
840 // When we have target data, we can reduce the GEP down to the value in bytes
841 // added to the address.
842 const DataLayout &DL = FnL->getDataLayout();
843 unsigned OffsetBitWidth = DL.getIndexSizeInBits(AS: ASL);
844 APInt OffsetL(OffsetBitWidth, 0), OffsetR(OffsetBitWidth, 0);
845 if (GEPL->accumulateConstantOffset(DL, Offset&: OffsetL) &&
846 GEPR->accumulateConstantOffset(DL, Offset&: OffsetR))
847 return cmpAPInts(L: OffsetL, R: OffsetR);
848 if (int Res =
849 cmpTypes(TyL: GEPL->getSourceElementType(), TyR: GEPR->getSourceElementType()))
850 return Res;
851
852 if (int Res = cmpNumbers(L: GEPL->getNumOperands(), R: GEPR->getNumOperands()))
853 return Res;
854
855 for (unsigned i = 0, e = GEPL->getNumOperands(); i != e; ++i) {
856 if (int Res = cmpValues(L: GEPL->getOperand(i_nocapture: i), R: GEPR->getOperand(i_nocapture: i)))
857 return Res;
858 }
859
860 return 0;
861}
862
863int FunctionComparator::cmpInlineAsm(const InlineAsm *L,
864 const InlineAsm *R) const {
865 // InlineAsm's are uniqued. If they are the same pointer, obviously they are
866 // the same, otherwise compare the fields.
867 if (L == R)
868 return 0;
869 if (int Res = cmpTypes(TyL: L->getFunctionType(), TyR: R->getFunctionType()))
870 return Res;
871 if (int Res = cmpMem(L: L->getAsmString(), R: R->getAsmString()))
872 return Res;
873 if (int Res = cmpMem(L: L->getConstraintString(), R: R->getConstraintString()))
874 return Res;
875 if (int Res = cmpNumbers(L: L->hasSideEffects(), R: R->hasSideEffects()))
876 return Res;
877 if (int Res = cmpNumbers(L: L->isAlignStack(), R: R->isAlignStack()))
878 return Res;
879 if (int Res = cmpNumbers(L: L->getDialect(), R: R->getDialect()))
880 return Res;
881 assert(L->getFunctionType() != R->getFunctionType());
882 return 0;
883}
884
885/// Compare two values used by the two functions under pair-wise comparison. If
886/// this is the first time the values are seen, they're added to the mapping so
887/// that we will detect mismatches on next use.
888/// See comments in declaration for more details.
889int FunctionComparator::cmpValues(const Value *L, const Value *R) const {
890 // Catch self-reference case.
891 if (L == FnL) {
892 if (R == FnR)
893 return 0;
894 return -1;
895 }
896 if (R == FnR) {
897 if (L == FnL)
898 return 0;
899 return 1;
900 }
901
902 const Constant *ConstL = dyn_cast<Constant>(Val: L);
903 const Constant *ConstR = dyn_cast<Constant>(Val: R);
904 if (ConstL && ConstR) {
905 if (L == R)
906 return 0;
907 return cmpConstants(L: ConstL, R: ConstR);
908 }
909
910 if (ConstL)
911 return 1;
912 if (ConstR)
913 return -1;
914
915 const MetadataAsValue *MetadataValueL = dyn_cast<MetadataAsValue>(Val: L);
916 const MetadataAsValue *MetadataValueR = dyn_cast<MetadataAsValue>(Val: R);
917 if (MetadataValueL && MetadataValueR) {
918 if (MetadataValueL == MetadataValueR)
919 return 0;
920
921 return cmpMetadata(L: MetadataValueL->getMetadata(),
922 R: MetadataValueR->getMetadata());
923 }
924
925 if (MetadataValueL)
926 return 1;
927 if (MetadataValueR)
928 return -1;
929
930 const InlineAsm *InlineAsmL = dyn_cast<InlineAsm>(Val: L);
931 const InlineAsm *InlineAsmR = dyn_cast<InlineAsm>(Val: R);
932
933 if (InlineAsmL && InlineAsmR)
934 return cmpInlineAsm(L: InlineAsmL, R: InlineAsmR);
935 if (InlineAsmL)
936 return 1;
937 if (InlineAsmR)
938 return -1;
939
940 auto LeftSN = sn_mapL.insert(KV: std::make_pair(x&: L, y: sn_mapL.size())),
941 RightSN = sn_mapR.insert(KV: std::make_pair(x&: R, y: sn_mapR.size()));
942
943 return cmpNumbers(L: LeftSN.first->second, R: RightSN.first->second);
944}
945
946// Test whether two basic blocks have equivalent behaviour.
947int FunctionComparator::cmpBasicBlocks(const BasicBlock *BBL,
948 const BasicBlock *BBR) const {
949 BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
950 BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();
951
952 do {
953 bool needToCmpOperands = true;
954 if (int Res = cmpOperations(L: &*InstL, R: &*InstR, needToCmpOperands))
955 return Res;
956 if (needToCmpOperands) {
957 assert(InstL->getNumOperands() == InstR->getNumOperands());
958
959 for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
960 Value *OpL = InstL->getOperand(i);
961 Value *OpR = InstR->getOperand(i);
962 if (int Res = cmpValues(L: OpL, R: OpR))
963 return Res;
964 // cmpValues should ensure this is true.
965 assert(cmpTypes(OpL->getType(), OpR->getType()) == 0);
966 }
967 }
968
969 ++InstL;
970 ++InstR;
971 } while (InstL != InstLE && InstR != InstRE);
972
973 if (InstL != InstLE && InstR == InstRE)
974 return 1;
975 if (InstL == InstLE && InstR != InstRE)
976 return -1;
977 return 0;
978}
979
980int FunctionComparator::compareSignature() const {
981 if (int Res = cmpAttrs(L: FnL->getAttributes(), R: FnR->getAttributes()))
982 return Res;
983
984 if (int Res = cmpNumbers(L: FnL->hasGC(), R: FnR->hasGC()))
985 return Res;
986
987 if (FnL->hasGC()) {
988 if (int Res = cmpMem(L: FnL->getGC(), R: FnR->getGC()))
989 return Res;
990 }
991
992 if (int Res = cmpNumbers(L: FnL->hasSection(), R: FnR->hasSection()))
993 return Res;
994
995 if (FnL->hasSection()) {
996 if (int Res = cmpMem(L: FnL->getSection(), R: FnR->getSection()))
997 return Res;
998 }
999
1000 if (int Res = cmpNumbers(L: FnL->isVarArg(), R: FnR->isVarArg()))
1001 return Res;
1002
1003 // TODO: if it's internal and only used in direct calls, we could handle this
1004 // case too.
1005 if (int Res = cmpNumbers(L: FnL->getCallingConv(), R: FnR->getCallingConv()))
1006 return Res;
1007
1008 if (int Res = cmpTypes(TyL: FnL->getFunctionType(), TyR: FnR->getFunctionType()))
1009 return Res;
1010
1011 assert(FnL->arg_size() == FnR->arg_size() &&
1012 "Identically typed functions have different numbers of args!");
1013
1014 // Visit the arguments so that they get enumerated in the order they're
1015 // passed in.
1016 for (Function::const_arg_iterator ArgLI = FnL->arg_begin(),
1017 ArgRI = FnR->arg_begin(),
1018 ArgLE = FnL->arg_end();
1019 ArgLI != ArgLE; ++ArgLI, ++ArgRI) {
1020 if (cmpValues(L: &*ArgLI, R: &*ArgRI) != 0)
1021 llvm_unreachable("Arguments repeat!");
1022 }
1023 return 0;
1024}
1025
1026// Test whether the two functions have equivalent behaviour.
1027int FunctionComparator::compare() {
1028 beginCompare();
1029
1030 if (int Res = compareSignature())
1031 return Res;
1032
1033 // We do a CFG-ordered walk since the actual ordering of the blocks in the
1034 // linked list is immaterial. Our walk starts at the entry block for both
1035 // functions, then takes each block from each terminator in order. As an
1036 // artifact, this also means that unreachable blocks are ignored.
1037 SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
1038 SmallPtrSet<const BasicBlock *, 32> VisitedBBs; // in terms of F1.
1039
1040 FnLBBs.push_back(Elt: &FnL->getEntryBlock());
1041 FnRBBs.push_back(Elt: &FnR->getEntryBlock());
1042
1043 VisitedBBs.insert(Ptr: FnLBBs[0]);
1044 while (!FnLBBs.empty()) {
1045 const BasicBlock *BBL = FnLBBs.pop_back_val();
1046 const BasicBlock *BBR = FnRBBs.pop_back_val();
1047
1048 if (int Res = cmpValues(L: BBL, R: BBR))
1049 return Res;
1050
1051 if (int Res = cmpBasicBlocks(BBL, BBR))
1052 return Res;
1053
1054 const Instruction *TermL = BBL->getTerminator();
1055 const Instruction *TermR = BBR->getTerminator();
1056
1057 assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
1058 for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
1059 if (!VisitedBBs.insert(Ptr: TermL->getSuccessor(Idx: i)).second)
1060 continue;
1061
1062 FnLBBs.push_back(Elt: TermL->getSuccessor(Idx: i));
1063 FnRBBs.push_back(Elt: TermR->getSuccessor(Idx: i));
1064 }
1065 }
1066 return 0;
1067}
1068