1//===- InstrProf.cpp - Instrumented profiling format support --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains support for clang's instrumentation based PGO and
10// coverage.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/ProfileData/InstrProf.h"
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/ADT/SmallVector.h"
17#include "llvm/ADT/StringExtras.h"
18#include "llvm/ADT/StringRef.h"
19#include "llvm/Config/config.h"
20#include "llvm/IR/Constant.h"
21#include "llvm/IR/Constants.h"
22#include "llvm/IR/Function.h"
23#include "llvm/IR/GlobalValue.h"
24#include "llvm/IR/GlobalVariable.h"
25#include "llvm/IR/Instruction.h"
26#include "llvm/IR/LLVMContext.h"
27#include "llvm/IR/MDBuilder.h"
28#include "llvm/IR/Metadata.h"
29#include "llvm/IR/Module.h"
30#include "llvm/IR/ProfDataUtils.h"
31#include "llvm/IR/Type.h"
32#include "llvm/ProfileData/InstrProfReader.h"
33#include "llvm/ProfileData/SampleProf.h"
34#include "llvm/Support/Casting.h"
35#include "llvm/Support/CommandLine.h"
36#include "llvm/Support/Compiler.h"
37#include "llvm/Support/Compression.h"
38#include "llvm/Support/Debug.h"
39#include "llvm/Support/Endian.h"
40#include "llvm/Support/Error.h"
41#include "llvm/Support/ErrorHandling.h"
42#include "llvm/Support/LEB128.h"
43#include "llvm/Support/MathExtras.h"
44#include "llvm/Support/Path.h"
45#include "llvm/Support/SwapByteOrder.h"
46#include "llvm/Support/VirtualFileSystem.h"
47#include "llvm/Support/raw_ostream.h"
48#include "llvm/TargetParser/Triple.h"
49#include <algorithm>
50#include <cassert>
51#include <cstddef>
52#include <cstdint>
53#include <cstring>
54#include <memory>
55#include <string>
56#include <system_error>
57#include <type_traits>
58#include <utility>
59#include <vector>
60
61using namespace llvm;
62
63#define DEBUG_TYPE "instrprof"
64
65static cl::opt<bool> StaticFuncFullModulePrefix(
66 "static-func-full-module-prefix", cl::init(Val: true), cl::Hidden,
67 cl::desc("Use full module build paths in the profile counter names for "
68 "static functions."));
69
// This option is tailored to users that have different top-level directories
// in profile-gen and profile-use compilation. Users need to specify the number
// of levels to strip. A value larger than the number of directories in the
// source file path will strip all the directory names and leave only the
// basename.
//
// Note that the current ThinLTO module importing for indirect calls assumes
// the source directory name is not stripped. A non-zero option value here
// can potentially prevent some inter-module indirect-call promotions.
78static cl::opt<unsigned> StaticFuncStripDirNamePrefix(
79 "static-func-strip-dirname-prefix", cl::init(Val: 0), cl::Hidden,
80 cl::desc("Strip specified level of directory name from source path in "
81 "the profile counter name for static functions."));
82
83static std::string getInstrProfErrString(instrprof_error Err,
84 const std::string &ErrMsg = "") {
85 std::string Msg;
86 raw_string_ostream OS(Msg);
87
88 switch (Err) {
89 case instrprof_error::success:
90 OS << "success";
91 break;
92 case instrprof_error::eof:
    OS << "end of file";
94 break;
95 case instrprof_error::unrecognized_format:
96 OS << "unrecognized instrumentation profile encoding format";
97 break;
98 case instrprof_error::bad_magic:
99 OS << "invalid instrumentation profile data (bad magic)";
100 break;
101 case instrprof_error::bad_header:
102 OS << "invalid instrumentation profile data (file header is corrupt)";
103 break;
104 case instrprof_error::unsupported_version:
105 OS << "unsupported instrumentation profile format version";
106 break;
107 case instrprof_error::unsupported_hash_type:
108 OS << "unsupported instrumentation profile hash type";
109 break;
110 case instrprof_error::too_large:
111 OS << "too much profile data";
112 break;
113 case instrprof_error::truncated:
114 OS << "truncated profile data";
115 break;
116 case instrprof_error::malformed:
117 OS << "malformed instrumentation profile data";
118 break;
119 case instrprof_error::missing_correlation_info:
120 OS << "debug info/binary for correlation is required";
121 break;
122 case instrprof_error::unexpected_correlation_info:
123 OS << "debug info/binary for correlation is not necessary";
124 break;
125 case instrprof_error::unable_to_correlate_profile:
126 OS << "unable to correlate profile";
127 break;
128 case instrprof_error::invalid_prof:
129 OS << "invalid profile created. Please file a bug "
130 "at: " BUG_REPORT_URL
131 " and include the profraw files that caused this error.";
132 break;
133 case instrprof_error::unknown_function:
134 OS << "no profile data available for function";
135 break;
136 case instrprof_error::hash_mismatch:
137 OS << "function control flow change detected (hash mismatch)";
138 break;
139 case instrprof_error::count_mismatch:
140 OS << "function basic block count change detected (counter mismatch)";
141 break;
142 case instrprof_error::bitmap_mismatch:
143 OS << "function bitmap size change detected (bitmap size mismatch)";
144 break;
145 case instrprof_error::counter_overflow:
146 OS << "counter overflow";
147 break;
148 case instrprof_error::value_site_count_mismatch:
149 OS << "function value site count change detected (counter mismatch)";
150 break;
151 case instrprof_error::compress_failed:
152 OS << "failed to compress data (zlib)";
153 break;
154 case instrprof_error::uncompress_failed:
155 OS << "failed to uncompress data (zlib)";
156 break;
157 case instrprof_error::empty_raw_profile:
158 OS << "empty raw profile file";
159 break;
160 case instrprof_error::zlib_unavailable:
161 OS << "profile uses zlib compression but the profile reader was built "
162 "without zlib support";
163 break;
164 case instrprof_error::raw_profile_version_mismatch:
165 OS << "raw profile version mismatch";
166 break;
167 case instrprof_error::counter_value_too_large:
168 OS << "excessively large counter value suggests corrupted profile data";
169 break;
170 }
171
172 // If optional error message is not empty, append it to the message.
173 if (!ErrMsg.empty())
174 OS << ": " << ErrMsg;
175
176 return OS.str();
177}
178
179namespace {
180
181// FIXME: This class is only here to support the transition to llvm::Error. It
182// will be removed once this transition is complete. Clients should prefer to
183// deal with the Error value directly, rather than converting to error_code.
184class InstrProfErrorCategoryType : public std::error_category {
185 const char *name() const noexcept override { return "llvm.instrprof"; }
186
187 std::string message(int IE) const override {
188 return getInstrProfErrString(Err: static_cast<instrprof_error>(IE));
189 }
190};
191
192} // end anonymous namespace
193
194const std::error_category &llvm::instrprof_category() {
195 static InstrProfErrorCategoryType ErrorCategory;
196 return ErrorCategory;
197}
198
199namespace {
200
201const char *InstrProfSectNameCommon[] = {
202#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix) \
203 SectNameCommon,
204#include "llvm/ProfileData/InstrProfData.inc"
205};
206
207const char *InstrProfSectNameCoff[] = {
208#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix) \
209 SectNameCoff,
210#include "llvm/ProfileData/InstrProfData.inc"
211};
212
213const char *InstrProfSectNamePrefix[] = {
214#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix) \
215 Prefix,
216#include "llvm/ProfileData/InstrProfData.inc"
217};
218
219} // namespace
220
221namespace llvm {
222
223cl::opt<bool> DoInstrProfNameCompression(
224 "enable-name-compression",
225 cl::desc("Enable name/filename string compression"), cl::init(Val: true));
226
227cl::opt<bool> EnableVTableValueProfiling(
228 "enable-vtable-value-profiling", cl::init(Val: false),
229 cl::desc("If true, the virtual table address will be instrumented to know "
230 "the types of a C++ pointer. The information is used in indirect "
231 "call promotion to do selective vtable-based comparison."));
232
233cl::opt<bool> EnableVTableProfileUse(
234 "enable-vtable-profile-use", cl::init(Val: false),
235 cl::desc("If ThinLTO and WPD is enabled and this option is true, vtable "
236 "profiles will be used by ICP pass for more efficient indirect "
237 "call sequence. If false, type profiles won't be used."));
238
239std::string getInstrProfSectionName(InstrProfSectKind IPSK,
240 Triple::ObjectFormatType OF,
241 bool AddSegmentInfo) {
242 std::string SectName;
243
244 if (OF == Triple::MachO && AddSegmentInfo)
245 SectName = InstrProfSectNamePrefix[IPSK];
246
247 if (OF == Triple::COFF)
248 SectName += InstrProfSectNameCoff[IPSK];
249 else
250 SectName += InstrProfSectNameCommon[IPSK];
251
252 if (OF == Triple::MachO && IPSK == IPSK_data && AddSegmentInfo)
253 SectName += ",regular,live_support";
254
255 return SectName;
256}
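// Illustrative examples of the names produced above (a hedged sketch; the
// exact section names come from InstrProfData.inc):
//   getInstrProfSectionName(IPSK_cnts, Triple::ELF)
//       -> "__llvm_prf_cnts"
//   getInstrProfSectionName(IPSK_cnts, Triple::MachO, /*AddSegmentInfo=*/true)
//       -> "__DATA,__llvm_prf_cnts"
//   getInstrProfSectionName(IPSK_data, Triple::MachO, /*AddSegmentInfo=*/true)
//       -> "__DATA,__llvm_prf_data,regular,live_support"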
257
258std::string InstrProfError::message() const {
259 return getInstrProfErrString(Err, ErrMsg: Msg);
260}
261
262char InstrProfError::ID = 0;
263
264ProfOStream::ProfOStream(raw_fd_ostream &FD)
265 : IsFDOStream(true), OS(FD), LE(FD, llvm::endianness::little) {}
266
267ProfOStream::ProfOStream(raw_string_ostream &STR)
268 : IsFDOStream(false), OS(STR), LE(STR, llvm::endianness::little) {}
269
270uint64_t ProfOStream::tell() const { return OS.tell(); }
271void ProfOStream::write(uint64_t V) { LE.write<uint64_t>(Val: V); }
272void ProfOStream::write32(uint32_t V) { LE.write<uint32_t>(Val: V); }
273void ProfOStream::writeByte(uint8_t V) { LE.write<uint8_t>(Val: V); }
274
275void ProfOStream::patch(ArrayRef<PatchItem> P) {
276 using namespace support;
277
278 if (IsFDOStream) {
279 raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS);
280 const uint64_t LastPos = FDOStream.tell();
281 for (const auto &K : P) {
282 FDOStream.seek(off: K.Pos);
283 for (uint64_t Elem : K.D)
284 write(V: Elem);
285 }
286 // Reset the stream to the last position after patching so that users
287 // don't accidentally overwrite data. This makes it consistent with
288 // the string stream below which replaces the data directly.
289 FDOStream.seek(off: LastPos);
290 } else {
291 raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
292 std::string &Data = SOStream.str(); // with flush
293 for (const auto &K : P) {
294 for (int I = 0, E = K.D.size(); I != E; I++) {
295 uint64_t Bytes =
296 endian::byte_swap<uint64_t>(value: K.D[I], endian: llvm::endianness::little);
297 Data.replace(pos: K.Pos + I * sizeof(uint64_t), n1: sizeof(uint64_t),
298 s: (const char *)&Bytes, n2: sizeof(uint64_t));
299 }
300 }
301 }
302}
303
304std::string getPGOFuncName(StringRef Name, GlobalValue::LinkageTypes Linkage,
305 StringRef FileName,
306 [[maybe_unused]] uint64_t Version) {
307 // Value names may be prefixed with a binary '1' to indicate
308 // that the backend should not modify the symbols due to any platform
309 // naming convention. Do not include that '1' in the PGO profile name.
310 if (Name[0] == '\1')
311 Name = Name.substr(Start: 1);
312
313 std::string NewName = std::string(Name);
314 if (llvm::GlobalValue::isLocalLinkage(Linkage)) {
315 // For local symbols, prepend the main file name to distinguish them.
316 // Do not include the full path in the file name since there's no guarantee
317 // that it will stay the same, e.g., if the files are checked out from
318 // version control in different locations.
319 if (FileName.empty())
320 NewName = NewName.insert(pos: 0, s: "<unknown>:");
321 else
322 NewName = NewName.insert(pos1: 0, str: FileName.str() + ":");
323 }
324 return NewName;
325}
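// For illustration, a sketch of the names produced above for a function "foo":
//   local linkage, FileName == "bar.c" -> "bar.c:foo"
//   local linkage, FileName empty      -> "<unknown>:foo"
//   non-local linkage                  -> "foo"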
326
// Strip NumPrefix levels of directory names from PathNameStr. If the number of
// directory separators is less than NumPrefix, strip all the directories and
// leave only the base file name.
330static StringRef stripDirPrefix(StringRef PathNameStr, uint32_t NumPrefix) {
331 uint32_t Count = NumPrefix;
332 uint32_t Pos = 0, LastPos = 0;
333 for (const auto &CI : PathNameStr) {
334 ++Pos;
335 if (llvm::sys::path::is_separator(value: CI)) {
336 LastPos = Pos;
337 --Count;
338 }
339 if (Count == 0)
340 break;
341 }
342 return PathNameStr.substr(Start: LastPos);
343}
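// For example (derived from the loop above):
//   stripDirPrefix("a/b/c.cpp", 1) == "b/c.cpp"
//   stripDirPrefix("a/b/c.cpp", 9) == "c.cpp"  // more levels than separators
// A leading separator counts as one stripped level, so
// stripDirPrefix("/a/b.cpp", 1) == "a/b.cpp".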
344
345static StringRef getStrippedSourceFileName(const GlobalObject &GO) {
346 StringRef FileName(GO.getParent()->getSourceFileName());
347 uint32_t StripLevel = StaticFuncFullModulePrefix ? 0 : (uint32_t)-1;
348 if (StripLevel < StaticFuncStripDirNamePrefix)
349 StripLevel = StaticFuncStripDirNamePrefix;
350 if (StripLevel)
351 FileName = stripDirPrefix(PathNameStr: FileName, NumPrefix: StripLevel);
352 return FileName;
353}
354
355// The PGO name has the format [<filepath>;]<mangled-name> where <filepath>; is
356// provided if linkage is local and is used to discriminate possibly identical
357// mangled names. ";" is used because it is unlikely to be found in either
358// <filepath> or <mangled-name>.
359//
360// Older compilers used getPGOFuncName() which has the format
361// [<filepath>:]<mangled-name>. This caused trouble for Objective-C functions
// which commonly have :'s in their names. We still need to compute this name
// to look up functions from profiles built by older compilers.
364static std::string
365getIRPGONameForGlobalObject(const GlobalObject &GO,
366 GlobalValue::LinkageTypes Linkage,
367 StringRef FileName) {
368 return GlobalValue::getGlobalIdentifier(Name: GO.getName(), Linkage, FileName);
369}
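// For illustration, with GlobalIdentifierDelimiter being ';':
//   internal-linkage "foo" defined in "bar.cpp" -> "bar.cpp;foo"
//   external-linkage "foo"                      -> "foo"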
370
371static std::optional<std::string> lookupPGONameFromMetadata(MDNode *MD) {
372 if (MD != nullptr) {
373 StringRef S = cast<MDString>(Val: MD->getOperand(I: 0))->getString();
374 return S.str();
375 }
376 return {};
377}
378
379// Returns the PGO object name. This function has some special handling
// when called in LTO optimization. The following only applies when called in
381// LTO passes (when \c InLTO is true): LTO's internalization privatizes many
382// global linkage symbols. This happens after value profile annotation, but
383// those internal linkage functions should not have a source prefix.
384// Additionally, for ThinLTO mode, exported internal functions are promoted
385// and renamed. We need to ensure that the original internal PGO name is
386// used when computing the GUID that is compared against the profiled GUIDs.
// To differentiate compiler-generated internal symbols from original ones,
// PGOFuncName metadata is created and attached to the original internal
// symbols in the value profile annotation step
// (PGOUseFunc::annotateIndirectCallSites). If a symbol does not have the
// metadata, its original linkage must be non-internal.
392static std::string getIRPGOObjectName(const GlobalObject &GO, bool InLTO,
393 MDNode *PGONameMetadata) {
394 if (!InLTO) {
395 auto FileName = getStrippedSourceFileName(GO);
396 return getIRPGONameForGlobalObject(GO, Linkage: GO.getLinkage(), FileName);
397 }
398
  // In LTO mode (when InLTO is true), first check if there is metadata.
400 if (auto IRPGOFuncName = lookupPGONameFromMetadata(MD: PGONameMetadata))
401 return *IRPGOFuncName;
402
403 // If there is no meta data, the function must be a global before the value
404 // profile annotation pass. Its current linkage may be internal if it is
405 // internalized in LTO mode.
406 return getIRPGONameForGlobalObject(GO, Linkage: GlobalValue::ExternalLinkage, FileName: "");
407}
408
409// Returns the IRPGO function name and does special handling when called
410// in LTO optimization. See the comments of `getIRPGOObjectName` for details.
411std::string getIRPGOFuncName(const Function &F, bool InLTO) {
412 return getIRPGOObjectName(GO: F, InLTO, PGONameMetadata: getPGOFuncNameMetadata(F));
413}
414
415// Please use getIRPGOFuncName for LLVM IR instrumentation. This function is
416// for front-end (Clang, etc) instrumentation.
417// The implementation is kept for profile matching from older profiles.
418// This is similar to `getIRPGOFuncName` except that this function calls
419// 'getPGOFuncName' to get a name and `getIRPGOFuncName` calls
// 'getIRPGONameForGlobalObject'. See the difference between the two callees in
// the comments of `getIRPGONameForGlobalObject`.
422std::string getPGOFuncName(const Function &F, bool InLTO, uint64_t Version) {
423 if (!InLTO) {
424 auto FileName = getStrippedSourceFileName(GO: F);
425 return getPGOFuncName(Name: F.getName(), Linkage: F.getLinkage(), FileName, Version);
426 }
427
  // In LTO mode (when InLTO is true), first check if there is metadata.
429 if (auto PGOFuncName = lookupPGONameFromMetadata(MD: getPGOFuncNameMetadata(F)))
430 return *PGOFuncName;
431
432 // If there is no meta data, the function must be a global before the value
433 // profile annotation pass. Its current linkage may be internal if it is
434 // internalized in LTO mode.
435 return getPGOFuncName(Name: F.getName(), Linkage: GlobalValue::ExternalLinkage, FileName: "");
436}
437
438std::string getPGOName(const GlobalVariable &V, bool InLTO) {
  // PGONameMetadata should be set by the compiler at profile-use time
  // and read during symtab creation to look up symbols corresponding to
  // an MD5 hash.
442 return getIRPGOObjectName(GO: V, InLTO, PGONameMetadata: V.getMetadata(Kind: getPGONameMetadataName()));
443}
444
// See getIRPGOObjectName() for a description of the format.
446std::pair<StringRef, StringRef> getParsedIRPGOName(StringRef IRPGOName) {
447 auto [FileName, MangledName] = IRPGOName.split(Separator: GlobalIdentifierDelimiter);
448 if (MangledName.empty())
449 return std::make_pair(x: StringRef(), y&: IRPGOName);
450 return std::make_pair(x&: FileName, y&: MangledName);
451}
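// For example:
//   getParsedIRPGOName("bar.cpp;_ZL3foov") == {"bar.cpp", "_ZL3foov"}
//   getParsedIRPGOName("_Z3foov")          == {"", "_Z3foov"}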
452
453StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName, StringRef FileName) {
454 if (FileName.empty())
455 return PGOFuncName;
456 // Drop the file name including ':' or ';'. See getIRPGONameForGlobalObject as
457 // well.
458 if (PGOFuncName.starts_with(Prefix: FileName))
459 PGOFuncName = PGOFuncName.drop_front(N: FileName.size() + 1);
460 return PGOFuncName;
461}
462
// \p FuncName is the string used as the profile lookup key for the function.
// A symbol is created to hold the name. Return the legalized symbol name.
465std::string getPGOFuncNameVarName(StringRef FuncName,
466 GlobalValue::LinkageTypes Linkage) {
467 std::string VarName = std::string(getInstrProfNameVarPrefix());
468 VarName += FuncName;
469
470 if (!GlobalValue::isLocalLinkage(Linkage))
471 return VarName;
472
473 // Now fix up illegal chars in local VarName that may upset the assembler.
474 const char InvalidChars[] = "-:;<>/\"'";
475 size_t FoundPos = VarName.find_first_of(s: InvalidChars);
476 while (FoundPos != std::string::npos) {
477 VarName[FoundPos] = '_';
478 FoundPos = VarName.find_first_of(s: InvalidChars, pos: FoundPos + 1);
479 }
480 return VarName;
481}
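// A hedged sketch of the resulting symbol names, assuming
// getInstrProfNameVarPrefix() is "__profn_":
//   getPGOFuncNameVarName("foo", ExternalLinkage)       -> "__profn_foo"
//   getPGOFuncNameVarName("bar.c:foo", InternalLinkage) -> "__profn_bar.c_foo"
// Only local-linkage names have the characters in InvalidChars replaced.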
482
483bool isGPUProfTarget(const Module &M) {
484 const Triple &T = M.getTargetTriple();
485 return T.isGPU();
486}
487
488void setPGOFuncVisibility(Module &M, GlobalVariable *FuncNameVar) {
  // If the target is a GPU, make the symbol protected so it can
  // be read from the host device.
491 if (isGPUProfTarget(M))
492 FuncNameVar->setVisibility(GlobalValue::ProtectedVisibility);
493 // Hide the symbol so that we correctly get a copy for each executable.
494 else if (!GlobalValue::isLocalLinkage(Linkage: FuncNameVar->getLinkage()))
495 FuncNameVar->setVisibility(GlobalValue::HiddenVisibility);
496}
497
498GlobalVariable *createPGOFuncNameVar(Module &M,
499 GlobalValue::LinkageTypes Linkage,
500 StringRef PGOFuncName) {
501 // Ensure profiling variables on GPU are visible to be read from host
502 if (isGPUProfTarget(M))
503 Linkage = GlobalValue::ExternalLinkage;
504 // We generally want to match the function's linkage, but available_externally
505 // and extern_weak both have the wrong semantics, and anything that doesn't
506 // need to link across compilation units doesn't need to be visible at all.
507 else if (Linkage == GlobalValue::ExternalWeakLinkage)
508 Linkage = GlobalValue::LinkOnceAnyLinkage;
509 else if (Linkage == GlobalValue::AvailableExternallyLinkage)
510 Linkage = GlobalValue::LinkOnceODRLinkage;
511 else if (Linkage == GlobalValue::InternalLinkage ||
512 Linkage == GlobalValue::ExternalLinkage)
513 Linkage = GlobalValue::PrivateLinkage;
514
515 auto *Value =
516 ConstantDataArray::getString(Context&: M.getContext(), Initializer: PGOFuncName, AddNull: false);
517 auto *FuncNameVar =
518 new GlobalVariable(M, Value->getType(), true, Linkage, Value,
519 getPGOFuncNameVarName(FuncName: PGOFuncName, Linkage));
520
521 setPGOFuncVisibility(M, FuncNameVar);
522 return FuncNameVar;
523}
524
525GlobalVariable *createPGOFuncNameVar(Function &F, StringRef PGOFuncName) {
526 return createPGOFuncNameVar(M&: *F.getParent(), Linkage: F.getLinkage(), PGOFuncName);
527}
528
529Error InstrProfSymtab::create(Module &M, bool InLTO, bool AddCanonical) {
530 for (Function &F : M) {
    // A function may not have a name, e.g., when asm("") is used to overwrite
    // the name. Ignore such functions.
533 if (!F.hasName())
534 continue;
535 auto IRPGOFuncName = getIRPGOFuncName(F, InLTO);
536 if (Error E = addFuncWithName(F, PGOFuncName: IRPGOFuncName, AddCanonical))
537 return E;
    // Also use getPGOFuncName() so that we can find records from older
    // profiles.
539 auto PGOFuncName = getPGOFuncName(F, InLTO);
540 if (PGOFuncName != IRPGOFuncName)
541 if (Error E = addFuncWithName(F, PGOFuncName, AddCanonical))
542 return E;
543 }
544
545 for (GlobalVariable &G : M.globals()) {
546 if (!G.hasName() || !G.hasMetadata(KindID: LLVMContext::MD_type))
547 continue;
548 if (Error E = addVTableWithName(V&: G, PGOVTableName: getPGOName(V: G, InLTO)))
549 return E;
550 }
551
552 Sorted = false;
553 finalizeSymtab();
554 return Error::success();
555}
556
557Error InstrProfSymtab::addVTableWithName(GlobalVariable &VTable,
558 StringRef VTablePGOName) {
559 auto NameToGUIDMap = [&](StringRef Name) -> Error {
560 if (Error E = addSymbolName(SymbolName: Name))
561 return E;
562
563 bool Inserted = true;
564 std::tie(args: std::ignore, args&: Inserted) = MD5VTableMap.try_emplace(
565 Key: GlobalValue::getGUIDAssumingExternalLinkage(GlobalName: Name), Args: &VTable);
566 if (!Inserted)
567 LLVM_DEBUG(dbgs() << "GUID conflict within one module");
568 return Error::success();
569 };
570 if (Error E = NameToGUIDMap(VTablePGOName))
571 return E;
572
573 StringRef CanonicalName = getCanonicalName(PGOName: VTablePGOName);
574 if (CanonicalName != VTablePGOName)
575 return NameToGUIDMap(CanonicalName);
576
577 return Error::success();
578}
579
580Error readAndDecodeStrings(StringRef NameStrings,
581 std::function<Error(StringRef)> NameCallback) {
582 const uint8_t *P = NameStrings.bytes_begin();
583 const uint8_t *EndP = NameStrings.bytes_end();
584 while (P < EndP) {
585 uint32_t N;
586 uint64_t UncompressedSize = decodeULEB128(p: P, n: &N);
587 P += N;
588 uint64_t CompressedSize = decodeULEB128(p: P, n: &N);
589 P += N;
590 const bool IsCompressed = (CompressedSize != 0);
591 SmallVector<uint8_t, 128> UncompressedNameStrings;
592 StringRef NameStrings;
593 if (IsCompressed) {
594 if (!llvm::compression::zlib::isAvailable())
595 return make_error<InstrProfError>(Args: instrprof_error::zlib_unavailable);
596
597 if (Error E = compression::zlib::decompress(Input: ArrayRef(P, CompressedSize),
598 Output&: UncompressedNameStrings,
599 UncompressedSize)) {
600 consumeError(Err: std::move(E));
601 return make_error<InstrProfError>(Args: instrprof_error::uncompress_failed);
602 }
603 P += CompressedSize;
604 NameStrings = toStringRef(Input: UncompressedNameStrings);
605 } else {
606 NameStrings =
607 StringRef(reinterpret_cast<const char *>(P), UncompressedSize);
608 P += UncompressedSize;
609 }
610 // Now parse the name strings.
611 SmallVector<StringRef, 0> Names;
612 NameStrings.split(A&: Names, Separator: getInstrProfNameSeparator());
613 for (StringRef &Name : Names)
614 if (Error E = NameCallback(Name))
615 return E;
616
617 while (P < EndP && *P == 0)
618 P++;
619 }
620 return Error::success();
621}
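// The name blob decoded above is a sequence of chunks, each laid out as
//   [ULEB128 uncompressed size][ULEB128 compressed size][payload]
// where a compressed size of 0 means the payload is stored uncompressed, the
// payload is a list of names joined by getInstrProfNameSeparator(), and any
// trailing zero padding between chunks is skipped. The encoding side is
// collectGlobalObjectNameStrings() below.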
622
623Error InstrProfSymtab::create(StringRef NameStrings) {
624 return readAndDecodeStrings(NameStrings,
625 NameCallback: [&](StringRef S) { return addFuncName(FuncName: S); });
626}
627
628Error InstrProfSymtab::create(StringRef FuncNameStrings,
629 StringRef VTableNameStrings) {
630 if (Error E = readAndDecodeStrings(
631 NameStrings: FuncNameStrings, NameCallback: [&](StringRef S) { return addFuncName(FuncName: S); }))
632 return E;
633
634 return readAndDecodeStrings(NameStrings: VTableNameStrings,
635 NameCallback: [&](StringRef S) { return addVTableName(VTableName: S); });
636}
637
638Error InstrProfSymtab::initVTableNamesFromCompressedStrings(
639 StringRef CompressedVTableStrings) {
640 return readAndDecodeStrings(NameStrings: CompressedVTableStrings,
641 NameCallback: [&](StringRef S) { return addVTableName(VTableName: S); });
642}
643
644StringRef InstrProfSymtab::getCanonicalName(StringRef PGOName) {
  // In ThinLTO, a local function may have been promoted to a global and had
  // the suffix ".llvm." appended to its name. We need to add the stripped
  // function name to the symbol table so that we can find a match from the
  // profile.
  //
  // The ".__uniq." suffix is used to differentiate internal-linkage functions
  // in different modules and should be kept. It is the only ".xxx"-style
  // suffix that is kept before matching; the ".llvm." and ".part." suffixes
  // are stripped.
  //
  // Leverage the common canonicalization logic from FunctionSamples. Instead
  // of removing all suffixes except ".__uniq.", explicitly specify the ones to
  // be removed. This avoids colliding the canonical name of a coroutine
  // function with those of its await-suspend wrappers or its post-split
  // clones; i.e., a coroutine function foo, its wrappers
  // (foo.__await_suspend_wrapper__init and foo.__await_suspend_wrapper__final),
  // and its post-split clones (foo.resume, foo.cleanup) would otherwise all be
  // canonicalized to "foo", which can make the symtab lookup return unexpected
  // results.
664 const SmallVector<StringRef> SuffixesToRemove{".llvm.", ".part."};
665 return FunctionSamples::getCanonicalFnName(FnName: PGOName, Suffixes: SuffixesToRemove);
666}
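// For illustration (a sketch based on the suffix list above):
//   "foo.llvm.1234"             -> "foo"
//   "foo.__uniq.567.llvm.1234"  -> "foo.__uniq.567"
//   "foo.resume"                -> "foo.resume"  // post-split clone is kept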
667
668Error InstrProfSymtab::addFuncWithName(Function &F, StringRef PGOFuncName,
669 bool AddCanonical) {
670 auto NameToGUIDMap = [&](StringRef Name) -> Error {
671 if (Error E = addFuncName(FuncName: Name))
672 return E;
673 MD5FuncMap.emplace_back(args: Function::getGUIDAssumingExternalLinkage(GlobalName: Name), args: &F);
674 return Error::success();
675 };
676 if (Error E = NameToGUIDMap(PGOFuncName))
677 return E;
678
679 if (!AddCanonical)
680 return Error::success();
681
682 StringRef CanonicalFuncName = getCanonicalName(PGOName: PGOFuncName);
683 if (CanonicalFuncName != PGOFuncName)
684 return NameToGUIDMap(CanonicalFuncName);
685
686 return Error::success();
687}
688
689uint64_t InstrProfSymtab::getVTableHashFromAddress(uint64_t Address) const {
  // Given a runtime address, look up the hash value in the interval map, and
  // fall back to the value 0 if a hash value is not found.
692 return VTableAddrMap.lookup(x: Address, NotFound: 0);
693}
694
695uint64_t InstrProfSymtab::getFunctionHashFromAddress(uint64_t Address) const {
696 finalizeSymtab();
697 auto It = partition_point(Range&: AddrToMD5Map, P: [=](std::pair<uint64_t, uint64_t> A) {
698 return A.first < Address;
699 });
  // Raw function pointers collected by the value profiler may come from
  // external functions that are not instrumented. They won't have
  // mapping data to be used by the deserializer. Force the value to
  // be 0 in this case.
704 if (It != AddrToMD5Map.end() && It->first == Address)
705 return (uint64_t)It->second;
706 return 0;
707}
708
709void InstrProfSymtab::dumpNames(raw_ostream &OS) const {
710 SmallVector<StringRef, 0> Sorted(NameTab.keys());
711 llvm::sort(C&: Sorted);
712 for (StringRef S : Sorted)
713 OS << S << '\n';
714}
715
716Error collectGlobalObjectNameStrings(ArrayRef<std::string> NameStrs,
717 bool DoCompression, std::string &Result) {
718 assert(!NameStrs.empty() && "No name data to emit");
719
720 uint8_t Header[20], *P = Header;
721 std::string UncompressedNameStrings =
722 join(Begin: NameStrs.begin(), End: NameStrs.end(), Separator: getInstrProfNameSeparator());
723
724 assert(StringRef(UncompressedNameStrings)
725 .count(getInstrProfNameSeparator()) == (NameStrs.size() - 1) &&
726 "PGO name is invalid (contains separator token)");
727
728 unsigned EncLen = encodeULEB128(Value: UncompressedNameStrings.length(), p: P);
729 P += EncLen;
730
731 auto WriteStringToResult = [&](size_t CompressedLen, StringRef InputStr) {
732 EncLen = encodeULEB128(Value: CompressedLen, p: P);
733 P += EncLen;
734 char *HeaderStr = reinterpret_cast<char *>(&Header[0]);
735 unsigned HeaderLen = P - &Header[0];
736 Result.append(s: HeaderStr, n: HeaderLen);
737 Result += InputStr;
738 return Error::success();
739 };
740
741 if (!DoCompression) {
742 return WriteStringToResult(0, UncompressedNameStrings);
743 }
744
745 SmallVector<uint8_t, 128> CompressedNameStrings;
746 compression::zlib::compress(Input: arrayRefFromStringRef(Input: UncompressedNameStrings),
747 CompressedBuffer&: CompressedNameStrings,
748 Level: compression::zlib::BestSizeCompression);
749
750 return WriteStringToResult(CompressedNameStrings.size(),
751 toStringRef(Input: CompressedNameStrings));
752}
753
754StringRef getPGOFuncNameVarInitializer(GlobalVariable *NameVar) {
755 auto *Arr = cast<ConstantDataArray>(Val: NameVar->getInitializer());
756 StringRef NameStr =
757 Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();
758 return NameStr;
759}
760
761Error collectPGOFuncNameStrings(ArrayRef<GlobalVariable *> NameVars,
762 std::string &Result, bool DoCompression) {
763 std::vector<std::string> NameStrs;
764 for (auto *NameVar : NameVars) {
765 NameStrs.push_back(x: std::string(getPGOFuncNameVarInitializer(NameVar)));
766 }
767 return collectGlobalObjectNameStrings(
768 NameStrs, DoCompression: compression::zlib::isAvailable() && DoCompression, Result);
769}
770
771Error collectVTableStrings(ArrayRef<GlobalVariable *> VTables,
772 std::string &Result, bool DoCompression) {
773 std::vector<std::string> VTableNameStrs;
774 for (auto *VTable : VTables)
775 VTableNameStrs.push_back(x: getPGOName(V: *VTable));
776 return collectGlobalObjectNameStrings(
777 NameStrs: VTableNameStrs, DoCompression: compression::zlib::isAvailable() && DoCompression,
778 Result);
779}
780
781void InstrProfRecord::accumulateCounts(CountSumOrPercent &Sum) const {
782 uint64_t FuncSum = 0;
783 Sum.NumEntries += Counts.size();
784 for (uint64_t Count : Counts)
785 FuncSum += Count;
786 Sum.CountSum += FuncSum;
787
788 for (uint32_t VK = IPVK_First; VK <= IPVK_Last; ++VK) {
789 uint64_t KindSum = 0;
790 uint32_t NumValueSites = getNumValueSites(ValueKind: VK);
791 for (size_t I = 0; I < NumValueSites; ++I) {
792 for (const auto &V : getValueArrayForSite(ValueKind: VK, Site: I))
793 KindSum += V.Count;
794 }
795 Sum.ValueCounts[VK] += KindSum;
796 }
797}
798
799void InstrProfValueSiteRecord::overlap(InstrProfValueSiteRecord &Input,
800 uint32_t ValueKind,
801 OverlapStats &Overlap,
802 OverlapStats &FuncLevelOverlap) {
803 this->sortByTargetValues();
804 Input.sortByTargetValues();
805 double Score = 0.0f, FuncLevelScore = 0.0f;
806 auto I = ValueData.begin();
807 auto IE = ValueData.end();
808 auto J = Input.ValueData.begin();
809 auto JE = Input.ValueData.end();
810 while (I != IE && J != JE) {
811 if (I->Value == J->Value) {
812 Score += OverlapStats::score(Val1: I->Count, Val2: J->Count,
813 Sum1: Overlap.Base.ValueCounts[ValueKind],
814 Sum2: Overlap.Test.ValueCounts[ValueKind]);
815 FuncLevelScore += OverlapStats::score(
816 Val1: I->Count, Val2: J->Count, Sum1: FuncLevelOverlap.Base.ValueCounts[ValueKind],
817 Sum2: FuncLevelOverlap.Test.ValueCounts[ValueKind]);
818 ++I;
819 } else if (I->Value < J->Value) {
820 ++I;
821 continue;
822 }
823 ++J;
824 }
825 Overlap.Overlap.ValueCounts[ValueKind] += Score;
826 FuncLevelOverlap.Overlap.ValueCounts[ValueKind] += FuncLevelScore;
827}
828
// Compute the overlap of value profile data between this record and Other.
830void InstrProfRecord::overlapValueProfData(uint32_t ValueKind,
831 InstrProfRecord &Other,
832 OverlapStats &Overlap,
833 OverlapStats &FuncLevelOverlap) {
834 uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
835 assert(ThisNumValueSites == Other.getNumValueSites(ValueKind));
836 if (!ThisNumValueSites)
837 return;
838
839 std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
840 getOrCreateValueSitesForKind(ValueKind);
841 MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
842 Other.getValueSitesForKind(ValueKind);
843 for (uint32_t I = 0; I < ThisNumValueSites; I++)
844 ThisSiteRecords[I].overlap(Input&: OtherSiteRecords[I], ValueKind, Overlap,
845 FuncLevelOverlap);
846}
847
848void InstrProfRecord::overlap(InstrProfRecord &Other, OverlapStats &Overlap,
849 OverlapStats &FuncLevelOverlap,
850 uint64_t ValueCutoff) {
  // The function-level CountSum for Other should already be computed and
  // nonzero.
852 assert(FuncLevelOverlap.Test.CountSum >= 1.0f);
853 accumulateCounts(Sum&: FuncLevelOverlap.Base);
854 bool Mismatch = (Counts.size() != Other.Counts.size());
855
856 // Check if the value profiles mismatch.
857 if (!Mismatch) {
858 for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) {
859 uint32_t ThisNumValueSites = getNumValueSites(ValueKind: Kind);
860 uint32_t OtherNumValueSites = Other.getNumValueSites(ValueKind: Kind);
861 if (ThisNumValueSites != OtherNumValueSites) {
862 Mismatch = true;
863 break;
864 }
865 }
866 }
867 if (Mismatch) {
868 Overlap.addOneMismatch(MismatchFunc: FuncLevelOverlap.Test);
869 return;
870 }
871
872 // Compute overlap for value counts.
873 for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
874 overlapValueProfData(ValueKind: Kind, Other, Overlap, FuncLevelOverlap);
875
876 double Score = 0.0;
877 uint64_t MaxCount = 0;
878 // Compute overlap for edge counts.
879 for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) {
880 Score += OverlapStats::score(Val1: Counts[I], Val2: Other.Counts[I],
881 Sum1: Overlap.Base.CountSum, Sum2: Overlap.Test.CountSum);
882 MaxCount = std::max(a: Other.Counts[I], b: MaxCount);
883 }
884 Overlap.Overlap.CountSum += Score;
885 Overlap.Overlap.NumEntries += 1;
886
887 if (MaxCount >= ValueCutoff) {
888 double FuncScore = 0.0;
889 for (size_t I = 0, E = Other.Counts.size(); I < E; ++I)
890 FuncScore += OverlapStats::score(Val1: Counts[I], Val2: Other.Counts[I],
891 Sum1: FuncLevelOverlap.Base.CountSum,
892 Sum2: FuncLevelOverlap.Test.CountSum);
893 FuncLevelOverlap.Overlap.CountSum = FuncScore;
894 FuncLevelOverlap.Overlap.NumEntries = Other.Counts.size();
895 FuncLevelOverlap.Valid = true;
896 }
897}
898
899void InstrProfValueSiteRecord::merge(InstrProfValueSiteRecord &Input,
900 uint64_t Weight,
901 function_ref<void(instrprof_error)> Warn) {
902 this->sortByTargetValues();
903 Input.sortByTargetValues();
904 auto I = ValueData.begin();
905 auto IE = ValueData.end();
906 std::vector<InstrProfValueData> Merged;
907 Merged.reserve(n: std::max(a: ValueData.size(), b: Input.ValueData.size()));
908 for (const InstrProfValueData &J : Input.ValueData) {
909 while (I != IE && I->Value < J.Value) {
910 Merged.push_back(x: *I);
911 ++I;
912 }
913 if (I != IE && I->Value == J.Value) {
914 bool Overflowed;
915 I->Count = SaturatingMultiplyAdd(X: J.Count, Y: Weight, A: I->Count, ResultOverflowed: &Overflowed);
916 if (Overflowed)
917 Warn(instrprof_error::counter_overflow);
918 Merged.push_back(x: *I);
919 ++I;
920 continue;
921 }
922 Merged.push_back(x: J);
923 }
924 Merged.insert(position: Merged.end(), first: I, last: IE);
925 ValueData = std::move(Merged);
926}
927
928void InstrProfValueSiteRecord::scale(uint64_t N, uint64_t D,
929 function_ref<void(instrprof_error)> Warn) {
930 for (InstrProfValueData &I : ValueData) {
931 bool Overflowed;
932 I.Count = SaturatingMultiply(X: I.Count, Y: N, ResultOverflowed: &Overflowed) / D;
933 if (Overflowed)
934 Warn(instrprof_error::counter_overflow);
935 }
936}
937
938// Merge Value Profile data from Src record to this record for ValueKind.
939// Scale merged value counts by \p Weight.
940void InstrProfRecord::mergeValueProfData(
941 uint32_t ValueKind, InstrProfRecord &Src, uint64_t Weight,
942 function_ref<void(instrprof_error)> Warn) {
943 uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
944 uint32_t OtherNumValueSites = Src.getNumValueSites(ValueKind);
945 if (ThisNumValueSites != OtherNumValueSites) {
946 Warn(instrprof_error::value_site_count_mismatch);
947 return;
948 }
949 if (!ThisNumValueSites)
950 return;
951 std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
952 getOrCreateValueSitesForKind(ValueKind);
953 MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
954 Src.getValueSitesForKind(ValueKind);
955 for (uint32_t I = 0; I < ThisNumValueSites; I++)
956 ThisSiteRecords[I].merge(Input&: OtherSiteRecords[I], Weight, Warn);
957}
958
959void InstrProfRecord::merge(InstrProfRecord &Other, uint64_t Weight,
960 function_ref<void(instrprof_error)> Warn) {
961 // If the number of counters doesn't match we either have bad data
962 // or a hash collision.
963 if (Counts.size() != Other.Counts.size()) {
964 Warn(instrprof_error::count_mismatch);
965 return;
966 }
967
968 // Special handling of the first count as the PseudoCount.
969 CountPseudoKind OtherKind = Other.getCountPseudoKind();
970 CountPseudoKind ThisKind = getCountPseudoKind();
971 if (OtherKind != NotPseudo || ThisKind != NotPseudo) {
    // We don't allow merging a profile with pseudo counts and
    // a normal profile (i.e. without pseudo counts).
    // Profile supplementation should be done after the profile merge.
975 if (OtherKind == NotPseudo || ThisKind == NotPseudo) {
976 Warn(instrprof_error::count_mismatch);
977 return;
978 }
979 if (OtherKind == PseudoHot || ThisKind == PseudoHot)
980 setPseudoCount(PseudoHot);
981 else
982 setPseudoCount(PseudoWarm);
983 return;
984 }
985
986 for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) {
987 bool Overflowed;
988 uint64_t Value =
989 SaturatingMultiplyAdd(X: Other.Counts[I], Y: Weight, A: Counts[I], ResultOverflowed: &Overflowed);
990 if (Value > getInstrMaxCountValue()) {
991 Value = getInstrMaxCountValue();
992 Overflowed = true;
993 }
994 Counts[I] = Value;
995 if (Overflowed)
996 Warn(instrprof_error::counter_overflow);
997 }
998
999 // If the number of bitmap bytes doesn't match we either have bad data
1000 // or a hash collision.
1001 if (BitmapBytes.size() != Other.BitmapBytes.size()) {
1002 Warn(instrprof_error::bitmap_mismatch);
1003 return;
1004 }
1005
1006 // Bitmap bytes are merged by simply ORing them together.
1007 for (size_t I = 0, E = Other.BitmapBytes.size(); I < E; ++I) {
1008 BitmapBytes[I] = Other.BitmapBytes[I] | BitmapBytes[I];
1009 }
1010
1011 for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
1012 mergeValueProfData(ValueKind: Kind, Src&: Other, Weight, Warn);
1013}
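// A small worked example of the merge above: merging Other with Weight == 2
// into this record updates each counter as
//   Counts[I] += 2 * Other.Counts[I]   (saturating at getInstrMaxCountValue())
// and each bitmap byte as BitmapBytes[I] |= Other.BitmapBytes[I].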
1014
1015void InstrProfRecord::scaleValueProfData(
1016 uint32_t ValueKind, uint64_t N, uint64_t D,
1017 function_ref<void(instrprof_error)> Warn) {
1018 for (auto &R : getValueSitesForKind(ValueKind))
1019 R.scale(N, D, Warn);
1020}
1021
1022void InstrProfRecord::scale(uint64_t N, uint64_t D,
1023 function_ref<void(instrprof_error)> Warn) {
1024 assert(D != 0 && "D cannot be 0");
1025 for (auto &Count : this->Counts) {
1026 bool Overflowed;
1027 Count = SaturatingMultiply(X: Count, Y: N, ResultOverflowed: &Overflowed) / D;
1028 if (Count > getInstrMaxCountValue()) {
1029 Count = getInstrMaxCountValue();
1030 Overflowed = true;
1031 }
1032 if (Overflowed)
1033 Warn(instrprof_error::counter_overflow);
1034 }
1035 for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
1036 scaleValueProfData(ValueKind: Kind, N, D, Warn);
1037}
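// For example, scale(3, 4, Warn) turns a counter of 100 into
// SaturatingMultiply(100, 3) / 4 == 75; results above getInstrMaxCountValue()
// are clamped and reported via Warn(instrprof_error::counter_overflow).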
1038
// Map a raw value collected by the value profiler (e.g. an indirect call
// target address) to the MD5 hash of the corresponding symbol name.
1040uint64_t InstrProfRecord::remapValue(uint64_t Value, uint32_t ValueKind,
1041 InstrProfSymtab *SymTab) {
1042 if (!SymTab)
1043 return Value;
1044
1045 if (ValueKind == IPVK_IndirectCallTarget)
1046 return SymTab->getFunctionHashFromAddress(Address: Value);
1047
1048 if (ValueKind == IPVK_VTableTarget)
1049 return SymTab->getVTableHashFromAddress(Address: Value);
1050
1051 return Value;
1052}
1053
1054void InstrProfRecord::addValueData(uint32_t ValueKind, uint32_t Site,
1055 ArrayRef<InstrProfValueData> VData,
1056 InstrProfSymtab *ValueMap) {
1057 // Remap values.
1058 std::vector<InstrProfValueData> RemappedVD;
1059 RemappedVD.reserve(n: VData.size());
1060 for (const auto &V : VData) {
1061 uint64_t NewValue = remapValue(Value: V.Value, ValueKind, SymTab: ValueMap);
1062 RemappedVD.push_back(x: {.Value: NewValue, .Count: V.Count});
1063 }
1064
1065 std::vector<InstrProfValueSiteRecord> &ValueSites =
1066 getOrCreateValueSitesForKind(ValueKind);
1067 assert(ValueSites.size() == Site);
1068
1069 // Add a new value site with remapped value profiling data.
1070 ValueSites.emplace_back(args: std::move(RemappedVD));
1071}
1072
1073void TemporalProfTraceTy::createBPFunctionNodes(
1074 ArrayRef<TemporalProfTraceTy> Traces, std::vector<BPFunctionNode> &Nodes,
1075 bool RemoveOutlierUNs) {
1076 using IDT = BPFunctionNode::IDT;
1077 using UtilityNodeT = BPFunctionNode::UtilityNodeT;
1078 UtilityNodeT MaxUN = 0;
1079 DenseMap<IDT, size_t> IdToFirstTimestamp;
1080 DenseMap<IDT, UtilityNodeT> IdToFirstUN;
1081 DenseMap<IDT, SmallVector<UtilityNodeT>> IdToUNs;
1082 // TODO: We need to use the Trace.Weight field to give more weight to more
1083 // important utilities
1084 for (auto &Trace : Traces) {
1085 size_t CutoffTimestamp = 1;
1086 for (size_t Timestamp = 0; Timestamp < Trace.FunctionNameRefs.size();
1087 Timestamp++) {
1088 IDT Id = Trace.FunctionNameRefs[Timestamp];
1089 auto [It, WasInserted] = IdToFirstTimestamp.try_emplace(Key: Id, Args&: Timestamp);
1090 if (!WasInserted)
1091 It->getSecond() = std::min<size_t>(a: It->getSecond(), b: Timestamp);
1092 if (Timestamp >= CutoffTimestamp) {
1093 ++MaxUN;
1094 CutoffTimestamp = 2 * Timestamp;
1095 }
1096 IdToFirstUN.try_emplace(Key: Id, Args&: MaxUN);
1097 }
1098 for (auto &[Id, FirstUN] : IdToFirstUN)
1099 for (auto UN = FirstUN; UN <= MaxUN; ++UN)
1100 IdToUNs[Id].push_back(Elt: UN);
1101 ++MaxUN;
1102 IdToFirstUN.clear();
1103 }
1104
1105 if (RemoveOutlierUNs) {
1106 DenseMap<UtilityNodeT, unsigned> UNFrequency;
1107 for (auto &[Id, UNs] : IdToUNs)
1108 for (auto &UN : UNs)
1109 ++UNFrequency[UN];
    // Filter out utility nodes that are too infrequent or too prevalent, so
    // that BalancedPartitioning is more effective.
1112 for (auto &[Id, UNs] : IdToUNs)
1113 llvm::erase_if(C&: UNs, P: [&](auto &UN) {
1114 unsigned Freq = UNFrequency[UN];
1115 return Freq <= 1 || 2 * Freq > IdToUNs.size();
1116 });
1117 }
1118
1119 for (auto &[Id, UNs] : IdToUNs)
1120 Nodes.emplace_back(args&: Id, args&: UNs);
1121
1122 // Since BalancedPartitioning is sensitive to the initial order, we explicitly
1123 // order nodes by their earliest timestamp.
1124 llvm::sort(C&: Nodes, Comp: [&](auto &L, auto &R) {
1125 return std::make_pair(IdToFirstTimestamp[L.Id], L.Id) <
1126 std::make_pair(IdToFirstTimestamp[R.Id], R.Id);
1127 });
1128}
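// A short sketch of the utility-node assignment above: within one trace the
// cutoff timestamps double (1, 2, 4, 8, ...), so a new utility node starts at
// each of those timestamps, and a function is associated with every utility
// node from the one where it first appears up to the trace's final MaxUN.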
1129
1130#define INSTR_PROF_COMMON_API_IMPL
1131#include "llvm/ProfileData/InstrProfData.inc"
1132
1133/*!
1134 * ValueProfRecordClosure Interface implementation for InstrProfRecord
1135 * class. These C wrappers are used as adaptors so that C++ code can be
1136 * invoked as callbacks.
1137 */
1138uint32_t getNumValueKindsInstrProf(const void *Record) {
1139 return reinterpret_cast<const InstrProfRecord *>(Record)->getNumValueKinds();
1140}
1141
1142uint32_t getNumValueSitesInstrProf(const void *Record, uint32_t VKind) {
1143 return reinterpret_cast<const InstrProfRecord *>(Record)
1144 ->getNumValueSites(ValueKind: VKind);
1145}
1146
1147uint32_t getNumValueDataInstrProf(const void *Record, uint32_t VKind) {
1148 return reinterpret_cast<const InstrProfRecord *>(Record)
1149 ->getNumValueData(ValueKind: VKind);
1150}
1151
1152uint32_t getNumValueDataForSiteInstrProf(const void *R, uint32_t VK,
1153 uint32_t S) {
1154 const auto *IPR = reinterpret_cast<const InstrProfRecord *>(R);
1155 return IPR->getValueArrayForSite(ValueKind: VK, Site: S).size();
1156}
1157
1158void getValueForSiteInstrProf(const void *R, InstrProfValueData *Dst,
1159 uint32_t K, uint32_t S) {
1160 const auto *IPR = reinterpret_cast<const InstrProfRecord *>(R);
1161 llvm::copy(Range: IPR->getValueArrayForSite(ValueKind: K, Site: S), Out: Dst);
1162}
1163
1164ValueProfData *allocValueProfDataInstrProf(size_t TotalSizeInBytes) {
1165 ValueProfData *VD = new (::operator new(TotalSizeInBytes)) ValueProfData();
1166 memset(s: VD, c: 0, n: TotalSizeInBytes);
1167 return VD;
1168}
1169
1170static ValueProfRecordClosure InstrProfRecordClosure = {
    nullptr,                         // Record
    getNumValueKindsInstrProf,       // GetNumValueKinds
    getNumValueSitesInstrProf,       // GetNumValueSites
    getNumValueDataInstrProf,        // GetNumValueData
    getNumValueDataForSiteInstrProf, // GetNumValueDataForSite
    nullptr,                         // RemapValueData
    getValueForSiteInstrProf,        // GetValueForSite
    allocValueProfDataInstrProf};    // AllocValueProfData
1179
1180// Wrapper implementation using the closure mechanism.
1181uint32_t ValueProfData::getSize(const InstrProfRecord &Record) {
1182 auto Closure = InstrProfRecordClosure;
1183 Closure.Record = &Record;
1184 return getValueProfDataSize(Closure: &Closure);
1185}
1186
1187// Wrapper implementation using the closure mechanism.
1188std::unique_ptr<ValueProfData>
1189ValueProfData::serializeFrom(const InstrProfRecord &Record) {
1190 InstrProfRecordClosure.Record = &Record;
1191
1192 std::unique_ptr<ValueProfData> VPD(
1193 serializeValueProfDataFrom(Closure: &InstrProfRecordClosure, DstData: nullptr));
1194 return VPD;
1195}
1196
1197void ValueProfRecord::deserializeTo(InstrProfRecord &Record,
1198 InstrProfSymtab *SymTab) {
1199 Record.reserveSites(ValueKind: Kind, NumValueSites);
1200
1201 InstrProfValueData *ValueData = getValueProfRecordValueData(This: this);
1202 for (uint64_t VSite = 0; VSite < NumValueSites; ++VSite) {
1203 uint8_t ValueDataCount = this->SiteCountArray[VSite];
1204 ArrayRef<InstrProfValueData> VDs(ValueData, ValueDataCount);
1205 Record.addValueData(ValueKind: Kind, Site: VSite, VData: VDs, ValueMap: SymTab);
1206 ValueData += ValueDataCount;
1207 }
1208}
1209
// For writing/serialization, Old is the host endianness and New is the
// byte order intended on disk. For reading/deserialization, Old
// is the on-disk source endianness and New is the host endianness.
1213void ValueProfRecord::swapBytes(llvm::endianness Old, llvm::endianness New) {
1214 using namespace support;
1215
1216 if (Old == New)
1217 return;
1218
1219 if (llvm::endianness::native != Old) {
1220 sys::swapByteOrder<uint32_t>(Value&: NumValueSites);
1221 sys::swapByteOrder<uint32_t>(Value&: Kind);
1222 }
1223 uint32_t ND = getValueProfRecordNumValueData(This: this);
1224 InstrProfValueData *VD = getValueProfRecordValueData(This: this);
1225
  // No need to swap the byte array SiteCountArray.
1227 for (uint32_t I = 0; I < ND; I++) {
1228 sys::swapByteOrder<uint64_t>(Value&: VD[I].Value);
1229 sys::swapByteOrder<uint64_t>(Value&: VD[I].Count);
1230 }
1231 if (llvm::endianness::native == Old) {
1232 sys::swapByteOrder<uint32_t>(Value&: NumValueSites);
1233 sys::swapByteOrder<uint32_t>(Value&: Kind);
1234 }
1235}
1236
1237void ValueProfData::deserializeTo(InstrProfRecord &Record,
1238 InstrProfSymtab *SymTab) {
1239 if (NumValueKinds == 0)
1240 return;
1241
1242 ValueProfRecord *VR = getFirstValueProfRecord(This: this);
1243 for (uint32_t K = 0; K < NumValueKinds; K++) {
1244 VR->deserializeTo(Record, SymTab);
1245 VR = getValueProfRecordNext(This: VR);
1246 }
1247}
1248
1249static std::unique_ptr<ValueProfData> allocValueProfData(uint32_t TotalSize) {
1250 return std::unique_ptr<ValueProfData>(new (::operator new(TotalSize))
1251 ValueProfData());
1252}
1253
1254Error ValueProfData::checkIntegrity() {
1255 if (NumValueKinds > IPVK_Last + 1)
1256 return make_error<InstrProfError>(
1257 Args: instrprof_error::malformed, Args: "number of value profile kinds is invalid");
  // Total size needs to be a multiple of the quadword size.
1259 if (TotalSize % sizeof(uint64_t))
1260 return make_error<InstrProfError>(
        instrprof_error::malformed,
        "total size is not a multiple of quadword size");
1262
1263 ValueProfRecord *VR = getFirstValueProfRecord(This: this);
1264 for (uint32_t K = 0; K < this->NumValueKinds; K++) {
1265 if (VR->Kind > IPVK_Last)
1266 return make_error<InstrProfError>(Args: instrprof_error::malformed,
1267 Args: "value kind is invalid");
1268 VR = getValueProfRecordNext(This: VR);
1269 if ((char *)VR - (char *)this > (ptrdiff_t)TotalSize)
1270 return make_error<InstrProfError>(
1271 Args: instrprof_error::malformed,
1272 Args: "value profile address is greater than total size");
1273 }
1274 return Error::success();
1275}
1276
1277Expected<std::unique_ptr<ValueProfData>>
1278ValueProfData::getValueProfData(const unsigned char *D,
1279 const unsigned char *const BufferEnd,
1280 llvm::endianness Endianness) {
1281 using namespace support;
1282
1283 if (D + sizeof(ValueProfData) > BufferEnd)
1284 return make_error<InstrProfError>(Args: instrprof_error::truncated);
1285
1286 const unsigned char *Header = D;
1287 uint32_t TotalSize = endian::readNext<uint32_t>(memory&: Header, endian: Endianness);
1288
1289 if (D + TotalSize > BufferEnd)
1290 return make_error<InstrProfError>(Args: instrprof_error::too_large);
1291
1292 std::unique_ptr<ValueProfData> VPD = allocValueProfData(TotalSize);
1293 memcpy(dest: VPD.get(), src: D, n: TotalSize);
1294 // Byte swap.
1295 VPD->swapBytesToHost(Endianness);
1296
1297 Error E = VPD->checkIntegrity();
1298 if (E)
1299 return std::move(E);
1300
1301 return std::move(VPD);
1302}
1303
1304void ValueProfData::swapBytesToHost(llvm::endianness Endianness) {
1305 using namespace support;
1306
1307 if (Endianness == llvm::endianness::native)
1308 return;
1309
1310 sys::swapByteOrder<uint32_t>(Value&: TotalSize);
1311 sys::swapByteOrder<uint32_t>(Value&: NumValueKinds);
1312
1313 ValueProfRecord *VR = getFirstValueProfRecord(This: this);
1314 for (uint32_t K = 0; K < NumValueKinds; K++) {
1315 VR->swapBytes(Old: Endianness, New: llvm::endianness::native);
1316 VR = getValueProfRecordNext(This: VR);
1317 }
1318}
1319
1320void ValueProfData::swapBytesFromHost(llvm::endianness Endianness) {
1321 using namespace support;
1322
1323 if (Endianness == llvm::endianness::native)
1324 return;
1325
1326 ValueProfRecord *VR = getFirstValueProfRecord(This: this);
1327 for (uint32_t K = 0; K < NumValueKinds; K++) {
1328 ValueProfRecord *NVR = getValueProfRecordNext(This: VR);
1329 VR->swapBytes(Old: llvm::endianness::native, New: Endianness);
1330 VR = NVR;
1331 }
1332 sys::swapByteOrder<uint32_t>(Value&: TotalSize);
1333 sys::swapByteOrder<uint32_t>(Value&: NumValueKinds);
1334}
1335
1336void annotateValueSite(Module &M, Instruction &Inst,
1337 const InstrProfRecord &InstrProfR,
1338 InstrProfValueKind ValueKind, uint32_t SiteIdx,
1339 uint32_t MaxMDCount) {
1340 auto VDs = InstrProfR.getValueArrayForSite(ValueKind, Site: SiteIdx);
1341 if (VDs.empty())
1342 return;
1343 uint64_t Sum = 0;
1344 for (const InstrProfValueData &V : VDs)
1345 Sum = SaturatingAdd(X: Sum, Y: V.Count);
1346 annotateValueSite(M, Inst, VDs, Sum, ValueKind, MaxMDCount);
1347}
1348
1349void annotateValueSite(Module &M, Instruction &Inst,
1350 ArrayRef<InstrProfValueData> VDs,
1351 uint64_t Sum, InstrProfValueKind ValueKind,
1352 uint32_t MaxMDCount) {
1353 if (VDs.empty())
1354 return;
1355 LLVMContext &Ctx = M.getContext();
1356 MDBuilder MDHelper(Ctx);
1357 SmallVector<Metadata *, 3> Vals;
1358 // Tag
1359 Vals.push_back(Elt: MDHelper.createString(Str: MDProfLabels::ValueProfile));
1360 // Value Kind
1361 Vals.push_back(Elt: MDHelper.createConstant(
1362 C: ConstantInt::get(Ty: Type::getInt32Ty(C&: Ctx), V: ValueKind)));
1363 // Total Count
1364 Vals.push_back(
1365 Elt: MDHelper.createConstant(C: ConstantInt::get(Ty: Type::getInt64Ty(C&: Ctx), V: Sum)));
1366
1367 // Value Profile Data
1368 uint32_t MDCount = MaxMDCount;
1369 for (const auto &VD : VDs) {
1370 Vals.push_back(Elt: MDHelper.createConstant(
1371 C: ConstantInt::get(Ty: Type::getInt64Ty(C&: Ctx), V: VD.Value)));
1372 Vals.push_back(Elt: MDHelper.createConstant(
1373 C: ConstantInt::get(Ty: Type::getInt64Ty(C&: Ctx), V: VD.Count)));
1374 if (--MDCount == 0)
1375 break;
1376 }
1377 Inst.setMetadata(KindID: LLVMContext::MD_prof, Node: MDNode::get(Context&: Ctx, MDs: Vals));
1378}
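// The metadata built above has the following shape (shown for illustration,
// assuming MDProfLabels::ValueProfile is the "VP" tag string):
//   !{!"VP", i32 <value-kind>, i64 <total-count>,
//     i64 <value1>, i64 <count1>, i64 <value2>, i64 <count2>, ...}
// with at most MaxMDCount value/count pairs.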
1379
1380MDNode *mayHaveValueProfileOfKind(const Instruction &Inst,
1381 InstrProfValueKind ValueKind) {
1382 MDNode *MD = Inst.getMetadata(KindID: LLVMContext::MD_prof);
1383 if (!MD)
1384 return nullptr;
1385
1386 if (MD->getNumOperands() < 5)
1387 return nullptr;
1388
1389 MDString *Tag = cast<MDString>(Val: MD->getOperand(I: 0));
1390 if (!Tag || Tag->getString() != MDProfLabels::ValueProfile)
1391 return nullptr;
1392
1393 // Now check kind:
1394 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: 1));
1395 if (!KindInt)
1396 return nullptr;
1397 if (KindInt->getZExtValue() != ValueKind)
1398 return nullptr;
1399
1400 return MD;
1401}
1402
1403SmallVector<InstrProfValueData, 4>
1404getValueProfDataFromInst(const Instruction &Inst, InstrProfValueKind ValueKind,
1405 uint32_t MaxNumValueData, uint64_t &TotalC,
1406 bool GetNoICPValue) {
1407 // Four inline elements seem to work well in practice. With MaxNumValueData,
1408 // this array won't grow very big anyway.
1409 SmallVector<InstrProfValueData, 4> ValueData;
1410 MDNode *MD = mayHaveValueProfileOfKind(Inst, ValueKind);
1411 if (!MD)
1412 return ValueData;
1413 const unsigned NOps = MD->getNumOperands();
1414 // Get total count
1415 ConstantInt *TotalCInt = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: 2));
1416 if (!TotalCInt)
1417 return ValueData;
1418 TotalC = TotalCInt->getZExtValue();
1419
1420 ValueData.reserve(N: (NOps - 3) / 2);
1421 for (unsigned I = 3; I < NOps; I += 2) {
1422 if (ValueData.size() >= MaxNumValueData)
1423 break;
1424 ConstantInt *Value = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I));
1425 ConstantInt *Count =
1426 mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: I + 1));
1427 if (!Value || !Count) {
1428 ValueData.clear();
1429 return ValueData;
1430 }
1431 uint64_t CntValue = Count->getZExtValue();
1432 if (!GetNoICPValue && (CntValue == NOMORE_ICP_MAGICNUM))
1433 continue;
1434 InstrProfValueData V;
1435 V.Value = Value->getZExtValue();
1436 V.Count = CntValue;
1437 ValueData.push_back(Elt: V);
1438 }
1439 return ValueData;
1440}
1441
1442MDNode *getPGOFuncNameMetadata(const Function &F) {
1443 return F.getMetadata(Kind: getPGOFuncNameMetadataName());
1444}
1445
1446static void createPGONameMetadata(GlobalObject &GO, StringRef MetadataName,
1447 StringRef PGOName) {
  // This is only needed for internal-linkage functions or global variables,
  // whose PGO names differ from their symbol names.
1450 if (GO.getName() == PGOName)
1451 return;
1452
1453 // Don't create duplicated metadata.
1454 if (GO.getMetadata(Kind: MetadataName))
1455 return;
1456
1457 LLVMContext &C = GO.getContext();
1458 MDNode *N = MDNode::get(Context&: C, MDs: MDString::get(Context&: C, Str: PGOName));
1459 GO.setMetadata(Kind: MetadataName, Node: N);
1460}
1461
1462void createPGOFuncNameMetadata(Function &F, StringRef PGOFuncName) {
1463 return createPGONameMetadata(GO&: F, MetadataName: getPGOFuncNameMetadataName(), PGOName: PGOFuncName);
1464}
1465
1466void createPGONameMetadata(GlobalObject &GO, StringRef PGOName) {
1467 return createPGONameMetadata(GO, MetadataName: getPGONameMetadataName(), PGOName);
1468}
1469
1470bool needsComdatForCounter(const GlobalObject &GO, const Module &M) {
1471 if (GO.hasComdat())
1472 return true;
1473
1474 if (!M.getTargetTriple().supportsCOMDAT())
1475 return false;
1476
  // See createPGOFuncNameVar for more details. To avoid link errors, profile
  // counters for functions with available_externally linkage need to be
  // changed to linkonce linkage. On ELF-based systems, this leads to weak
  // symbols being created. Without using comdat, duplicate entries won't be
  // removed by the linker, leading to increased data segment size and raw
  // profile size. Even worse, since the referenced counter from the
  // per-function profile data object will be resolved to the common strong
  // definition, the profile counts for available_externally functions will end
  // up being duplicated in the raw profile data. This can result in a
  // distorted profile, as the counts of those duplicates will be accumulated
  // by the profile merger.
1487 GlobalValue::LinkageTypes Linkage = GO.getLinkage();
1488 if (Linkage != GlobalValue::ExternalWeakLinkage &&
1489 Linkage != GlobalValue::AvailableExternallyLinkage)
1490 return false;
1491
1492 return true;
1493}
1494
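// Illustrative usage sketch for the predicate below (hypothetical caller): a
// pass can use it to tell IR-level instrumentation apart from front-end
// instrumentation:
//
//   if (isIRPGOFlagSet(&M)) {
//     // The module was instrumented for IR-level PGO (-fprofile-generate).
//   }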
// Check if INSTR_PROF_RAW_VERSION_VAR is defined.
bool isIRPGOFlagSet(const Module *M) {
  const GlobalVariable *IRInstrVar =
      M->getNamedGlobal(INSTR_PROF_QUOTE(INSTR_PROF_RAW_VERSION_VAR));
  if (!IRInstrVar || IRInstrVar->hasLocalLinkage())
    return false;

  // For CSPGO+LTO, this variable might be marked as non-prevailing and we only
  // have the decl.
  if (IRInstrVar->isDeclaration())
    return true;

  // Check if the flag is set.
  if (!IRInstrVar->hasInitializer())
    return false;

  auto *InitVal = dyn_cast_or_null<ConstantInt>(IRInstrVar->getInitializer());
  if (!InitVal)
    return false;
  return (InitVal->getZExtValue() & VARIANT_MASK_IR_PROF) != 0;
}

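// Illustrative usage sketch (ModuleSuffix is a hypothetical name): callers
// that want to give a comdat function a module-specific name typically guard
// the rename with the predicate below, e.g.
//
//   if (canRenameComdatFunc(F, /*CheckAddressTaken=*/true))
//     F.setName(F.getName() + ModuleSuffix);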
// Check if we can safely rename this Comdat function.
bool canRenameComdatFunc(const Function &F, bool CheckAddressTaken) {
  if (F.getName().empty())
    return false;
  if (!needsComdatForCounter(F, *(F.getParent())))
    return false;
  // It is unsafe to rename an address-taken function, since its address may be
  // used in function comparison.
  if (CheckAddressTaken && F.hasAddressTaken())
    return false;
  // Renaming is only safe if this function may be discarded when it is not
  // used in the compilation unit.
  if (!GlobalValue::isDiscardableIfUnused(F.getLinkage()))
    return false;

  // For AvailableExternallyLinkage functions.
  if (!F.hasComdat()) {
    assert(F.getLinkage() == GlobalValue::AvailableExternallyLinkage);
    return true;
  }
  return true;
}

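// For reference, a hedged sketch of the IR the function below produces when
// the output path is "default.profraw" (the exact linkage and comdat depend on
// target COMDAT support; the symbol name comes from
// INSTR_PROF_PROFILE_NAME_VAR):
//
//   @__llvm_profile_filename = hidden constant [16 x i8] c"default.profraw\00"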
// Create the variable for the profile file name.
void createProfileFileNameVar(Module &M, StringRef InstrProfileOutput) {
  if (InstrProfileOutput.empty())
    return;
  Constant *ProfileNameConst =
      ConstantDataArray::getString(M.getContext(), InstrProfileOutput, true);
  GlobalVariable *ProfileNameVar = new GlobalVariable(
      M, ProfileNameConst->getType(), true, GlobalValue::WeakAnyLinkage,
      ProfileNameConst, INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_NAME_VAR));
  ProfileNameVar->setVisibility(GlobalValue::HiddenVisibility);
  Triple TT(M.getTargetTriple());
  if (TT.supportsCOMDAT()) {
    ProfileNameVar->setLinkage(GlobalValue::ExternalLinkage);
    ProfileNameVar->setComdat(M.getOrInsertComdat(
        StringRef(INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_NAME_VAR))));
  }
}

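// Illustrative usage sketch for the method below (mirrors how llvm-profdata's
// overlap mode drives this class; the variable names are assumptions, not
// exact tool code):
//
//   OverlapStats Overlap;
//   if (Error E = Overlap.accumulateCounts(BaseFile, TestFile, /*IsCS=*/false))
//     return E; // One of the profiles could not be read.
//   // Per-function overlap is then accumulated and Overlap.dump(OS) prints
//   // the summary.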
Error OverlapStats::accumulateCounts(const std::string &BaseFilename,
                                     const std::string &TestFilename,
                                     bool IsCS) {
  auto GetProfileSum = [IsCS](const std::string &Filename,
                              CountSumOrPercent &Sum) -> Error {
    // This function is only used from llvm-profdata, which doesn't use any
    // kind of VFS. Just create a default RealFileSystem to read profiles.
    auto FS = vfs::getRealFileSystem();
    auto ReaderOrErr = InstrProfReader::create(Filename, *FS);
    if (Error E = ReaderOrErr.takeError()) {
      return E;
    }
    auto Reader = std::move(ReaderOrErr.get());
    Reader->accumulateCounts(Sum, IsCS);
    return Error::success();
  };
  auto Ret = GetProfileSum(BaseFilename, Base);
  if (Ret)
    return Ret;
  Ret = GetProfileSum(TestFilename, Test);
  if (Ret)
    return Ret;
  this->BaseFilename = &BaseFilename;
  this->TestFilename = &TestFilename;
  Valid = true;
  return Error::success();
}

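// Worked example for the accumulation below (illustrative numbers): if the
// test profile's total edge count is 1000 and a mismatched function carries
// 50 counts in the test profile, Mismatch.CountSum grows by 50/1000 = 0.05;
// mismatches are thus tracked as a fraction of the test profile's total.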
void OverlapStats::addOneMismatch(const CountSumOrPercent &MismatchFunc) {
  Mismatch.NumEntries += 1;
  Mismatch.CountSum += MismatchFunc.CountSum / Test.CountSum;
  for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
    if (Test.ValueCounts[I] >= 1.0f)
      Mismatch.ValueCounts[I] +=
          MismatchFunc.ValueCounts[I] / Test.ValueCounts[I];
  }
}

void OverlapStats::addOneUnique(const CountSumOrPercent &UniqueFunc) {
  Unique.NumEntries += 1;
  Unique.CountSum += UniqueFunc.CountSum / Test.CountSum;
  for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
    if (Test.ValueCounts[I] >= 1.0f)
      Unique.ValueCounts[I] += UniqueFunc.ValueCounts[I] / Test.ValueCounts[I];
  }
}

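// A hedged sketch of the program-level output produced below (the filenames
// and values are illustrative only, and the first line is not wrapped in real
// output):
//
//   Profile overlap information for base_profile: base.profdata and
//       test_profile: test.profdata
//   Program level:
//     # of functions overlap: 120
//     Edge profile overlap: 97.500%
//     Edge profile base count sum: 150000
//     Edge profile test count sum: 148000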
void OverlapStats::dump(raw_fd_ostream &OS) const {
  if (!Valid)
    return;

  const char *EntryName =
      (Level == ProgramLevel ? "functions" : "edge counters");
  if (Level == ProgramLevel) {
    OS << "Profile overlap information for base_profile: " << *BaseFilename
       << " and test_profile: " << *TestFilename << "\nProgram level:\n";
  } else {
    OS << "Function level:\n"
       << "  Function: " << FuncName << " (Hash=" << FuncHash << ")\n";
  }

  OS << "  # of " << EntryName << " overlap: " << Overlap.NumEntries << "\n";
  if (Mismatch.NumEntries)
    OS << "  # of " << EntryName << " mismatch: " << Mismatch.NumEntries
       << "\n";
  if (Unique.NumEntries)
    OS << "  # of " << EntryName
       << " only in test_profile: " << Unique.NumEntries << "\n";

  OS << "  Edge profile overlap: " << format("%.3f%%", Overlap.CountSum * 100)
     << "\n";
  if (Mismatch.NumEntries)
    OS << "  Mismatched count percentage (Edge): "
       << format("%.3f%%", Mismatch.CountSum * 100) << "\n";
  if (Unique.NumEntries)
    OS << "  Percentage of Edge profile only in test_profile: "
       << format("%.3f%%", Unique.CountSum * 100) << "\n";
  OS << "  Edge profile base count sum: " << format("%.0f", Base.CountSum)
     << "\n"
     << "  Edge profile test count sum: " << format("%.0f", Test.CountSum)
     << "\n";

  for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
    if (Base.ValueCounts[I] < 1.0f && Test.ValueCounts[I] < 1.0f)
      continue;
    char ProfileKindName[20] = {0};
    switch (I) {
    case IPVK_IndirectCallTarget:
      strncpy(ProfileKindName, "IndirectCall", 19);
      break;
    case IPVK_MemOPSize:
      strncpy(ProfileKindName, "MemOP", 19);
      break;
    case IPVK_VTableTarget:
      strncpy(ProfileKindName, "VTable", 19);
      break;
    default:
      snprintf(ProfileKindName, 19, "VP[%d]", I);
      break;
    }
    OS << "  " << ProfileKindName
       << " profile overlap: " << format("%.3f%%", Overlap.ValueCounts[I] * 100)
       << "\n";
    if (Mismatch.NumEntries)
      OS << "  Mismatched count percentage (" << ProfileKindName
         << "): " << format("%.3f%%", Mismatch.ValueCounts[I] * 100) << "\n";
    if (Unique.NumEntries)
      OS << "  Percentage of " << ProfileKindName
         << " profile only in test_profile: "
         << format("%.3f%%", Unique.ValueCounts[I] * 100) << "\n";
    OS << "  " << ProfileKindName
       << " profile base count sum: " << format("%.0f", Base.ValueCounts[I])
       << "\n"
       << "  " << ProfileKindName
       << " profile test count sum: " << format("%.0f", Test.ValueCounts[I])
       << "\n";
  }
}

namespace IndexedInstrProf {
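// On-disk layout of the indexed profile header as read below (every field is
// a little-endian uint64_t; the offsets follow from the read order here and
// from Header::size() further down):
//
//   offset  0: Magic
//   offset  8: Version
//   offset 16: Unused
//   offset 24: HashType
//   offset 32: HashOffset
//   offset 40: MemProfOffset            (version >= 8)
//   offset 48: BinaryIdOffset           (version >= 9)
//   offset 56: TemporalProfTracesOffset (version >= 10)
//   offset 64: VTableNamesOffset        (version >= 12)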
Expected<Header> Header::readFromBuffer(const unsigned char *Buffer) {
  using namespace support;
  static_assert(std::is_standard_layout_v<Header>,
                "Use standard layout for Header for simplicity");
  Header H;

  H.Magic = endian::readNext<uint64_t, llvm::endianness::little>(Buffer);
  // Check the magic number.
  if (H.Magic != IndexedInstrProf::Magic)
    return make_error<InstrProfError>(instrprof_error::bad_magic);

  // Read the version.
  H.Version = endian::readNext<uint64_t, llvm::endianness::little>(Buffer);
  if (H.getIndexedProfileVersion() >
      IndexedInstrProf::ProfVersion::CurrentVersion)
    return make_error<InstrProfError>(instrprof_error::unsupported_version);

  static_assert(IndexedInstrProf::ProfVersion::CurrentVersion == Version13,
                "Please update the reader as needed when a new field is added "
                "or when indexed profile version gets bumped.");

  Buffer += sizeof(uint64_t); // Skip Header.Unused field.
  H.HashType = endian::readNext<uint64_t, llvm::endianness::little>(Buffer);
  H.HashOffset = endian::readNext<uint64_t, llvm::endianness::little>(Buffer);
  if (H.getIndexedProfileVersion() >= 8)
    H.MemProfOffset =
        endian::readNext<uint64_t, llvm::endianness::little>(Buffer);
  if (H.getIndexedProfileVersion() >= 9)
    H.BinaryIdOffset =
        endian::readNext<uint64_t, llvm::endianness::little>(Buffer);
  // Version 11 is handled by this condition.
  if (H.getIndexedProfileVersion() >= 10)
    H.TemporalProfTracesOffset =
        endian::readNext<uint64_t, llvm::endianness::little>(Buffer);
  if (H.getIndexedProfileVersion() >= 12)
    H.VTableNamesOffset =
        endian::readNext<uint64_t, llvm::endianness::little>(Buffer);
  return H;
}

uint64_t Header::getIndexedProfileVersion() const {
  return GET_VERSION(Version);
}

size_t Header::size() const {
  switch (getIndexedProfileVersion()) {
    // To retain backward compatibility, new fields must be appended to the end
    // of the header, and the byte offsets of existing fields shouldn't change
    // when the indexed profile version gets incremented.
    static_assert(
        IndexedInstrProf::ProfVersion::CurrentVersion == Version13,
        "Please update the size computation below if a new field has "
        "been added to the header; for a version bump without new "
        "fields, add a case statement to fall through to the latest version.");
  case 13ull:
  case 12ull:
    return 72;
  case 11ull:
    [[fallthrough]];
  case 10ull:
    return 64;
  case 9ull:
    return 56;
  case 8ull:
    return 48;
  default: // Version7 (when the backwards compatible header was introduced).
    return 40;
  }
}

} // namespace IndexedInstrProf

} // end namespace llvm