//===- bolt/runtime/instr.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// BOLT runtime instrumentation library for x86 and AArch64 Linux. Currently,
// BOLT does not support linking modules with dependencies on one another into
// the final binary (TODO?), which means this library has to be self-contained
// in a single module.
//
// All extern declarations here need to be defined by BOLT itself. Those will
// be undefined symbols that BOLT needs to resolve by emitting these symbols
// with MCStreamer. Currently, Passes/Instrumentation.cpp is the pass
// responsible for defining the symbols here, and these two files have a tight
// coupling: one works statically when you run BOLT and the other during
// program runtime when you run an instrumented binary. The main goal here is
// to output an fdata file (BOLT profile) with the instrumentation counters
// inserted by the static pass. Counters for indirect calls are an exception,
// as we can't know them statically. These counters are created and managed
// here. To allow this, we need a minimal framework for allocating memory
// dynamically. We provide this with the BumpPtrAllocator class (not LLVM's,
// but our own version of it).
//
// Since this code is intended to be inserted into any executable, we decided
// to make it standalone and not depend on any external libraries (i.e.
// language support libraries such as glibc or stdc++). To allow this, we
// provide a few light implementations of common OS-interacting functionality
// using direct syscall wrappers. Our simple allocator doesn't manage
// deallocations that fragment the memory space, so it's stack based. This is
// the minimal framework provided here to allow processing instrumented
// counters and writing fdata.
//
// In the C++ idiom used here, we never use or rely on constructors or
// destructors for global objects. That's because those need support from the
// linker in initialization/finalization code, and we want to keep our linker
// very simple. Similarly, we don't create any global objects that are zero
// initialized, since those would need to go in .bss, which our simple linker
// also doesn't support (TODO?).
//
//===----------------------------------------------------------------------===//

#include "common.h"

// Enables very verbose logging to stderr, useful when debugging
//#define ENABLE_DEBUG

#ifdef ENABLE_DEBUG
#define DEBUG(X)                                                               \
  { X; }
#else
#define DEBUG(X)                                                               \
  {}
#endif

#pragma GCC visibility push(hidden)

extern "C" {

#if defined(__APPLE__)
extern uint64_t* _bolt_instr_locations_getter();
extern uint32_t _bolt_num_counters_getter();

extern uint8_t* _bolt_instr_tables_getter();
extern uint32_t _bolt_instr_num_funcs_getter();

#else

// Main counters inserted by instrumentation, incremented during runtime when
// points of interest (locations) in the program are reached. Those are direct
// calls and direct and indirect branches (local ones). There are also counters
// for basic block execution if they are a spanning tree leaf and need to be
// counted in order to infer the execution count of other edges of the CFG.
extern uint64_t __bolt_instr_locations[];
extern uint32_t __bolt_num_counters;
// Descriptions are serialized metadata about binary functions written by BOLT,
// so we have a minimal understanding of the program structure. For a
// reference on the exact format of this metadata, see the *Description
// structs, Location, InstrumentedNode and EntryNode.
// Number of indirect call site descriptions
extern uint32_t __bolt_instr_num_ind_calls;
// Number of indirect call target descriptions
extern uint32_t __bolt_instr_num_ind_targets;
// Number of function descriptions
extern uint32_t __bolt_instr_num_funcs;
// Time to sleep across dumps (when we write the fdata profile to disk)
extern uint32_t __bolt_instr_sleep_time;
// Do not clear counters across dumps; rewrite the file with the updated values
extern bool __bolt_instr_no_counters_clear;
// Wait until all forks of the instrumented process finish
extern bool __bolt_instr_wait_forks;
// Filename to dump data to
extern char __bolt_instr_filename[];
// Instrumented binary file path
extern char __bolt_instr_binpath[];
// If true, append the current PID to the fdata filename when creating it so
// different invocations of the same program can be differentiated.
extern bool __bolt_instr_use_pid;
// Functions that will be used to instrument indirect calls. The BOLT static
// pass will identify indirect calls and modify them to load the address in
// these trampolines and call this address instead. BOLT can't use direct calls
// to our handlers because our addresses here are not known at analysis time.
// We only support resolving dependencies from this file to the output of BOLT,
// *not* the other way around.
// TODO: We need better linking support to make that happen.
extern void (*__bolt_ind_call_counter_func_pointer)();
extern void (*__bolt_ind_tailcall_counter_func_pointer)();
// Function pointers to init/fini trampoline routines in the binary, so we can
// resume regular execution of these functions that we hooked
extern void __bolt_start_trampoline();
extern void __bolt_fini_trampoline();

#endif
}

namespace {

/// A simple allocator that mmaps a fixed size region and manages this space
/// in a stack fashion, meaning you always deallocate the last element that
/// was allocated. In practice, we don't need to deallocate individual elements.
/// We monotonically increase our usage and then deallocate everything once we
/// are done processing something.
class BumpPtrAllocator {
  /// This is written before each allocation and acts as a canary to detect
  /// when a bug caused our program to cross allocation boundaries.
  struct EntryMetadata {
    uint64_t Magic;
    uint64_t AllocSize;
  };

public:
  void *allocate(size_t Size) {
    Lock L(M);

    if (StackBase == nullptr) {
      StackBase = reinterpret_cast<uint8_t *>(
          __mmap(0, MaxSize, PROT_READ | PROT_WRITE,
                 (Shared ? MAP_SHARED : MAP_PRIVATE) | MAP_ANONYMOUS, -1, 0));
      assert(StackBase != MAP_FAILED,
             "BumpPtrAllocator: failed to mmap stack!");
      StackSize = 0;
    }

    Size = alignTo(Size + sizeof(EntryMetadata), 16);
    uint8_t *AllocAddress = StackBase + StackSize + sizeof(EntryMetadata);
    auto *M = reinterpret_cast<EntryMetadata *>(StackBase + StackSize);
    M->Magic = Magic;
    M->AllocSize = Size;
    StackSize += Size;
    assert(StackSize < MaxSize, "allocator ran out of memory");
    return AllocAddress;
  }

#ifdef DEBUG
  /// Element-wise deallocation is only used for debugging to catch memory
  /// bugs by checking magic bytes. Ordinarily, we reset the allocator once
  /// we are done with it. Reset is done with clear(). There's no need
  /// to deallocate each element individually.
  void deallocate(void *Ptr) {
    Lock L(M);
    uint8_t MetadataOffset = sizeof(EntryMetadata);
    auto *M = reinterpret_cast<EntryMetadata *>(
        reinterpret_cast<uint8_t *>(Ptr) - MetadataOffset);
    const uint8_t *StackTop = StackBase + StackSize + MetadataOffset;
    // Validate size
    if (Ptr != StackTop - M->AllocSize) {
      // Failed validation; check if it is a pointer returned by operator new[]
      MetadataOffset +=
          sizeof(uint64_t); // Space for number of elements alloc'ed
      M = reinterpret_cast<EntryMetadata *>(reinterpret_cast<uint8_t *>(Ptr) -
                                            MetadataOffset);
      // Ok, it failed both checks if this assertion fails. Stop the program,
      // we have a memory bug.
      assert(Ptr == StackTop - M->AllocSize,
             "must deallocate the last element alloc'ed");
    }
    assert(M->Magic == Magic, "allocator magic is corrupt");
    StackSize -= M->AllocSize;
  }
#else
  void deallocate(void *) {}
#endif

  void clear() {
    Lock L(M);
    StackSize = 0;
  }

  /// Set mmap reservation size (only relevant before the first allocation)
  void setMaxSize(uint64_t Size) { MaxSize = Size; }

  /// Set mmap reservation privacy (only relevant before the first allocation)
  void setShared(bool S) { Shared = S; }

  void destroy() {
    if (StackBase == nullptr)
      return;
    __munmap(StackBase, MaxSize);
  }

  // Placement operator to construct the allocator in possibly shared mmaped
  // memory
  static void *operator new(size_t, void *Ptr) { return Ptr; }

private:
  static constexpr uint64_t Magic = 0x1122334455667788ull;
  uint64_t MaxSize = 0xa00000;
  uint8_t *StackBase{nullptr};
  uint64_t StackSize{0};
  bool Shared{false};
  Mutex M;
};

/// Used for allocating indirect call instrumentation counters. Initialized by
/// __bolt_instr_setup, our initialization routine.
BumpPtrAllocator *GlobalAlloc;

// Base address which we subtract from recorded PC values when searching for
// indirect call description entries. Needed because indCall descriptions are
// mapped read-only and contain static addresses. Initialized in
// __bolt_instr_setup.
uint64_t TextBaseAddress = 0;

// Storage for GlobalAlloc which can be shared if not using
// instrumentation-file-append-pid.
void *GlobalMetadataStorage;

} // anonymous namespace

// User-defined placement new operators. We only use those (as opposed to
// overriding the regular operator new) so we can keep our allocator in the
// stack instead of in a data section (global).
void *operator new(size_t Sz, BumpPtrAllocator &A) { return A.allocate(Sz); }
void *operator new(size_t Sz, BumpPtrAllocator &A, char C) {
  auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
  memset(Ptr, C, Sz);
  return Ptr;
}
void *operator new[](size_t Sz, BumpPtrAllocator &A) {
  return A.allocate(Sz);
}
void *operator new[](size_t Sz, BumpPtrAllocator &A, char C) {
  auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
  memset(Ptr, C, Sz);
  return Ptr;
}
// Only called during exception unwinding (useless). We must manually dealloc.
// C++ language weirdness
void operator delete(void *Ptr, BumpPtrAllocator &A) { A.deallocate(Ptr); }
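
// Illustrative usage of the allocator and the placement operators above
// (an editorial sketch, not part of the runtime):
//
//   BumpPtrAllocator A;
//   A.setMaxSize(1 << 20);                    // configure before first use
//   auto *Counts = new (A, 0) uint64_t[128];  // zero-filled array from A
//   ...
//   A.clear();                                // bulk-release all allocations
//   A.destroy();                              // unmap the backing region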

namespace {

// Disable instrumentation optimizations that sacrifice profile accuracy
extern "C" bool __bolt_instr_conservative;

/// Basic key-val atom stored in our hash
struct SimpleHashTableEntryBase {
  uint64_t Key;
  uint64_t Val;
  void dump(const char *Msg = nullptr) {
    // TODO: make some sort of formatting function
    // Currently we have to do it the ugly way because
    // we want every message to be printed atomically via a single call to
    // __write. If we use reportNumber() and others multiple times, we'll get
    // garbage in a multithreaded environment
    char Buf[BufSize];
    char *Ptr = Buf;
    Ptr = intToStr(Ptr, __getpid(), 10);
    *Ptr++ = ':';
    *Ptr++ = ' ';
    if (Msg)
      Ptr = strCopy(Ptr, Msg, strLen(Msg));
    *Ptr++ = '0';
    *Ptr++ = 'x';
    Ptr = intToStr(Ptr, (uint64_t)this, 16);
    *Ptr++ = ':';
    *Ptr++ = ' ';
    Ptr = strCopy(Ptr, "MapEntry(0x", sizeof("MapEntry(0x") - 1);
    Ptr = intToStr(Ptr, Key, 16);
    *Ptr++ = ',';
    *Ptr++ = ' ';
    *Ptr++ = '0';
    *Ptr++ = 'x';
    Ptr = intToStr(Ptr, Val, 16);
    *Ptr++ = ')';
    *Ptr++ = '\n';
    assert(Ptr - Buf < BufSize, "Buffer overflow!");
    // print everything all at once for atomicity
    __write(2, Buf, Ptr - Buf);
  }
};

/// This hash table implementation starts by allocating a table of size
/// InitialSize. When conflicts happen in this main table, it resolves
/// them by chaining a new table of size IncSize. It never reallocs as our
/// allocator doesn't support it. Keys are intended to be function pointers.
/// There's no clever hash function (it's just x mod size, size being prime).
/// I never tuned the coefficients in the modular equation (TODO)
/// This is used for indirect calls (each call site has one of these, so it
/// should have a small footprint) and for tallying call counts globally for
/// each target to check if we missed the origin of some calls (this one is a
/// large instantiation of this template, since it is global for all call
/// sites)
template <typename T = SimpleHashTableEntryBase, uint32_t InitialSize = 7,
          uint32_t IncSize = 7>
class SimpleHashTable {
public:
  using MapEntry = T;

  /// Increment by 1 the value of \p Key. If it is not in this table, it will
  /// be added to the table and its value set to 1.
  void incrementVal(uint64_t Key, BumpPtrAllocator &Alloc) {
    if (!__bolt_instr_conservative) {
      TryLock L(M);
      if (!L.isLocked())
        return;
      auto &E = getOrAllocEntry(Key, Alloc);
      ++E.Val;
      return;
    }
    Lock L(M);
    auto &E = getOrAllocEntry(Key, Alloc);
    ++E.Val;
  }

  /// Basic member accessing interface. Here we pass the allocator explicitly
  /// to avoid storing a pointer to it as part of this table (remember there
  /// is one hash for each indirect call site, so we want to minimize our
  /// footprint).
  MapEntry &get(uint64_t Key, BumpPtrAllocator &Alloc) {
    if (!__bolt_instr_conservative) {
      TryLock L(M);
      if (!L.isLocked())
        return NoEntry;
      return getOrAllocEntry(Key, Alloc);
    }
    Lock L(M);
    return getOrAllocEntry(Key, Alloc);
  }

  /// Traverses all elements in the table
  template <typename... Args>
  void forEachElement(void (*Callback)(MapEntry &, Args...), Args... args) {
    Lock L(M);
    if (!TableRoot)
      return;
    return forEachElement(Callback, InitialSize, TableRoot, args...);
  }

  void resetCounters();

private:
  constexpr static uint64_t VacantMarker = 0;
  constexpr static uint64_t FollowUpTableMarker = 0x8000000000000000ull;

  MapEntry *TableRoot{nullptr};
  MapEntry NoEntry;
  Mutex M;

  template <typename... Args>
  void forEachElement(void (*Callback)(MapEntry &, Args...),
                      uint32_t NumEntries, MapEntry *Entries, Args... args) {
    for (uint32_t I = 0; I < NumEntries; ++I) {
      MapEntry &Entry = Entries[I];
      if (Entry.Key == VacantMarker)
        continue;
      if (Entry.Key & FollowUpTableMarker) {
        MapEntry *Next =
            reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker);
        assert(Next != Entries, "Circular reference!");
        forEachElement(Callback, IncSize, Next, args...);
        continue;
      }
      Callback(Entry, args...);
    }
  }

  MapEntry &firstAllocation(uint64_t Key, BumpPtrAllocator &Alloc) {
    TableRoot = new (Alloc, 0) MapEntry[InitialSize];
    MapEntry &Entry = TableRoot[Key % InitialSize];
    Entry.Key = Key;
    // DEBUG(Entry.dump("Created root entry: "));
    return Entry;
  }

  MapEntry &getEntry(MapEntry *Entries, uint64_t Key, uint64_t Selector,
                     BumpPtrAllocator &Alloc, int CurLevel) {
    // DEBUG(reportNumber("getEntry called, level ", CurLevel, 10));
    const uint32_t NumEntries = CurLevel == 0 ? InitialSize : IncSize;
    uint64_t Remainder = Selector / NumEntries;
    Selector = Selector % NumEntries;
    MapEntry &Entry = Entries[Selector];

    // A hit
    if (Entry.Key == Key) {
      // DEBUG(Entry.dump("Hit: "));
      return Entry;
    }

    // Vacant - add new entry
    if (Entry.Key == VacantMarker) {
      Entry.Key = Key;
      // DEBUG(Entry.dump("Adding new entry: "));
      return Entry;
    }

    // Defer to the next level
    if (Entry.Key & FollowUpTableMarker) {
      return getEntry(
          reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker),
          Key, Remainder, Alloc, CurLevel + 1);
    }

    // Conflict - create the next level
    // DEBUG(Entry.dump("Creating new level: "));

    MapEntry *NextLevelTbl = new (Alloc, 0) MapEntry[IncSize];
    // DEBUG(
    //     reportNumber("Newly allocated level: 0x", uint64_t(NextLevelTbl),
    //     16));
    uint64_t CurEntrySelector = Entry.Key / InitialSize;
    for (int I = 0; I < CurLevel; ++I)
      CurEntrySelector /= IncSize;
    CurEntrySelector = CurEntrySelector % IncSize;
    NextLevelTbl[CurEntrySelector] = Entry;
    Entry.Key = reinterpret_cast<uint64_t>(NextLevelTbl) | FollowUpTableMarker;
    assert((NextLevelTbl[CurEntrySelector].Key & ~FollowUpTableMarker) !=
               uint64_t(Entries),
           "circular reference created!\n");
    // DEBUG(NextLevelTbl[CurEntrySelector].dump("New level entry: "));
    // DEBUG(Entry.dump("Updated old entry: "));
    return getEntry(NextLevelTbl, Key, Remainder, Alloc, CurLevel + 1);
  }

  MapEntry &getOrAllocEntry(uint64_t Key, BumpPtrAllocator &Alloc) {
    if (TableRoot) {
      MapEntry &E = getEntry(TableRoot, Key, Key, Alloc, 0);
      assert(!(E.Key & FollowUpTableMarker), "Invalid entry!");
      return E;
    }
    return firstAllocation(Key, Alloc);
  }
};
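
// Probe sketch for the chained layout above (editorial illustration, with
// InitialSize == IncSize == 7): looking up key K probes the root table at
// K % 7; if that slot chains to a follow-up table, the next probe uses the
// next base-7 digit of the key (K / 7 % 7, then K / 49 % 7, ...). On a
// conflict, the displaced entry is rehashed into the new follow-up table by
// the same digit of its own key before the slot is re-tagged with
// FollowUpTableMarker.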

template <typename T> void resetIndCallCounter(T &Entry) {
  Entry.Val = 0;
}

template <typename T, uint32_t X, uint32_t Y>
void SimpleHashTable<T, X, Y>::resetCounters() {
  forEachElement(resetIndCallCounter);
}

/// Represents a hash table mapping a function target address to its counter.
using IndirectCallHashTable = SimpleHashTable<>;

/// Initialize with number 1 instead of 0 so we don't go into .bss. This is the
/// global array of all hash tables storing indirect call destinations observed
/// during runtime, one table per call site.
IndirectCallHashTable *GlobalIndCallCounters{
    reinterpret_cast<IndirectCallHashTable *>(1)};

/// Don't allow reentrancy in the fdata writing phase - only one thread writes
/// it
Mutex *GlobalWriteProfileMutex{reinterpret_cast<Mutex *>(1)};

/// Store the number of calls in addition to the target address (Key) and the
/// frequency as perceived by the basic block counter (Val).
struct CallFlowEntryBase : public SimpleHashTableEntryBase {
  uint64_t Calls;
};

using CallFlowHashTableBase = SimpleHashTable<CallFlowEntryBase, 11939, 233>;

/// This is a large table indexing all possible call targets (indirect and
/// direct ones). The goal is to find mismatches between the number of calls
/// (for those calls we were able to track) and the entry basic block counter
/// of the callee. In most cases, these two should be equal. If not, there are
/// two possible scenarios here:
///
/// * The entry BB has a higher frequency than all known calls to this
///   function. In this case, we have dynamic library code or other
///   uninstrumented code calling this function. We will write the profile
///   for these untracked calls as having source "0 [unknown] 0" in the fdata
///   file.
///
/// * The number of known calls is higher than the frequency of the entry BB.
///   This only happens when there is no counter for the entry BB / the callee
///   function is not simple (in BOLT terms). We don't do anything special
///   here and just ignore those (we still report all calls to the non-simple
///   function, though).
///
class CallFlowHashTable : public CallFlowHashTableBase {
public:
  CallFlowHashTable(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}

  MapEntry &get(uint64_t Key) { return CallFlowHashTableBase::get(Key, Alloc); }

private:
  // Unlike the hash table for indirect call targets, we do store the
  // allocator here since there is only one call flow hash and the space
  // overhead is negligible.
  BumpPtrAllocator &Alloc;
};
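
// Worked example for the two scenarios above (editorial illustration with
// made-up numbers): if a callee's entry BB counter reads 120 while the calls
// tracked to it sum to 100, the 20 missing calls are emitted with source
// "0 [unknown] 0" and count 20; if tracked calls sum to 120 while the entry
// counter reads 100, nothing extra is emitted and the individual call
// records are written as-is.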

///
/// Description metadata emitted by BOLT to describe the program - refer to
/// Passes/Instrumentation.cpp - Instrumentation::emitTablesAsELFNote()
///
struct Location {
  uint32_t FunctionName;
  uint32_t Offset;
};

struct CallDescription {
  Location From;
  uint32_t FromNode;
  Location To;
  uint32_t Counter;
  uint64_t TargetAddress;
};

using IndCallDescription = Location;

struct IndCallTargetDescription {
  Location Loc;
  uint64_t Address;
};

struct EdgeDescription {
  Location From;
  uint32_t FromNode;
  Location To;
  uint32_t ToNode;
  uint32_t Counter;
};

struct InstrumentedNode {
  uint32_t Node;
  uint32_t Counter;
};

struct EntryNode {
  uint64_t Node;
  uint64_t Address;
};

struct FunctionDescription {
  uint32_t NumLeafNodes;
  const InstrumentedNode *LeafNodes;
  uint32_t NumEdges;
  const EdgeDescription *Edges;
  uint32_t NumCalls;
  const CallDescription *Calls;
  uint32_t NumEntryNodes;
  const EntryNode *EntryNodes;

  /// Constructor will parse the serialized function metadata written by BOLT
  FunctionDescription(const uint8_t *FuncDesc);

  uint64_t getSize() const {
    return 16 + NumLeafNodes * sizeof(InstrumentedNode) +
           NumEdges * sizeof(EdgeDescription) +
           NumCalls * sizeof(CallDescription) +
           NumEntryNodes * sizeof(EntryNode);
  }
};
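
// Serialized layout parsed by the FunctionDescription constructor below
// (element sizes per the structs above; the four uint32_t count fields are
// the fixed 16 bytes in getSize()):
//
//   [NumLeafNodes][LeafNodes...][NumEdges][Edges...]
//   [NumCalls][Calls...][NumEntryNodes][EntryNodes...]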

/// The context is created when the fdata profile needs to be written to disk
/// and we need to interpret our runtime counters. It contains pointers to the
/// mmaped binary (only the BOLT-written metadata section). Deserialization
/// should be straightforward as most data is POD or an array of POD elements.
/// This metadata is used to reconstruct function CFGs.
struct ProfileWriterContext {
  IndCallDescription *IndCallDescriptions;
  IndCallTargetDescription *IndCallTargets;
  uint8_t *FuncDescriptions;
  char *Strings;  // String table with function names used in this binary
  int FileDesc;   // File descriptor for the file on disk backing this
                  // information in memory via mmap
  void *MMapPtr;  // The mmap ptr
  int MMapSize;   // The mmap size

  /// Hash table storing all possible call destinations to detect untracked
  /// calls and correctly report them as [unknown] in output fdata.
  CallFlowHashTable *CallFlowTable;

  /// Lookup the sorted indirect call target vector to fetch function name and
  /// offset for an arbitrary function pointer.
  const IndCallTargetDescription *lookupIndCallTarget(uint64_t Target) const;
};

/// Perform a string comparison and return zero if Str1 matches Str2. Compares
/// at most Size characters.
int compareStr(const char *Str1, const char *Str2, int Size) {
  while (*Str1 == *Str2) {
    if (*Str1 == '\0' || --Size == 0)
      return 0;
    ++Str1;
    ++Str2;
  }
  return 1;
}

/// Output Location to the fdata file
char *serializeLoc(const ProfileWriterContext &Ctx, char *OutBuf,
                   const Location Loc, uint32_t BufSize) {
  // fdata location format: Type Name Offset
  // Type 1 - regular symbol
  OutBuf = strCopy(OutBuf, "1 ");
  const char *Str = Ctx.Strings + Loc.FunctionName;
  uint32_t Size = 25;
  while (*Str) {
    *OutBuf++ = *Str++;
    if (++Size >= BufSize)
      break;
  }
  assert(!*Str, "buffer overflow, function name too large");
  *OutBuf++ = ' ';
  OutBuf = intToStr(OutBuf, Loc.Offset, 16);
  *OutBuf++ = ' ';
  return OutBuf;
}
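
// Example (editorial illustration with a hypothetical name): a Location whose
// FunctionName resolves to "foo" and whose Offset is 0x1a serializes as
// "1 foo 1a ", i.e. "<type> <name> <hex offset> " with type 1 denoting a
// regular symbol.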

/// Read and deserialize a function description written by BOLT. \p FuncDesc
/// points at the beginning of the function metadata structure in the file.
/// See Instrumentation::emitTablesAsELFNote()
FunctionDescription::FunctionDescription(const uint8_t *FuncDesc) {
  NumLeafNodes = *reinterpret_cast<const uint32_t *>(FuncDesc);
  DEBUG(reportNumber("NumLeafNodes = ", NumLeafNodes, 10));
  LeafNodes = reinterpret_cast<const InstrumentedNode *>(FuncDesc + 4);

  NumEdges = *reinterpret_cast<const uint32_t *>(
      FuncDesc + 4 + NumLeafNodes * sizeof(InstrumentedNode));
  DEBUG(reportNumber("NumEdges = ", NumEdges, 10));
  Edges = reinterpret_cast<const EdgeDescription *>(
      FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode));

  NumCalls = *reinterpret_cast<const uint32_t *>(
      FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription));
  DEBUG(reportNumber("NumCalls = ", NumCalls, 10));
  Calls = reinterpret_cast<const CallDescription *>(
      FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription));
  NumEntryNodes = *reinterpret_cast<const uint32_t *>(
      FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
  DEBUG(reportNumber("NumEntryNodes = ", NumEntryNodes, 10));
  EntryNodes = reinterpret_cast<const EntryNode *>(
      FuncDesc + 16 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
}

/// Read and mmap descriptions written by BOLT from the executable's notes
/// section
#if defined(HAVE_ELF_H) and !defined(__APPLE__)

void *__attribute__((noinline)) __get_pc() {
  return __builtin_extract_return_addr(__builtin_return_address(0));
}

/// Get a string with an address range and parse it to the hex pair
/// <StartAddress, EndAddress>
bool parseAddressRange(const char *Str, uint64_t &StartAddress,
                       uint64_t &EndAddress) {
  if (!Str)
    return false;
  // Parsed string format: <hex1>-<hex2>
  StartAddress = hexToLong(Str, '-');
  while (*Str && *Str != '-')
    ++Str;
  if (!*Str)
    return false;
  ++Str; // swallow '-'
  EndAddress = hexToLong(Str);
  return true;
}

/// Get the full path to the real binary by getting the current virtual
/// address and searching for the appropriate link in address range in
/// /proc/self/map_files
static char *getBinaryPath() {
  const uint32_t BufSize = 1024;
  const uint32_t NameMax = 4096;
  const char DirPath[] = "/proc/self/map_files/";
  static char TargetPath[NameMax] = {};
  char Buf[BufSize];

  if (__bolt_instr_binpath[0] != '\0')
    return __bolt_instr_binpath;

  if (TargetPath[0] != '\0')
    return TargetPath;

  unsigned long CurAddr = (unsigned long)__get_pc();
  uint64_t FDdir = __open(DirPath, O_RDONLY,
                          /*mode=*/0666);
  assert(static_cast<int64_t>(FDdir) >= 0,
         "failed to open /proc/self/map_files");

  while (long Nread = __getdents64(FDdir, (struct dirent64 *)Buf, BufSize)) {
    assert(static_cast<int64_t>(Nread) != -1, "failed to get folder entries");

    struct dirent64 *d;
    for (long Bpos = 0; Bpos < Nread; Bpos += d->d_reclen) {
      d = (struct dirent64 *)(Buf + Bpos);

      uint64_t StartAddress, EndAddress;
      if (!parseAddressRange(d->d_name, StartAddress, EndAddress))
        continue;
      if (CurAddr < StartAddress || CurAddr > EndAddress)
        continue;
      char FindBuf[NameMax];
      char *C = strCopy(FindBuf, DirPath, NameMax);
      C = strCopy(C, d->d_name, NameMax - (C - FindBuf));
      *C = '\0';
      uint32_t Ret = __readlink(FindBuf, TargetPath, sizeof(TargetPath));
      assert(Ret != -1 && Ret != BufSize, "readlink error");
      TargetPath[Ret] = '\0';
      return TargetPath;
    }
  }
  return nullptr;
}

ProfileWriterContext readDescriptions() {
  ProfileWriterContext Result;
  char *BinPath = getBinaryPath();
  assert(BinPath && BinPath[0] != '\0', "failed to find binary path");

  uint64_t FD = __open(BinPath, O_RDONLY,
                       /*mode=*/0666);
  assert(static_cast<int64_t>(FD) >= 0, "failed to open binary path");

  Result.FileDesc = FD;

  // mmap our binary to memory
  uint64_t Size = __lseek(FD, 0, SEEK_END);
  uint8_t *BinContents = reinterpret_cast<uint8_t *>(
      __mmap(0, Size, PROT_READ, MAP_PRIVATE, FD, 0));
  assert(BinContents != MAP_FAILED, "readDescriptions: Failed to mmap self!");
  Result.MMapPtr = BinContents;
  Result.MMapSize = Size;
  Elf64_Ehdr *Hdr = reinterpret_cast<Elf64_Ehdr *>(BinContents);
  Elf64_Shdr *Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff);
  Elf64_Shdr *StringTblHeader = reinterpret_cast<Elf64_Shdr *>(
      BinContents + Hdr->e_shoff + Hdr->e_shstrndx * Hdr->e_shentsize);

  // Find .bolt.instr.tables with the data we need and set pointers to it
  for (int I = 0; I < Hdr->e_shnum; ++I) {
    char *SecName = reinterpret_cast<char *>(
        BinContents + StringTblHeader->sh_offset + Shdr->sh_name);
    if (compareStr(SecName, ".bolt.instr.tables", 64) != 0) {
      Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff +
                                            (I + 1) * Hdr->e_shentsize);
      continue;
    }
    // Actual contents of the ELF note start after offset 20 decimal:
    // Offset 0: Producer name size (4 bytes)
    // Offset 4: Contents size (4 bytes)
    // Offset 8: Note type (4 bytes)
    // Offset 12: Producer name (BOLT\0) (5 bytes + align to 4-byte boundary)
    // Offset 20: Contents
    uint32_t IndCallDescSize =
        *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 20);
    uint32_t IndCallTargetDescSize = *reinterpret_cast<uint32_t *>(
        BinContents + Shdr->sh_offset + 24 + IndCallDescSize);
    uint32_t FuncDescSize =
        *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 28 +
                                      IndCallDescSize + IndCallTargetDescSize);
    Result.IndCallDescriptions = reinterpret_cast<IndCallDescription *>(
        BinContents + Shdr->sh_offset + 24);
    Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
        BinContents + Shdr->sh_offset + 28 + IndCallDescSize);
    Result.FuncDescriptions = BinContents + Shdr->sh_offset + 32 +
                              IndCallDescSize + IndCallTargetDescSize;
    Result.Strings = reinterpret_cast<char *>(
        BinContents + Shdr->sh_offset + 32 + IndCallDescSize +
        IndCallTargetDescSize + FuncDescSize);
    return Result;
  }
  const char ErrMsg[] =
      "BOLT instrumentation runtime error: could not find section "
      ".bolt.instr.tables\n";
  reportError(ErrMsg, sizeof(ErrMsg));
  return Result;
}

#else

ProfileWriterContext readDescriptions() {
  ProfileWriterContext Result;
  uint8_t *Tables = _bolt_instr_tables_getter();
  uint32_t IndCallDescSize = *reinterpret_cast<uint32_t *>(Tables);
  uint32_t IndCallTargetDescSize =
      *reinterpret_cast<uint32_t *>(Tables + 4 + IndCallDescSize);
  uint32_t FuncDescSize = *reinterpret_cast<uint32_t *>(
      Tables + 8 + IndCallDescSize + IndCallTargetDescSize);
  Result.IndCallDescriptions =
      reinterpret_cast<IndCallDescription *>(Tables + 4);
  Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
      Tables + 8 + IndCallDescSize);
  Result.FuncDescriptions =
      Tables + 12 + IndCallDescSize + IndCallTargetDescSize;
  Result.Strings = reinterpret_cast<char *>(
      Tables + 12 + IndCallDescSize + IndCallTargetDescSize + FuncDescSize);
  return Result;
}

#endif

#if !defined(__APPLE__)
/// Debug by printing overall metadata global numbers to check they are sane
void printStats(const ProfileWriterContext &Ctx) {
  char StatMsg[BufSize];
  char *StatPtr = StatMsg;
  StatPtr =
      strCopy(StatPtr,
              "\nBOLT INSTRUMENTATION RUNTIME STATISTICS\n\nIndCallDescSize: ");
  StatPtr = intToStr(StatPtr,
                     Ctx.FuncDescriptions -
                         reinterpret_cast<uint8_t *>(Ctx.IndCallDescriptions),
                     10);
  StatPtr = strCopy(StatPtr, "\nFuncDescSize: ");
  StatPtr = intToStr(
      StatPtr,
      reinterpret_cast<uint8_t *>(Ctx.Strings) - Ctx.FuncDescriptions, 10);
  StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_ind_calls: ");
  StatPtr = intToStr(StatPtr, __bolt_instr_num_ind_calls, 10);
  StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_funcs: ");
  StatPtr = intToStr(StatPtr, __bolt_instr_num_funcs, 10);
  StatPtr = strCopy(StatPtr, "\n");
  __write(2, StatMsg, StatPtr - StatMsg);
}
#endif

/// This is part of a simple CFG representation in memory, where we store
/// a dynamically sized array of input and output edges per node, and store
/// a dynamically sized array of nodes per graph. We also store the spanning
/// tree edges for that CFG in a separate array of nodes in
/// \p SpanningTreeNodes, while the regular nodes live in \p CFGNodes.
struct Edge {
  uint32_t Node; // Index in the nodes array of this edge's destination
  uint32_t ID;   // Edge index in an array comprising all edges of the graph
};

/// A regular graph node or a spanning tree node
struct Node {
  uint32_t NumInEdges{0};  // Input edge count used to size InEdges
  uint32_t NumOutEdges{0}; // Output edge count used to size OutEdges
  Edge *InEdges{nullptr};  // Created and managed by \p Graph
  Edge *OutEdges{nullptr}; // ditto
};

/// Main class for CFG representation in memory. Manages object creation and
/// destruction, populates an array of CFG nodes as well as corresponding
/// spanning tree nodes.
struct Graph {
  uint32_t NumNodes;
  Node *CFGNodes;
  Node *SpanningTreeNodes;
  uint64_t *EdgeFreqs;
  uint64_t *CallFreqs;
  BumpPtrAllocator &Alloc;
  const FunctionDescription &D;

  /// Reads a list of edges from function description \p D and builds
  /// the graph from it. Allocates several internal dynamic structures that are
  /// later destroyed by ~Graph() and uses \p Alloc. D.LeafNodes contains all
  /// spanning tree leaf node descriptions (their counters). They are the seed
  /// used to compute the rest of the missing edge counts in a bottom-up
  /// traversal of the spanning tree.
  Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
        const uint64_t *Counters, ProfileWriterContext &Ctx);
  ~Graph();
  void dump() const;

private:
  void computeEdgeFrequencies(const uint64_t *Counters,
                              ProfileWriterContext &Ctx);
  void dumpEdgeFreqs() const;
};

Graph::Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
             const uint64_t *Counters, ProfileWriterContext &Ctx)
    : Alloc(Alloc), D(D) {
  DEBUG(reportNumber("G = 0x", (uint64_t)this, 16));
  // First pass to determine the number of nodes
  int32_t MaxNodes = -1;
  CallFreqs = nullptr;
  EdgeFreqs = nullptr;
  for (int I = 0; I < D.NumEdges; ++I) {
    if (static_cast<int32_t>(D.Edges[I].FromNode) > MaxNodes)
      MaxNodes = D.Edges[I].FromNode;
    if (static_cast<int32_t>(D.Edges[I].ToNode) > MaxNodes)
      MaxNodes = D.Edges[I].ToNode;
  }

  for (int I = 0; I < D.NumLeafNodes; ++I)
    if (static_cast<int32_t>(D.LeafNodes[I].Node) > MaxNodes)
      MaxNodes = D.LeafNodes[I].Node;

  for (int I = 0; I < D.NumCalls; ++I)
    if (static_cast<int32_t>(D.Calls[I].FromNode) > MaxNodes)
      MaxNodes = D.Calls[I].FromNode;

  // No nodes? Nothing to do
  if (MaxNodes < 0) {
    DEBUG(report("No nodes!\n"));
    CFGNodes = nullptr;
    SpanningTreeNodes = nullptr;
    NumNodes = 0;
    return;
  }
  ++MaxNodes;
  DEBUG(reportNumber("NumNodes = ", MaxNodes, 10));
  NumNodes = static_cast<uint32_t>(MaxNodes);

  // Initial allocations
  CFGNodes = new (Alloc) Node[MaxNodes];

  DEBUG(reportNumber("G->CFGNodes = 0x", (uint64_t)CFGNodes, 16));
  SpanningTreeNodes = new (Alloc) Node[MaxNodes];
  DEBUG(reportNumber("G->SpanningTreeNodes = 0x",
                     (uint64_t)SpanningTreeNodes, 16));

  // Figure out how much to allocate to each vector (in/out edge sets)
  for (int I = 0; I < D.NumEdges; ++I) {
    CFGNodes[D.Edges[I].FromNode].NumOutEdges++;
    CFGNodes[D.Edges[I].ToNode].NumInEdges++;
    if (D.Edges[I].Counter != 0xffffffff)
      continue;

    SpanningTreeNodes[D.Edges[I].FromNode].NumOutEdges++;
    SpanningTreeNodes[D.Edges[I].ToNode].NumInEdges++;
  }

  // Allocate in/out edge sets
  for (int I = 0; I < MaxNodes; ++I) {
    if (CFGNodes[I].NumInEdges > 0)
      CFGNodes[I].InEdges = new (Alloc) Edge[CFGNodes[I].NumInEdges];
    if (CFGNodes[I].NumOutEdges > 0)
      CFGNodes[I].OutEdges = new (Alloc) Edge[CFGNodes[I].NumOutEdges];
    if (SpanningTreeNodes[I].NumInEdges > 0)
      SpanningTreeNodes[I].InEdges =
          new (Alloc) Edge[SpanningTreeNodes[I].NumInEdges];
    if (SpanningTreeNodes[I].NumOutEdges > 0)
      SpanningTreeNodes[I].OutEdges =
          new (Alloc) Edge[SpanningTreeNodes[I].NumOutEdges];
    CFGNodes[I].NumInEdges = 0;
    CFGNodes[I].NumOutEdges = 0;
    SpanningTreeNodes[I].NumInEdges = 0;
    SpanningTreeNodes[I].NumOutEdges = 0;
  }

  // Fill in/out edge sets
  for (int I = 0; I < D.NumEdges; ++I) {
    const uint32_t Src = D.Edges[I].FromNode;
    const uint32_t Dst = D.Edges[I].ToNode;
    Edge *E = &CFGNodes[Src].OutEdges[CFGNodes[Src].NumOutEdges++];
    E->Node = Dst;
    E->ID = I;

    E = &CFGNodes[Dst].InEdges[CFGNodes[Dst].NumInEdges++];
    E->Node = Src;
    E->ID = I;

    if (D.Edges[I].Counter != 0xffffffff)
      continue;

    E = &SpanningTreeNodes[Src]
             .OutEdges[SpanningTreeNodes[Src].NumOutEdges++];
    E->Node = Dst;
    E->ID = I;

    E = &SpanningTreeNodes[Dst]
             .InEdges[SpanningTreeNodes[Dst].NumInEdges++];
    E->Node = Src;
    E->ID = I;
  }

  computeEdgeFrequencies(Counters, Ctx);
}

Graph::~Graph() {
  if (CallFreqs)
    Alloc.deallocate(CallFreqs);
  if (EdgeFreqs)
    Alloc.deallocate(EdgeFreqs);
  for (int I = NumNodes - 1; I >= 0; --I) {
    if (SpanningTreeNodes[I].OutEdges)
      Alloc.deallocate(SpanningTreeNodes[I].OutEdges);
    if (SpanningTreeNodes[I].InEdges)
      Alloc.deallocate(SpanningTreeNodes[I].InEdges);
    if (CFGNodes[I].OutEdges)
      Alloc.deallocate(CFGNodes[I].OutEdges);
    if (CFGNodes[I].InEdges)
      Alloc.deallocate(CFGNodes[I].InEdges);
  }
  if (SpanningTreeNodes)
    Alloc.deallocate(SpanningTreeNodes);
  if (CFGNodes)
    Alloc.deallocate(CFGNodes);
}

void Graph::dump() const {
  reportNumber("Dumping graph with number of nodes: ", NumNodes, 10);
  report(" Full graph:\n");
  for (int I = 0; I < NumNodes; ++I) {
    const Node *N = &CFGNodes[I];
    reportNumber(" Node #", I, 10);
    reportNumber(" InEdges total ", N->NumInEdges, 10);
    for (int J = 0; J < N->NumInEdges; ++J)
      reportNumber(" ", N->InEdges[J].Node, 10);
    reportNumber(" OutEdges total ", N->NumOutEdges, 10);
    for (int J = 0; J < N->NumOutEdges; ++J)
      reportNumber(" ", N->OutEdges[J].Node, 10);
    report("\n");
  }
  report(" Spanning tree:\n");
  for (int I = 0; I < NumNodes; ++I) {
    const Node *N = &SpanningTreeNodes[I];
    reportNumber(" Node #", I, 10);
    reportNumber(" InEdges total ", N->NumInEdges, 10);
    for (int J = 0; J < N->NumInEdges; ++J)
      reportNumber(" ", N->InEdges[J].Node, 10);
    reportNumber(" OutEdges total ", N->NumOutEdges, 10);
    for (int J = 0; J < N->NumOutEdges; ++J)
      reportNumber(" ", N->OutEdges[J].Node, 10);
    report("\n");
  }
}

void Graph::dumpEdgeFreqs() const {
  reportNumber(
      "Dumping edge frequencies for graph with num edges: ", D.NumEdges, 10);
  for (int I = 0; I < D.NumEdges; ++I) {
    reportNumber("* Src: ", D.Edges[I].FromNode, 10);
    reportNumber(" Dst: ", D.Edges[I].ToNode, 10);
    reportNumber(" Cnt: ", EdgeFreqs[I], 10);
  }
}

/// Auxiliary map structure for fast lookups of which calls map to each node
/// of the function CFG
struct NodeToCallsMap {
  struct MapEntry {
    uint32_t NumCalls;
    uint32_t *Calls;
  };
  MapEntry *Entries;
  BumpPtrAllocator &Alloc;
  const uint32_t NumNodes;

  NodeToCallsMap(BumpPtrAllocator &Alloc, const FunctionDescription &D,
                 uint32_t NumNodes)
      : Alloc(Alloc), NumNodes(NumNodes) {
    Entries = new (Alloc, 0) MapEntry[NumNodes];
    for (int I = 0; I < D.NumCalls; ++I) {
      DEBUG(reportNumber("Registering call in node ", D.Calls[I].FromNode, 10));
      ++Entries[D.Calls[I].FromNode].NumCalls;
    }
    for (int I = 0; I < NumNodes; ++I) {
      Entries[I].Calls =
          Entries[I].NumCalls ? new (Alloc) uint32_t[Entries[I].NumCalls]
                              : nullptr;
      Entries[I].NumCalls = 0;
    }
    for (int I = 0; I < D.NumCalls; ++I) {
      MapEntry &Entry = Entries[D.Calls[I].FromNode];
      Entry.Calls[Entry.NumCalls++] = I;
    }
  }

  /// Set the frequency of all calls in node \p NodeID to Freq. However, if
  /// the calls have their own counters and do not depend on the basic block
  /// counter, this means they have landing pads and throw exceptions. In this
  /// case, set their frequency with their counters and return the maximum
  /// value observed in such counters. This will be used as the new frequency
  /// at basic block entry. This is used to fix the CFG edge frequencies in
  /// the presence of exceptions.
  uint64_t visitAllCallsIn(uint32_t NodeID, uint64_t Freq, uint64_t *CallFreqs,
                           const FunctionDescription &D,
                           const uint64_t *Counters,
                           ProfileWriterContext &Ctx) const {
    const MapEntry &Entry = Entries[NodeID];
    uint64_t MaxValue = 0ull;
    for (int I = 0, E = Entry.NumCalls; I != E; ++I) {
      const uint32_t CallID = Entry.Calls[I];
      DEBUG(reportNumber(" Setting freq for call ID: ", CallID, 10));
      const CallDescription &CallDesc = D.Calls[CallID];
      if (CallDesc.Counter == 0xffffffff) {
        CallFreqs[CallID] = Freq;
        DEBUG(reportNumber(" with : ", Freq, 10));
      } else {
        const uint64_t CounterVal = Counters[CallDesc.Counter];
        CallFreqs[CallID] = CounterVal;
        MaxValue = CounterVal > MaxValue ? CounterVal : MaxValue;
        DEBUG(reportNumber(" with (private counter) : ", CounterVal, 10));
      }
      DEBUG(reportNumber(" Address: 0x", CallDesc.TargetAddress, 16));
      if (CallFreqs[CallID] > 0)
        Ctx.CallFlowTable->get(CallDesc.TargetAddress).Calls +=
            CallFreqs[CallID];
    }
    return MaxValue;
  }

  ~NodeToCallsMap() {
    for (int I = NumNodes - 1; I >= 0; --I)
      if (Entries[I].Calls)
        Alloc.deallocate(Entries[I].Calls);
    Alloc.deallocate(Entries);
  }
};
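
// Construction sketch for the map above (editorial note): the constructor
// makes three passes over D.Calls - count the calls landing in each node,
// allocate each node's Calls array, then fill the arrays - so every node ends
// up with the indices of the CallDescriptions originating in it.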

/// Fill an array with the frequency of each edge in the function represented
/// by G, as well as another array for each call.
void Graph::computeEdgeFrequencies(const uint64_t *Counters,
                                   ProfileWriterContext &Ctx) {
  if (NumNodes == 0)
    return;

  EdgeFreqs = D.NumEdges ? new (Alloc, 0) uint64_t[D.NumEdges] : nullptr;
  CallFreqs = D.NumCalls ? new (Alloc, 0) uint64_t[D.NumCalls] : nullptr;

  // Set up a lookup for calls present in each node (BB)
  NodeToCallsMap *CallMap = new (Alloc) NodeToCallsMap(Alloc, D, NumNodes);

  // Perform a bottom-up, post-order traversal of the spanning tree in G.
  // Edges in the spanning tree don't have explicit counters. We must infer
  // their value using a linear combination of other counters (sum of counters
  // of the outgoing edges minus sum of counters of the incoming edges).
  uint32_t *Stack = new (Alloc) uint32_t[NumNodes];
  uint32_t StackTop = 0;
  enum Status : uint8_t { S_NEW = 0, S_VISITING, S_VISITED };
  Status *Visited = new (Alloc, 0) Status[NumNodes];
  uint64_t *LeafFrequency = new (Alloc, 0) uint64_t[NumNodes];
  uint64_t *EntryAddress = new (Alloc, 0) uint64_t[NumNodes];

  // Set up a fast lookup for the frequency of leaf nodes, which have special
  // basic block frequency instrumentation (they are not edge profiled).
  for (int I = 0; I < D.NumLeafNodes; ++I) {
    LeafFrequency[D.LeafNodes[I].Node] = Counters[D.LeafNodes[I].Counter];
    DEBUG({
      if (Counters[D.LeafNodes[I].Counter] > 0) {
        reportNumber("Leaf Node# ", D.LeafNodes[I].Node, 10);
        reportNumber(" Counter: ", Counters[D.LeafNodes[I].Counter], 10);
      }
    });
  }
  for (int I = 0; I < D.NumEntryNodes; ++I) {
    EntryAddress[D.EntryNodes[I].Node] = D.EntryNodes[I].Address;
    DEBUG({
      reportNumber("Entry Node# ", D.EntryNodes[I].Node, 10);
      reportNumber(" Address: ", D.EntryNodes[I].Address, 16);
    });
  }
  // Add all root nodes to the stack
  for (int I = 0; I < NumNodes; ++I)
    if (SpanningTreeNodes[I].NumInEdges == 0)
      Stack[StackTop++] = I;

  // Empty stack?
  if (StackTop == 0) {
    DEBUG(report("Empty stack!\n"));
    Alloc.deallocate(EntryAddress);
    Alloc.deallocate(LeafFrequency);
    Alloc.deallocate(Visited);
    Alloc.deallocate(Stack);
    CallMap->~NodeToCallsMap();
    Alloc.deallocate(CallMap);
    if (CallFreqs)
      Alloc.deallocate(CallFreqs);
    if (EdgeFreqs)
      Alloc.deallocate(EdgeFreqs);
    EdgeFreqs = nullptr;
    CallFreqs = nullptr;
    return;
  }
  // Add all known edge counts; we will infer the rest
  for (int I = 0; I < D.NumEdges; ++I) {
    const uint32_t C = D.Edges[I].Counter;
    if (C == 0xffffffff) // inferred counter - we will compute its value
      continue;
    EdgeFreqs[I] = Counters[C];
  }

  while (StackTop > 0) {
    const uint32_t Cur = Stack[--StackTop];
    DEBUG({
      if (Visited[Cur] == S_VISITING)
        report("(visiting) ");
      else
        report("(new) ");
      reportNumber("Cur: ", Cur, 10);
    });

    // This shouldn't happen in a tree
    assert(Visited[Cur] != S_VISITED, "should not have visited nodes in stack");
    if (Visited[Cur] == S_NEW) {
      Visited[Cur] = S_VISITING;
      Stack[StackTop++] = Cur;
      assert(StackTop <= NumNodes, "stack grew too large");
      for (int I = 0, E = SpanningTreeNodes[Cur].NumOutEdges; I < E; ++I) {
        const uint32_t Succ = SpanningTreeNodes[Cur].OutEdges[I].Node;
        Stack[StackTop++] = Succ;
        assert(StackTop <= NumNodes, "stack grew too large");
      }
      continue;
    }
    Visited[Cur] = S_VISITED;

    // Establish our node frequency based on outgoing edges, which should all
    // be resolved by now.
    int64_t CurNodeFreq = LeafFrequency[Cur];
    // Not a leaf?
    if (!CurNodeFreq) {
      for (int I = 0, E = CFGNodes[Cur].NumOutEdges; I != E; ++I) {
        const uint32_t SuccEdge = CFGNodes[Cur].OutEdges[I].ID;
        CurNodeFreq += EdgeFreqs[SuccEdge];
      }
    }
    if (CurNodeFreq < 0)
      CurNodeFreq = 0;

    const uint64_t CallFreq = CallMap->visitAllCallsIn(
        Cur, CurNodeFreq > 0 ? CurNodeFreq : 0, CallFreqs, D, Counters, Ctx);

    // Exception handling affected our output flow? Fix with calls info
    DEBUG({
      if (CallFreq > CurNodeFreq)
        report("Bumping node frequency with call info\n");
    });
    CurNodeFreq = CallFreq > CurNodeFreq ? CallFreq : CurNodeFreq;

    if (CurNodeFreq > 0) {
      if (uint64_t Addr = EntryAddress[Cur]) {
        DEBUG(
            reportNumber(" Setting flow at entry point address 0x", Addr, 16));
        DEBUG(reportNumber(" with: ", CurNodeFreq, 10));
        Ctx.CallFlowTable->get(Addr).Val = CurNodeFreq;
      }
    }

    // No parent? We reached a tree root; limit to call frequency updating.
    if (SpanningTreeNodes[Cur].NumInEdges == 0)
      continue;

    assert(SpanningTreeNodes[Cur].NumInEdges == 1, "must have 1 parent");
    const uint32_t ParentEdge = SpanningTreeNodes[Cur].InEdges[0].ID;

    // Calculate parent edge freq.
    int64_t ParentEdgeFreq = CurNodeFreq;
    for (int I = 0, E = CFGNodes[Cur].NumInEdges; I != E; ++I) {
      const uint32_t PredEdge = CFGNodes[Cur].InEdges[I].ID;
      ParentEdgeFreq -= EdgeFreqs[PredEdge];
    }

    // Sometimes the conservative CFG that BOLT builds will lead to incorrect
    // flow computation. For example, in a BB that transitively calls the exit
    // syscall, BOLT will add a fall-through successor even though it should
    // not have any successors. So this block's execution count will likely be
    // wrong. We tolerate this imperfection since this case should be quite
    // infrequent.
    if (ParentEdgeFreq < 0) {
      DEBUG(dumpEdgeFreqs());
      DEBUG(report("WARNING: incorrect flow"));
      ParentEdgeFreq = 0;
    }
    DEBUG(reportNumber(" Setting freq for ParentEdge: ", ParentEdge, 10));
    DEBUG(reportNumber(" with ParentEdgeFreq: ", ParentEdgeFreq, 10));
    EdgeFreqs[ParentEdge] = ParentEdgeFreq;
  }

  Alloc.deallocate(EntryAddress);
  Alloc.deallocate(LeafFrequency);
  Alloc.deallocate(Visited);
  Alloc.deallocate(Stack);
  CallMap->~NodeToCallsMap();
  Alloc.deallocate(CallMap);
  DEBUG(dumpEdgeFreqs());
}
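
// Flow-conservation sketch for the inference above (editorial illustration
// with made-up numbers): for a visited node n with parent spanning-tree edge
// p,
//
//   freq(n) = leaf counter of n, or the sum of n's outgoing edge counts
//   freq(p) = freq(n) - sum of n's other (already known) incoming edge counts
//
// e.g. outgoing edges summing to 100 with instrumented incoming edges of 60
// and 30 give freq(p) = 100 - 90 = 10.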

/// Write to \p FD all of the edge profiles for function \p FuncDesc. Uses
/// \p Alloc to allocate helper dynamic structures used to compute the profile
/// for edges that we do not explicitly instrument.
const uint8_t *writeFunctionProfile(int FD, ProfileWriterContext &Ctx,
                                    const uint8_t *FuncDesc,
                                    BumpPtrAllocator &Alloc) {
  const FunctionDescription F(FuncDesc);
  const uint8_t *next = FuncDesc + F.getSize();

#if !defined(__APPLE__)
  uint64_t *bolt_instr_locations = __bolt_instr_locations;
#else
  uint64_t *bolt_instr_locations = _bolt_instr_locations_getter();
#endif

  // Skip funcs we know are cold
#ifndef ENABLE_DEBUG
  uint64_t CountersFreq = 0;
  for (int I = 0; I < F.NumLeafNodes; ++I)
    CountersFreq += bolt_instr_locations[F.LeafNodes[I].Counter];

  if (CountersFreq == 0) {
    for (int I = 0; I < F.NumEdges; ++I) {
      const uint32_t C = F.Edges[I].Counter;
      if (C == 0xffffffff)
        continue;
      CountersFreq += bolt_instr_locations[C];
    }
    if (CountersFreq == 0) {
      for (int I = 0; I < F.NumCalls; ++I) {
        const uint32_t C = F.Calls[I].Counter;
        if (C == 0xffffffff)
          continue;
        CountersFreq += bolt_instr_locations[C];
      }
      if (CountersFreq == 0)
        return next;
    }
  }
#endif

  Graph *G = new (Alloc) Graph(Alloc, F, bolt_instr_locations, Ctx);
  DEBUG(G->dump());

  if (!G->EdgeFreqs && !G->CallFreqs) {
    G->~Graph();
    Alloc.deallocate(G);
    return next;
  }

  for (int I = 0; I < F.NumEdges; ++I) {
    const uint64_t Freq = G->EdgeFreqs[I];
    if (Freq == 0)
      continue;
    const EdgeDescription *Desc = &F.Edges[I];
    char LineBuf[BufSize];
    char *Ptr = LineBuf;
    Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
    Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
    Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 22);
    Ptr = intToStr(Ptr, Freq, 10);
    *Ptr++ = '\n';
    __write(FD, LineBuf, Ptr - LineBuf);
  }

  for (int I = 0; I < F.NumCalls; ++I) {
    const uint64_t Freq = G->CallFreqs[I];
    if (Freq == 0)
      continue;
    char LineBuf[BufSize];
    char *Ptr = LineBuf;
    const CallDescription *Desc = &F.Calls[I];
    Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
    Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
    Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
    Ptr = intToStr(Ptr, Freq, 10);
    *Ptr++ = '\n';
    __write(FD, LineBuf, Ptr - LineBuf);
  }

  G->~Graph();
  Alloc.deallocate(G);
  return next;
}

#if !defined(__APPLE__)
const IndCallTargetDescription *
ProfileWriterContext::lookupIndCallTarget(uint64_t Target) const {
  uint32_t B = 0;
  uint32_t E = __bolt_instr_num_ind_targets;
  if (E == 0)
    return nullptr;
  do {
    uint32_t I = (E - B) / 2 + B;
    if (IndCallTargets[I].Address == Target)
      return &IndCallTargets[I];
    if (IndCallTargets[I].Address < Target)
      B = I + 1;
    else
      E = I;
  } while (B < E);
  return nullptr;
}

/// Write a single indirect call <src, target> pair to the fdata file
void visitIndCallCounter(IndirectCallHashTable::MapEntry &Entry,
                         int FD, int CallsiteID,
                         ProfileWriterContext *Ctx) {
  if (Entry.Val == 0)
    return;
  DEBUG(reportNumber("Target func 0x", Entry.Key, 16));
  DEBUG(reportNumber("Target freq: ", Entry.Val, 10));
  const IndCallDescription *CallsiteDesc =
      &Ctx->IndCallDescriptions[CallsiteID];
  const IndCallTargetDescription *TargetDesc =
      Ctx->lookupIndCallTarget(Entry.Key - TextBaseAddress);
  if (!TargetDesc) {
    DEBUG(report("Failed to lookup indirect call target\n"));
    char LineBuf[BufSize];
    char *Ptr = LineBuf;
    Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
    Ptr = strCopy(Ptr, "0 [unknown] 0 0 ", BufSize - (Ptr - LineBuf) - 40);
    Ptr = intToStr(Ptr, Entry.Val, 10);
    *Ptr++ = '\n';
    __write(FD, LineBuf, Ptr - LineBuf);
    return;
  }
  Ctx->CallFlowTable->get(TargetDesc->Address).Calls += Entry.Val;
  char LineBuf[BufSize];
  char *Ptr = LineBuf;
  Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
  Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
  Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
  Ptr = intToStr(Ptr, Entry.Val, 10);
  *Ptr++ = '\n';
  __write(FD, LineBuf, Ptr - LineBuf);
}

/// Write to \p FD all of the indirect call profiles.
void writeIndirectCallProfile(int FD, ProfileWriterContext &Ctx) {
  for (int I = 0; I < __bolt_instr_num_ind_calls; ++I) {
    DEBUG(reportNumber("IndCallsite #", I, 10));
    GlobalIndCallCounters[I].forEachElement(visitIndCallCounter, FD, I, &Ctx);
  }
}

/// Check a single call flow for a callee versus all known callers. If there
/// are fewer callers than what the callee expects, write the difference with
/// source [unknown] in the profile.
void visitCallFlowEntry(CallFlowHashTable::MapEntry &Entry, int FD,
                        ProfileWriterContext *Ctx) {
  DEBUG(reportNumber("Call flow entry address: 0x", Entry.Key, 16));
  DEBUG(reportNumber("Calls: ", Entry.Calls, 10));
  DEBUG(reportNumber("Reported entry frequency: ", Entry.Val, 10));
  DEBUG({
    if (Entry.Calls > Entry.Val)
      report(" More calls than expected!\n");
  });
  if (Entry.Val <= Entry.Calls)
    return;
  DEBUG(reportNumber(
      " Balancing calls with traffic: ", Entry.Val - Entry.Calls, 10));
  const IndCallTargetDescription *TargetDesc =
      Ctx->lookupIndCallTarget(Entry.Key);
  if (!TargetDesc) {
    // There is probably something wrong with this callee and this should be
    // investigated, but I don't want to assert and lose all data collected.
    DEBUG(report("WARNING: failed to look up call target!\n"));
    return;
  }
  char LineBuf[BufSize];
  char *Ptr = LineBuf;
  Ptr = strCopy(Ptr, "0 [unknown] 0 ", BufSize);
  Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
  Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
  Ptr = intToStr(Ptr, Entry.Val - Entry.Calls, 10);
  *Ptr++ = '\n';
  __write(FD, LineBuf, Ptr - LineBuf);
}

/// Open the fdata file for writing and return a valid file descriptor,
/// aborting the program upon failure.
int openProfile() {
  // Build the profile name string by appending our PID
  char Buf[BufSize];
  uint64_t PID = __getpid();
  char *Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
  if (__bolt_instr_use_pid) {
    Ptr = strCopy(Ptr, ".", BufSize - (Ptr - Buf + 1));
    Ptr = intToStr(Ptr, PID, 10);
    Ptr = strCopy(Ptr, ".fdata", BufSize - (Ptr - Buf + 1));
  }
  *Ptr++ = '\0';
  uint64_t FD = __open(Buf, O_WRONLY | O_TRUNC | O_CREAT,
                       /*mode=*/0666);
  if (static_cast<int64_t>(FD) < 0) {
    report("Error while trying to open profile file for writing: ");
    report(Buf);
    reportNumber("\nFailed with error number: 0x",
                 0 - static_cast<int64_t>(FD), 16);
    __exit(1);
  }
  return FD;
}
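
// Example (editorial illustration with hypothetical values): with
// __bolt_instr_filename set to "prof.fdata", __bolt_instr_use_pid enabled and
// PID 1234, the profile is written to "prof.fdata.1234.fdata".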

#endif

} // anonymous namespace

#if !defined(__APPLE__)

/// Reset all counters in case you want to start profiling a new phase of your
/// program independently of prior phases.
/// The address of this function is printed by BOLT and this can be called by
/// any attached debugger during runtime. There is a useful oneliner for gdb:
///
///   gdb -p $(pgrep -xo PROCESSNAME) -ex 'p ((void(*)())0xdeadbeef)()' \
///     -ex 'set confirm off' -ex quit
///
/// where 0xdeadbeef is this function's address and PROCESSNAME is your binary
/// file name.
extern "C" void __bolt_instr_clear_counters() {
  memset(reinterpret_cast<char *>(__bolt_instr_locations), 0,
         __bolt_num_counters * 8);
  for (int I = 0; I < __bolt_instr_num_ind_calls; ++I)
    GlobalIndCallCounters[I].resetCounters();
}

/// This is the entry point for profile writing.
/// There are three ways of getting here:
///
///  * Program execution ended, finalization methods are running and BOLT
///    hooked into FINI from your binary dynamic section;
///  * You used the sleep timer option and during initialization we forked
///    a separate process that will call this function periodically;
///  * BOLT prints this function's address so you can attach a debugger and
///    call this function directly to get your profile written to disk
///    on demand.
///
extern "C" void __attribute((force_align_arg_pointer))
__bolt_instr_data_dump(int FD) {
  // Already dumping
  if (!GlobalWriteProfileMutex->acquire())
    return;

  int ret = __lseek(FD, 0, SEEK_SET);
  assert(ret == 0, "Failed to lseek!");
  ret = __ftruncate(FD, 0);
  assert(ret == 0, "Failed to ftruncate!");
  BumpPtrAllocator HashAlloc;
  HashAlloc.setMaxSize(0x6400000);
  ProfileWriterContext Ctx = readDescriptions();
  Ctx.CallFlowTable = new (HashAlloc, 0) CallFlowHashTable(HashAlloc);

  DEBUG(printStats(Ctx));

  BumpPtrAllocator Alloc;
  Alloc.setMaxSize(0x6400000);
  const uint8_t *FuncDesc = Ctx.FuncDescriptions;
  for (int I = 0, E = __bolt_instr_num_funcs; I < E; ++I) {
    FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
    Alloc.clear();
    DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
  }
  assert(FuncDesc == (void *)Ctx.Strings,
         "FuncDesc ptr must be equal to stringtable");

  writeIndirectCallProfile(FD, Ctx);
  Ctx.CallFlowTable->forEachElement(visitCallFlowEntry, FD, &Ctx);

  __fsync(FD);
  __munmap(Ctx.MMapPtr, Ctx.MMapSize);
  __close(Ctx.FileDesc);
  HashAlloc.destroy();
  GlobalWriteProfileMutex->release();
  DEBUG(report("Finished writing profile.\n"));
}

/// Event loop for our child process spawned during setup to dump profile data
/// at user-specified intervals
void watchProcess() {
  timespec ts, rem;
  uint64_t Elapsed = 0ull;
  int FD = openProfile();
  uint64_t ppid;
  if (__bolt_instr_wait_forks) {
    // Store the parent pgid
    ppid = -__getpgid(0);
    // And leave the parent process group
    __setpgid(0, 0);
  } else {
    // Store the parent pid
    ppid = __getppid();
    if (ppid == 1) {
      // The parent is already dead
      __bolt_instr_data_dump(FD);
      goto out;
    }
  }

  ts.tv_sec = 1;
  ts.tv_nsec = 0;
  while (1) {
    __nanosleep(&ts, &rem);
    // This means our parent process or all of its forks are dead, so there is
    // no need for us to keep dumping.
    if (__kill(ppid, 0) < 0) {
      if (__bolt_instr_no_counters_clear)
        __bolt_instr_data_dump(FD);
      break;
    }

    if (++Elapsed < __bolt_instr_sleep_time)
      continue;

    Elapsed = 0;
    __bolt_instr_data_dump(FD);
    if (!__bolt_instr_no_counters_clear)
      __bolt_instr_clear_counters();
  }

out:;
  DEBUG(report("My parent process is dead, bye!\n"));
  __close(FD);
  __exit(0);
}

extern "C" void __bolt_instr_indirect_call();
extern "C" void __bolt_instr_indirect_tailcall();

/// Initialization code
extern "C" void __attribute((force_align_arg_pointer)) __bolt_instr_setup() {
  __bolt_ind_call_counter_func_pointer = __bolt_instr_indirect_call;
  __bolt_ind_tailcall_counter_func_pointer = __bolt_instr_indirect_tailcall;
  TextBaseAddress = getTextBaseAddress();

  const uint64_t CountersStart =
      reinterpret_cast<uint64_t>(&__bolt_instr_locations[0]);
  const uint64_t CountersEnd = alignTo(
      reinterpret_cast<uint64_t>(&__bolt_instr_locations[__bolt_num_counters]),
      0x1000);
  DEBUG(reportNumber("replace mmap start: ", CountersStart, 16));
  DEBUG(reportNumber("replace mmap stop: ", CountersEnd, 16));
  assert(CountersEnd > CountersStart, "no counters");

  const bool Shared = !__bolt_instr_use_pid;
  const uint64_t MapPrivateOrShared = Shared ? MAP_SHARED : MAP_PRIVATE;

  void *Ret =
      __mmap(CountersStart, CountersEnd - CountersStart,
             PROT_READ | PROT_WRITE,
             MAP_ANONYMOUS | MapPrivateOrShared | MAP_FIXED, -1, 0);
  assert(Ret != MAP_FAILED, "__bolt_instr_setup: Failed to mmap counters!");

  GlobalMetadataStorage = __mmap(0, 4096, PROT_READ | PROT_WRITE,
                                 MapPrivateOrShared | MAP_ANONYMOUS, -1, 0);
  assert(GlobalMetadataStorage != MAP_FAILED,
         "__bolt_instr_setup: failed to mmap page for metadata!");

  GlobalAlloc = new (GlobalMetadataStorage) BumpPtrAllocator;
  // Conservatively reserve 100MiB
  GlobalAlloc->setMaxSize(0x6400000);
  GlobalAlloc->setShared(Shared);
  GlobalWriteProfileMutex = new (*GlobalAlloc, 0) Mutex();
  if (__bolt_instr_num_ind_calls > 0)
    GlobalIndCallCounters =
        new (*GlobalAlloc, 0) IndirectCallHashTable[__bolt_instr_num_ind_calls];

  if (__bolt_instr_sleep_time != 0) {
    // Move the instrumented process to its own process group
    if (__bolt_instr_wait_forks)
      __setpgid(0, 0);

    if (long PID = __fork())
      return;
    watchProcess();
  }
}

extern "C" __attribute((force_align_arg_pointer)) void
instrumentIndirectCall(uint64_t Target, uint64_t IndCallID) {
  GlobalIndCallCounters[IndCallID].incrementVal(Target, *GlobalAlloc);
}

/// We receive as in-stack arguments the identifier of the indirect call site
/// as well as the target address for the call
extern "C" __attribute((naked)) void __bolt_instr_indirect_call()
{
#if defined(__aarch64__)
  // clang-format off
  __asm__ __volatile__(SAVE_ALL
                       "ldp x0, x1, [sp, #288]\n"
                       "bl instrumentIndirectCall\n"
                       RESTORE_ALL
                       "ret\n"
                       :::);
  // clang-format on
#else
  // clang-format off
  __asm__ __volatile__(SAVE_ALL
                       "mov 0xa0(%%rsp), %%rdi\n"
                       "mov 0x98(%%rsp), %%rsi\n"
                       "call instrumentIndirectCall\n"
                       RESTORE_ALL
                       "ret\n"
                       :::);
  // clang-format on
#endif
}

extern "C" __attribute((naked)) void __bolt_instr_indirect_tailcall()
{
#if defined(__aarch64__)
  // clang-format off
  __asm__ __volatile__(SAVE_ALL
                       "ldp x0, x1, [sp, #288]\n"
                       "bl instrumentIndirectCall\n"
                       RESTORE_ALL
                       "ret\n"
                       :::);
  // clang-format on
#else
  // clang-format off
  __asm__ __volatile__(SAVE_ALL
                       "mov 0x98(%%rsp), %%rdi\n"
                       "mov 0x90(%%rsp), %%rsi\n"
                       "call instrumentIndirectCall\n"
                       RESTORE_ALL
                       "ret\n"
                       :::);
  // clang-format on
#endif
}

/// This hooks the ELF entry point; it needs to save all machine state.
extern "C" __attribute((naked)) void __bolt_instr_start()
{
#if defined(__aarch64__)
  // clang-format off
  __asm__ __volatile__(SAVE_ALL
                       "bl __bolt_instr_setup\n"
                       RESTORE_ALL
                       "adrp x16, __bolt_start_trampoline\n"
                       "add x16, x16, #:lo12:__bolt_start_trampoline\n"
                       "br x16\n"
                       :::);
  // clang-format on
#else
  // clang-format off
  __asm__ __volatile__(SAVE_ALL
                       "call __bolt_instr_setup\n"
                       RESTORE_ALL
                       "jmp __bolt_start_trampoline\n"
                       :::);
  // clang-format on
#endif
}

/// This hooks into ELF's DT_FINI
extern "C" void __bolt_instr_fini() {
#if defined(__aarch64__)
  // clang-format off
  __asm__ __volatile__(SAVE_ALL
                       "adrp x16, __bolt_fini_trampoline\n"
                       "add x16, x16, #:lo12:__bolt_fini_trampoline\n"
                       "blr x16\n"
                       RESTORE_ALL
                       :::);
  // clang-format on
#else
  __asm__ __volatile__("call __bolt_fini_trampoline\n" :::);
#endif
  if (__bolt_instr_sleep_time == 0) {
    int FD = openProfile();
    __bolt_instr_data_dump(FD);
    __close(FD);
  }
  DEBUG(report("Finished.\n"));
}

#endif

#if defined(__APPLE__)

extern "C" void __bolt_instr_data_dump() {
  ProfileWriterContext Ctx = readDescriptions();

  int FD = 2;
  BumpPtrAllocator Alloc;
  const uint8_t *FuncDesc = Ctx.FuncDescriptions;
  uint32_t bolt_instr_num_funcs = _bolt_instr_num_funcs_getter();

  for (int I = 0, E = bolt_instr_num_funcs; I < E; ++I) {
    FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
    Alloc.clear();
    DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
  }
  assert(FuncDesc == (void *)Ctx.Strings,
         "FuncDesc ptr must be equal to stringtable");
}

// On OSX/iOS the final symbol name of an extern "C" function/variable contains
// one extra leading underscore: _bolt_instr_setup -> __bolt_instr_setup.
extern "C"
__attribute__((section("__TEXT,__setup")))
__attribute__((force_align_arg_pointer))
void _bolt_instr_setup() {
  __asm__ __volatile__(SAVE_ALL :::);

  report("Hello!\n");

  __asm__ __volatile__(RESTORE_ALL :::);
}

extern "C"
__attribute__((section("__TEXT,__fini")))
__attribute__((force_align_arg_pointer))
void _bolt_instr_fini() {
  report("Bye!\n");
  __bolt_instr_data_dump();
}

#endif