1//===- MLInlineAdvisor.cpp - machine learned InlineAdvisor ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interface between the inliner and a learned model.
10// It delegates model evaluation to either the AOT compiled model (the
11// 'release' mode) or a runtime-loaded model (the 'development' case).
12//
13//===----------------------------------------------------------------------===//
14#include "llvm/Analysis/MLInlineAdvisor.h"15#include "llvm/ADT/SCCIterator.h"16#include "llvm/Analysis/AssumptionCache.h"17#include "llvm/Analysis/BlockFrequencyInfo.h"18#include "llvm/Analysis/CallGraph.h"19#include "llvm/Analysis/FunctionPropertiesAnalysis.h"20#include "llvm/Analysis/InlineCost.h"21#include "llvm/Analysis/InlineModelFeatureMaps.h"22#include "llvm/Analysis/InteractiveModelRunner.h"23#include "llvm/Analysis/LazyCallGraph.h"24#include "llvm/Analysis/LoopInfo.h"25#include "llvm/Analysis/MLModelRunner.h"26#include "llvm/Analysis/OptimizationRemarkEmitter.h"27#include "llvm/Analysis/ProfileSummaryInfo.h"28#include "llvm/Analysis/ReleaseModeModelRunner.h"29#include "llvm/Analysis/TargetTransformInfo.h"30#include "llvm/IR/Dominators.h"31#include "llvm/IR/InstIterator.h"32#include "llvm/IR/Module.h"33#include "llvm/IR/PassManager.h"34#include "llvm/Support/CommandLine.h"35
36using namespace llvm;37
// Base path for the two named pipes used by the interactive ("in-process
// communication") mode: the advisor writes features to "<base>.out" and reads
// decisions from "<base>.in" (see getReleaseModeAdvisor below).
static cl::opt<std::string> InteractiveChannelBaseName(
    "inliner-interactive-channel-base", cl::Hidden,
    cl::desc(
        "Base file path for the interactive mode. The incoming filename should "
        "have the name <inliner-interactive-channel-base>.in, while the "
        "outgoing name should be <inliner-interactive-channel-base>.out"));
// Built at static-init time from DefaultDecisionName so the help text stays in
// sync with the actual extra feature name sent over the channel.
static const std::string InclDefaultMsg =
    (Twine("In interactive mode, also send the default policy decision: ") +
     DefaultDecisionName + ".")
        .str();
// When set, the default (heuristic) inlining decision is appended to the
// feature set sent to the external model (interactive mode only).
static cl::opt<bool>
    InteractiveIncludeDefault("inliner-interactive-include-default", cl::Hidden,
                              cl::desc(InclDefaultMsg));
// Criteria under which the ML policy is bypassed in favor of the default
// (heuristic) advice; consulted in getAdviceImpl.
enum class SkipMLPolicyCriteria { Never, IfCallerIsNotCold };

// Selects when to skip the ML policy. With "if-caller-not-cold", callers that
// are not profile-cold get the default advice instead of a model evaluation.
static cl::opt<SkipMLPolicyCriteria> SkipPolicy(
    "ml-inliner-skip-policy", cl::Hidden, cl::init(SkipMLPolicyCriteria::Never),
    cl::values(clEnumValN(SkipMLPolicyCriteria::Never, "never", "never"),
               clEnumValN(SkipMLPolicyCriteria::IfCallerIsNotCold,
                          "if-caller-not-cold", "if the caller is not cold")));
// Optional selector forwarded to the embedded (AOT) model runner, allowing one
// compiled artifact to host multiple models; empty means the default model.
static cl::opt<std::string> ModelSelector("ml-inliner-model-selector",
                                          cl::Hidden, cl::init(""));
// Pick the AOT-compiled ("release" mode) model implementation: the
// TensorFlow-codegen'ed InlinerSizeModel when it was built in, otherwise a
// no-op stand-in (isEmbeddedModelEvaluatorValid<> distinguishes the two).
#if defined(LLVM_HAVE_TF_AOT_INLINERSIZEMODEL)
// codegen-ed file
#include "InlinerSizeModel.h" // NOLINT
using CompiledModelType = llvm::InlinerSizeModel;
#else
using CompiledModelType = NoopSavedModelImpl;
#endif
/// Factory for the release-mode ML inline advisor.
///
/// Returns nullptr when neither an embedded AOT model nor an interactive
/// channel is available. Otherwise builds either:
///  - a ReleaseModeModelRunner around the compiled-in model, or
///  - an InteractiveModelRunner talking over the <base>.out / <base>.in pipes
///    (optionally also sending the default decision as an extra feature),
/// and wraps it in an MLInlineAdvisor.
std::unique_ptr<InlineAdvisor>
llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM,
                            std::function<bool(CallBase &)> GetDefaultAdvice) {
  if (!llvm::isEmbeddedModelEvaluatorValid<CompiledModelType>() &&
      InteractiveChannelBaseName.empty())
    return nullptr;
  std::unique_ptr<MLModelRunner> AOTRunner;
  if (InteractiveChannelBaseName.empty())
    AOTRunner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
        M.getContext(), FeatureMap, DecisionName,
        EmbeddedModelRunnerOptions().setModelSelector(ModelSelector));
  else {
    // Interactive mode: copy the feature map so the default-decision spec can
    // be appended without mutating the global FeatureMap.
    auto Features = FeatureMap;
    if (InteractiveIncludeDefault)
      Features.push_back(DefaultDecisionSpec);
    AOTRunner = std::make_unique<InteractiveModelRunner>(
        M.getContext(), Features, InlineDecisionSpec,
        InteractiveChannelBaseName + ".out",
        InteractiveChannelBaseName + ".in");
  }
  return std::make_unique<MLInlineAdvisor>(M, MAM, std::move(AOTRunner),
                                           GetDefaultAdvice);
}
94
#define DEBUG_TYPE "inline-ml"

// Once the tracked module IR size exceeds this multiple of the initial size,
// the advisor sets ForceStop and refuses further inlining (a safety valve
// against misbehaving policies).
static cl::opt<float> SizeIncreaseThreshold(
    "ml-advisor-size-increase-threshold", cl::Hidden,
    cl::desc("Maximum factor by which expected native size may increase before "
             "blocking any further inlining."),
    cl::init(2.0));

// Testing aid: retain the FunctionPropertiesInfo cache across onPassExit
// instead of clearing it (see onPassExit).
static cl::opt<bool> KeepFPICache(
    "ml-advisor-keep-fpi-cache", cl::Hidden,
    cl::desc(
        "For test - keep the ML Inline advisor's FunctionPropertiesInfo cache"),
    cl::init(false));
// clang-format off
// The full ordered list of tensor specs fed to the model. The ordering is part
// of the contract: InlineCost-derived features occupy the leading indices
// (see inlineCostFeatureToMlFeature uses in getAdviceImpl), followed by the
// structural features. Both lists are expanded via the X-macro iterators from
// InlineModelFeatureMaps.h.
const std::vector<TensorSpec> llvm::FeatureMap{
#define POPULATE_NAMES(DTYPE, SHAPE, NAME, __) TensorSpec::createSpec<DTYPE>(#NAME, SHAPE),
// InlineCost features - these must come first
  INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES)
// Non-cost features
  INLINE_FEATURE_ITERATOR(POPULATE_NAMES)
#undef POPULATE_NAMES
};
// clang-format on
120
// Names and tensor shapes of the model's output (the inline/don't-inline
// decision), the optional echoed default decision, and the training reward.
// All are scalar int64 tensors of shape {1}.
const char *const llvm::DecisionName = "inlining_decision";
const TensorSpec llvm::InlineDecisionSpec =
    TensorSpec::createSpec<int64_t>(DecisionName, {1});
const char *const llvm::DefaultDecisionName = "inlining_default";
const TensorSpec llvm::DefaultDecisionSpec =
    TensorSpec::createSpec<int64_t>(DefaultDecisionName, {1});
const char *const llvm::RewardName = "delta_size";
129CallBase *getInlinableCS(Instruction &I) {130if (auto *CS = dyn_cast<CallBase>(&I))131if (Function *Callee = CS->getCalledFunction()) {132if (!Callee->isDeclaration()) {133return CS;134}135}136return nullptr;137}
138
/// Construct the advisor: wire up the model runner and analyses, then compute
/// the per-function "call site height" levels via a bottom-up SCC walk, and
/// seed the module-wide node/edge counters.
MLInlineAdvisor::MLInlineAdvisor(
    Module &M, ModuleAnalysisManager &MAM,
    std::unique_ptr<MLModelRunner> Runner,
    std::function<bool(CallBase &)> GetDefaultAdvice)
    : InlineAdvisor(
          M, MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager()),
      ModelRunner(std::move(Runner)), GetDefaultAdvice(GetDefaultAdvice),
      CG(MAM.getResult<LazyCallGraphAnalysis>(M)),
      InitialIRSize(getModuleIRSize()), CurrentIRSize(InitialIRSize),
      PSI(MAM.getResult<ProfileSummaryAnalysis>(M)) {
  assert(ModelRunner);
  ModelRunner->switchContext("");
  // Extract the 'call site height' feature - the position of a call site
  // relative to the farthest statically reachable SCC node. We don't mutate
  // this value while inlining happens. Empirically, this feature proved
  // critical in behavioral cloning - i.e. training a model to mimic the manual
  // heuristic's decisions - and, thus, equally important for training for
  // improvement.
  // Note: a separate (eager) CallGraph is built here just for the SCC
  // iteration; levels are keyed by the LazyCallGraph (CG) nodes.
  CallGraph CGraph(M);
  for (auto I = scc_begin(&CGraph); !I.isAtEnd(); ++I) {
    const std::vector<CallGraphNode *> &CGNodes = *I;
    unsigned Level = 0;
    // First pass over the SCC: the SCC's level is 1 + the max level among
    // callees already assigned a level (i.e. in previously-visited SCCs).
    for (auto *CGNode : CGNodes) {
      Function *F = CGNode->getFunction();
      if (!F || F->isDeclaration())
        continue;
      for (auto &I : instructions(F)) {
        if (auto *CS = getInlinableCS(I)) {
          auto *Called = CS->getCalledFunction();
          auto Pos = FunctionLevels.find(&CG.get(*Called));
          // In bottom up traversal, an inlinable callee is either in the
          // same SCC, or to a function in a visited SCC. So not finding its
          // level means we haven't visited it yet, meaning it's in this SCC.
          if (Pos == FunctionLevels.end())
            continue;
          Level = std::max(Level, Pos->second + 1);
        }
      }
    }
    // Second pass: every defined function in this SCC gets the same level.
    for (auto *CGNode : CGNodes) {
      Function *F = CGNode->getFunction();
      if (F && !F->isDeclaration())
        FunctionLevels[&CG.get(*F)] = Level;
    }
  }
  // Seed the module-wide counters from the functions we just leveled.
  for (auto KVP : FunctionLevels) {
    AllNodes.insert(KVP.first);
    EdgeCount += getLocalCalls(KVP.first->getFunction());
  }
  NodeCount = AllNodes.size();
}
190
191unsigned MLInlineAdvisor::getInitialFunctionLevel(const Function &F) const {192return CG.lookup(F) ? FunctionLevels.at(CG.lookup(F)) : 0;193}
194
/// Re-sync module-wide bookkeeping when the inliner pipeline (re)enters an
/// SCC, then snapshot the SCC's current nodes into NodesInLastSCC for
/// onPassExit to account against.
void MLInlineAdvisor::onPassEntry(LazyCallGraph::SCC *CurSCC) {
  if (!CurSCC || ForceStop)
    return;
  FPICache.clear();
  // Function passes executed between InlinerPass runs may have changed the
  // module-wide features.
  // The cgscc pass manager rules are such that:
  // - if a pass leads to merging SCCs, then the pipeline is restarted on the
  // merged SCC
  // - if a pass leads to splitting the SCC, then we continue with one of the
  // splits
  // This means that the NodesInLastSCC is a superset (not strict) of the nodes
  // that subsequent passes would have processed
  // - in addition, if new Nodes were created by a pass (e.g. CoroSplit),
  // they'd be adjacent to Nodes in the last SCC. So we just need to check the
  // boundary of Nodes in NodesInLastSCC for Nodes we haven't seen. We don't
  // care about the nature of the Edge (call or ref). `FunctionLevels`-wise, we
  // record them at the same level as the original node (this is a choice, may
  // need revisiting).
  // - nodes are only deleted at the end of a call graph walk where they are
  // batch deleted, so we shouldn't see any dead nodes here.
  while (!NodesInLastSCC.empty()) {
    const auto *N = *NodesInLastSCC.begin();
    assert(!N->isDead());
    NodesInLastSCC.erase(N);
    // Re-add this node's current local call edges (its old contribution is
    // subtracted below via EdgesOfLastSeenNodes).
    EdgeCount += getLocalCalls(N->getFunction());
    const auto NLevel = FunctionLevels.at(N);
    for (const auto &E : *(*N)) {
      const auto *AdjNode = &E.getNode();
      assert(!AdjNode->isDead() && !AdjNode->getFunction().isDeclaration());
      auto I = AllNodes.insert(AdjNode);
      // We've discovered a new function.
      if (I.second) {
        ++NodeCount;
        NodesInLastSCC.insert(AdjNode);
        FunctionLevels[AdjNode] = NLevel;
      }
    }
  }

  // Remove the stale edge contribution recorded by the previous onPassExit.
  EdgeCount -= EdgesOfLastSeenNodes;
  EdgesOfLastSeenNodes = 0;

  // (Re)use NodesInLastSCC to remember the nodes in the SCC right now,
  // in case the SCC is split before onPassExit and some nodes are split out
  assert(NodesInLastSCC.empty());
  for (const auto &N : *CurSCC)
    NodesInLastSCC.insert(&N);
}
244
/// Record, at the end of an SCC pipeline run, the edge contribution of the
/// nodes we saw (both those snapshotted at onPassEntry and any added since),
/// so the next onPassEntry can delta-correct the module edge count.
void MLInlineAdvisor::onPassExit(LazyCallGraph::SCC *CurSCC) {
  // No need to keep this around - function passes will invalidate it.
  if (!KeepFPICache)
    FPICache.clear();
  if (!CurSCC || ForceStop)
    return;
  // Keep track of the nodes and edges we last saw. Then, in onPassEntry,
  // we update the node count and edge count from the subset of these nodes that
  // survived.
  EdgesOfLastSeenNodes = 0;

  // Check on nodes that were in SCC onPassEntry
  for (const LazyCallGraph::Node *N : NodesInLastSCC) {
    assert(!N->isDead());
    EdgesOfLastSeenNodes += getLocalCalls(N->getFunction());
  }

  // Check on nodes that may have got added to SCC
  for (const auto &N : *CurSCC) {
    assert(!N.isDead());
    // insert() tells us whether the node was already snapshotted; only newly
    // seen nodes add to the edge tally (the others were counted above).
    auto I = NodesInLastSCC.insert(&N);
    if (I.second)
      EdgesOfLastSeenNodes += getLocalCalls(N.getFunction());
  }
  assert(NodeCount >= NodesInLastSCC.size());
  assert(EdgeCount >= EdgesOfLastSeenNodes);
}
272
273int64_t MLInlineAdvisor::getLocalCalls(Function &F) {274return getCachedFPI(F).DirectCallsToDefinedFunctions;275}
276
// Update the internal state of the advisor, and force invalidate feature
// analysis. Currently, we maintain minimal (and very simple) global state - the
// number of functions and the number of static calls. We also keep track of the
// total IR size in this module, to stop misbehaving policies at a certain bloat
// factor (SizeIncreaseThreshold)
void MLInlineAdvisor::onSuccessfulInlining(const MLInlineAdvice &Advice,
                                           bool CalleeWasDeleted) {
  assert(!ForceStop);
  Function *Caller = Advice.getCaller();
  Function *Callee = Advice.getCallee();
  // The caller features aren't valid anymore.
  {
    PreservedAnalyses PA = PreservedAnalyses::all();
    PA.abandon<FunctionPropertiesAnalysis>();
    PA.abandon<DominatorTreeAnalysis>();
    PA.abandon<LoopAnalysis>();
    FAM.invalidate(*Caller, PA);
  }
  // Incrementally update the caller's cached FPI rather than recomputing it.
  Advice.updateCachedCallerFPI(FAM);
  // New size = caller's new size, plus the callee's size unless it was
  // deleted; subtract what both contributed before the inlining.
  int64_t IRSizeAfter =
      getIRSize(*Caller) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
  CurrentIRSize += IRSizeAfter - (Advice.CallerIRSize + Advice.CalleeIRSize);
  if (CurrentIRSize > SizeIncreaseThreshold * InitialIRSize)
    ForceStop = true;

  // We can delta-update module-wide features. We know the inlining only changed
  // the caller, and maybe the callee (by deleting the latter).
  // Nodes are simple to update.
  // For edges, we 'forget' the edges that the caller and callee used to have
  // before inlining, and add back what they currently have together.
  int64_t NewCallerAndCalleeEdges =
      getCachedFPI(*Caller).DirectCallsToDefinedFunctions;

  // A dead function's node is not actually removed from the call graph until
  // the end of the call graph walk, but the node no longer belongs to any valid
  // SCC.
  if (CalleeWasDeleted) {
    --NodeCount;
    NodesInLastSCC.erase(CG.lookup(*Callee));
    DeadFunctions.insert(Callee);
  } else {
    NewCallerAndCalleeEdges +=
        getCachedFPI(*Callee).DirectCallsToDefinedFunctions;
  }
  EdgeCount += (NewCallerAndCalleeEdges - Advice.CallerAndCalleeEdges);
  assert(CurrentIRSize >= 0 && EdgeCount >= 0 && NodeCount >= 0);
}
324
325int64_t MLInlineAdvisor::getModuleIRSize() const {326int64_t Ret = 0;327for (auto &F : M)328if (!F.isDeclaration())329Ret += getIRSize(F);330return Ret;331}
332
333FunctionPropertiesInfo &MLInlineAdvisor::getCachedFPI(Function &F) const {334auto InsertPair =335FPICache.insert(std::make_pair(&F, FunctionPropertiesInfo()));336if (!InsertPair.second)337return InsertPair.first->second;338InsertPair.first->second = FAM.getResult<FunctionPropertiesAnalysis>(F);339return InsertPair.first->second;340}
341
/// Main advice entry point: run cheap filters (unreachable call site, skip
/// policy, mandatory/never cases, ForceStop), then populate the model's input
/// tensors and ask the model for a decision.
std::unique_ptr<InlineAdvice> MLInlineAdvisor::getAdviceImpl(CallBase &CB) {
  if (auto Skip = getSkipAdviceIfUnreachableCallsite(CB))
    return Skip;

  auto &Caller = *CB.getCaller();
  auto &Callee = *CB.getCalledFunction();

  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  auto &TIR = FAM.getResult<TargetIRAnalysis>(Callee);
  auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(Caller);

  // Skip-policy: defer to the default heuristic when the caller isn't cold.
  if (SkipPolicy == SkipMLPolicyCriteria::IfCallerIsNotCold) {
    if (!PSI.isFunctionEntryCold(&Caller))
      return std::make_unique<InlineAdvice>(this, CB, ORE,
                                            GetDefaultAdvice(CB));
  }
  auto MandatoryKind = InlineAdvisor::getMandatoryKind(CB, FAM, ORE);
  // If this is a "never inline" case, there won't be any changes to internal
  // state we need to track, so we can just return the base InlineAdvice, which
  // will do nothing interesting.
  // Same thing if this is a recursive case.
  if (MandatoryKind == InlineAdvisor::MandatoryInliningKind::Never ||
      &Caller == &Callee)
    return getMandatoryAdvice(CB, false);

  bool Mandatory =
      MandatoryKind == InlineAdvisor::MandatoryInliningKind::Always;

  // If we need to stop, we won't want to track anymore any state changes, so
  // we just return the base InlineAdvice, which acts as a noop.
  if (ForceStop) {
    ORE.emit([&] {
      return OptimizationRemarkMissed(DEBUG_TYPE, "ForceStop", &CB)
             << "Won't attempt inlining because module size grew too much.";
    });
    return std::make_unique<InlineAdvice>(this, CB, ORE, Mandatory);
  }

  int CostEstimate = 0;
  if (!Mandatory) {
    auto IsCallSiteInlinable =
        llvm::getInliningCostEstimate(CB, TIR, GetAssumptionCache);
    if (!IsCallSiteInlinable) {
      // We can't inline this for correctness reasons, so return the base
      // InlineAdvice, as we don't care about tracking any state changes (which
      // won't happen).
      return std::make_unique<InlineAdvice>(this, CB, ORE, false);
    }
    CostEstimate = *IsCallSiteInlinable;
  }

  const auto CostFeatures =
      llvm::getInliningCostFeatures(CB, TIR, GetAssumptionCache);
  if (!CostFeatures) {
    return std::make_unique<InlineAdvice>(this, CB, ORE, false);
  }

  if (Mandatory)
    return getMandatoryAdvice(CB, true);

  // Count constant arguments at this call site (a model feature).
  auto NrCtantParams = 0;
  for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
    NrCtantParams += (isa<Constant>(*I));
  }

  auto &CallerBefore = getCachedFPI(Caller);
  auto &CalleeBefore = getCachedFPI(Callee);

  // Populate the structural feature tensors.
  *ModelRunner->getTensor<int64_t>(FeatureIndex::callee_basic_block_count) =
      CalleeBefore.BasicBlockCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::callsite_height) =
      getInitialFunctionLevel(Caller);
  *ModelRunner->getTensor<int64_t>(FeatureIndex::node_count) = NodeCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::nr_ctant_params) =
      NrCtantParams;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::edge_count) = EdgeCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::caller_users) =
      CallerBefore.Uses;
  *ModelRunner->getTensor<int64_t>(
      FeatureIndex::caller_conditionally_executed_blocks) =
      CallerBefore.BlocksReachedFromConditionalInstruction;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::caller_basic_block_count) =
      CallerBefore.BasicBlockCount;
  *ModelRunner->getTensor<int64_t>(
      FeatureIndex::callee_conditionally_executed_blocks) =
      CalleeBefore.BlocksReachedFromConditionalInstruction;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::callee_users) =
      CalleeBefore.Uses;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::cost_estimate) = CostEstimate;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::is_callee_avail_external) =
      Callee.hasAvailableExternallyLinkage();
  *ModelRunner->getTensor<int64_t>(FeatureIndex::is_caller_avail_external) =
      Caller.hasAvailableExternallyLinkage();

  // Add the cost features
  for (size_t I = 0;
       I < static_cast<size_t>(InlineCostFeatureIndex::NumberOfFeatures); ++I) {
    *ModelRunner->getTensor<int64_t>(inlineCostFeatureToMlFeature(
        static_cast<InlineCostFeatureIndex>(I))) = CostFeatures->at(I);
  }
  // This one would have been set up to be right at the end.
  if (!InteractiveChannelBaseName.empty() && InteractiveIncludeDefault)
    *ModelRunner->getTensor<int64_t>(InlineCostFeatureIndex::NumberOfFeatures) =
        GetDefaultAdvice(CB);
  return getAdviceFromModel(CB, ORE);
}
450
451std::unique_ptr<MLInlineAdvice>452MLInlineAdvisor::getAdviceFromModel(CallBase &CB,453OptimizationRemarkEmitter &ORE) {454return std::make_unique<MLInlineAdvice>(455this, CB, ORE, static_cast<bool>(ModelRunner->evaluate<int64_t>()));456}
457
458std::unique_ptr<InlineAdvice>459MLInlineAdvisor::getSkipAdviceIfUnreachableCallsite(CallBase &CB) {460if (!FAM.getResult<DominatorTreeAnalysis>(*CB.getCaller())461.isReachableFromEntry(CB.getParent()))462return std::make_unique<InlineAdvice>(this, CB, getCallerORE(CB), false);463return nullptr;464}
465
466std::unique_ptr<InlineAdvice> MLInlineAdvisor::getMandatoryAdvice(CallBase &CB,467bool Advice) {468// Make sure we track inlinings in all cases - mandatory or not.469if (auto Skip = getSkipAdviceIfUnreachableCallsite(CB))470return Skip;471if (Advice && !ForceStop)472return getMandatoryAdviceImpl(CB);473
474// If this is a "never inline" case, there won't be any changes to internal475// state we need to track, so we can just return the base InlineAdvice, which476// will do nothing interesting.477// Same if we are forced to stop - we don't track anymore.478return std::make_unique<InlineAdvice>(this, CB, getCallerORE(CB), Advice);479}
480
481std::unique_ptr<MLInlineAdvice>482MLInlineAdvisor::getMandatoryAdviceImpl(CallBase &CB) {483return std::make_unique<MLInlineAdvice>(this, CB, getCallerORE(CB), true);484}
485
486void MLInlineAdvisor::print(raw_ostream &OS) const {487OS << "[MLInlineAdvisor] Nodes: " << NodeCount << " Edges: " << EdgeCount488<< " EdgesOfLastSeenNodes: " << EdgesOfLastSeenNodes << "\n";489OS << "[MLInlineAdvisor] FPI:\n";490for (auto I : FPICache) {491OS << I.first->getName() << ":\n";492I.second.print(OS);493OS << "\n";494}495OS << "\n";496OS << "[MLInlineAdvisor] FuncLevels:\n";497for (auto I : FunctionLevels)498OS << (DeadFunctions.contains(&I.first->getFunction())499? "<deleted>"500: I.first->getFunction().getName())501<< " : " << I.second << "\n";502
503OS << "\n";504}
505
/// Snapshot pre-inlining sizes/edge counts for later delta updates. When the
/// advisor is in ForceStop mode these are recorded as 0 (they won't be used to
/// delta-update). For positive recommendations, arm the FunctionPropertiesUpdater
/// (FPU) so the caller's cached FPI can be incrementally updated on success.
MLInlineAdvice::MLInlineAdvice(MLInlineAdvisor *Advisor, CallBase &CB,
                               OptimizationRemarkEmitter &ORE,
                               bool Recommendation)
    : InlineAdvice(Advisor, CB, ORE, Recommendation),
      CallerIRSize(Advisor->isForcedToStop() ? 0 : Advisor->getIRSize(*Caller)),
      CalleeIRSize(Advisor->isForcedToStop() ? 0 : Advisor->getIRSize(*Callee)),
      CallerAndCalleeEdges(Advisor->isForcedToStop()
                               ? 0
                               : (Advisor->getLocalCalls(*Caller) +
                                  Advisor->getLocalCalls(*Callee))),
      PreInlineCallerFPI(Advisor->getCachedFPI(*Caller)) {
  if (Recommendation)
    FPU.emplace(Advisor->getCachedFPI(*getCaller()), CB);
}
520
521void MLInlineAdvice::reportContextForRemark(522DiagnosticInfoOptimizationBase &OR) {523using namespace ore;524OR << NV("Callee", Callee->getName());525for (size_t I = 0; I < NumberOfFeatures; ++I)526OR << NV(FeatureMap[I].name(),527*getAdvisor()->getModelRunner().getTensor<int64_t>(I));528OR << NV("ShouldInline", isInliningRecommended());529}
530
/// Finalize the armed FunctionPropertiesUpdater, incrementally refreshing the
/// caller's cached FPI after a successful inlining. Precondition: FPU was
/// emplaced in the constructor (i.e. this advice was a recommendation).
void MLInlineAdvice::updateCachedCallerFPI(FunctionAnalysisManager &FAM) const {
  FPU->finish(FAM);
}
534
535void MLInlineAdvice::recordInliningImpl() {536ORE.emit([&]() {537OptimizationRemark R(DEBUG_TYPE, "InliningSuccess", DLoc, Block);538reportContextForRemark(R);539return R;540});541getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ false);542}
543
544void MLInlineAdvice::recordInliningWithCalleeDeletedImpl() {545ORE.emit([&]() {546OptimizationRemark R(DEBUG_TYPE, "InliningSuccessWithCalleeDeleted", DLoc,547Block);548reportContextForRemark(R);549return R;550});551getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ true);552}
553
554void MLInlineAdvice::recordUnsuccessfulInliningImpl(555const InlineResult &Result) {556getAdvisor()->getCachedFPI(*Caller) = PreInlineCallerFPI;557ORE.emit([&]() {558OptimizationRemarkMissed R(DEBUG_TYPE, "InliningAttemptedAndUnsuccessful",559DLoc, Block);560reportContextForRemark(R);561return R;562});563}
564void MLInlineAdvice::recordUnattemptedInliningImpl() {565assert(!FPU);566ORE.emit([&]() {567OptimizationRemarkMissed R(DEBUG_TYPE, "IniningNotAttempted", DLoc, Block);568reportContextForRemark(R);569return R;570});571}
572