X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/93a3786624b2768d89bfa27e46598dc64e2fb70a..ed1e77d3adeb83d26fd1dfb16dd84cabdcefd250:/bytecode/CallLinkStatus.cpp?ds=sidebyside

diff --git a/bytecode/CallLinkStatus.cpp b/bytecode/CallLinkStatus.cpp
index 509b15a..103a7f2 100644
--- a/bytecode/CallLinkStatus.cpp
+++ b/bytecode/CallLinkStatus.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,101 +26,294 @@
 #include "config.h"
 #include "CallLinkStatus.h"
 
+#include "CallLinkInfo.h"
 #include "CodeBlock.h"
+#include "DFGJITCode.h"
 #include "LLIntCallLinkInfo.h"
-#include "Operations.h"
+#include "JSCInlines.h"
 #include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
 
 namespace JSC {
 
+static const bool verbose = false;
+
 CallLinkStatus::CallLinkStatus(JSValue value)
-    : m_callTarget(value)
-    , m_executable(0)
-    , m_structure(0)
-    , m_couldTakeSlowPath(false)
+    : m_couldTakeSlowPath(false)
     , m_isProved(false)
 {
-    if (!value || !value.isCell())
-        return;
-
-    m_structure = value.asCell()->structure();
-
-    if (!value.asCell()->inherits(&JSFunction::s_info))
+    if (!value || !value.isCell()) {
+        m_couldTakeSlowPath = true;
         return;
+    }
 
-    m_executable = jsCast<JSFunction*>(value.asCell())->executable();
+    m_variants.append(CallVariant(value.asCell()));
 }
 
-JSFunction* CallLinkStatus::function() const
+CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
 {
-    if (!m_callTarget || !m_callTarget.isCell())
-        return 0;
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+#if ENABLE(DFG_JIT)
+    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell))) {
+        // We could force this to be a closure call, but instead we'll just assume that it
+        // takes slow path.
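
A "takes slow path" status is the pessimistic bottom of this analysis: no variants, just a flag telling the compiler to plant a fully generic call. For orientation, a consumer of these statuses would look roughly like the sketch below; the emit* hooks are hypothetical stand-ins rather than JSC API, and the size()/operator[]/couldTakeSlowPath() accessors are assumed from CallLinkStatus.h.

    #include "CallLinkStatus.h" // JSC-internal header, assumed reachable

    // Hypothetical lowering hooks; stand-ins for the DFG's real call handling.
    void emitGenericCall();
    void emitCheckedDirectCall(JSC::CallVariant);

    // Sketch of how a compiler tier might act on a status.
    void lowerCall(const JSC::CallLinkStatus& status)
    {
        if (!status.size()) {
            // Nothing usable was profiled; only a fully dynamic call is safe.
            emitGenericCall();
            return;
        }
        for (unsigned i = 0; i < status.size(); ++i)
            emitCheckedDirectCall(status[i]); // speculate on each profiled callee
        if (status.couldTakeSlowPath())
            emitGenericCall(); // the variants are not exhaustive; keep a fallback
    }
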
+        return takesSlowPath();
+    }
+#else
+    UNUSED_PARAM(locker);
+#endif
+
+    VM& vm = *profiledBlock->vm();
+
+    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+    OpcodeID op = vm.interpreter->getOpcodeID(instruction[0].u.opcode);
+    if (op != op_call && op != op_construct)
+        return CallLinkStatus();
 
-    if (!m_callTarget.asCell()->inherits(&JSFunction::s_info))
-        return 0;
+    LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;
 
-    return jsCast<JSFunction*>(m_callTarget.asCell());
+    return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
 }
 
-InternalFunction* CallLinkStatus::internalFunction() const
+CallLinkStatus CallLinkStatus::computeFor(
+    CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
 {
-    if (!m_callTarget || !m_callTarget.isCell())
-        return 0;
+    ConcurrentJITLocker locker(profiledBlock->m_lock);
 
-    if (!m_callTarget.asCell()->inherits(&InternalFunction::s_info))
-        return 0;
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+    UNUSED_PARAM(map);
+#if ENABLE(DFG_JIT)
+    ExitSiteData exitSiteData = computeExitSiteData(locker, profiledBlock, bytecodeIndex);
 
-    return jsCast<InternalFunction*>(m_callTarget.asCell());
+    CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
+    if (!callLinkInfo) {
+        if (exitSiteData.m_takesSlowPath)
+            return takesSlowPath();
+        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);
+    }
+
+    return computeFor(locker, profiledBlock, *callLinkInfo, exitSiteData);
+#else
+    return CallLinkStatus();
+#endif
 }
 
-Intrinsic CallLinkStatus::intrinsicFor(CodeSpecializationKind kind) const
+CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(
+    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
 {
-    if (!m_executable)
-        return NoIntrinsic;
+    ExitSiteData exitSiteData;
+
+#if ENABLE(DFG_JIT)
+    exitSiteData.m_takesSlowPath =
+        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadType))
+        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable));
+    exitSiteData.m_badFunction =
+        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell));
+#else
+    UNUSED_PARAM(locker);
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+#endif
 
-    return m_executable->intrinsicFor(kind);
+    return exitSiteData;
 }
 
-CallLinkStatus CallLinkStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+#if ENABLE(JIT)
+CallLinkStatus CallLinkStatus::computeFor(
+    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo)
 {
+    // We don't really need this, but anytime we have to debug this code, it becomes indispensable.
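
The two booleans produced by computeExitSiteData() above encode independent failure memories, and the patch applies them independently in the ExitSiteData overload of computeFor() further down. A toy re-expression of that mapping, with simplified stand-in types (the real code mutates m_variants and m_couldTakeSlowPath directly):

    #include <vector>

    // Simplified stand-ins for CallVariant / CallLinkStatus / ExitSiteData.
    struct ToyVariant {
        const void* executable;
        const void* callee; // null == closure call (executable only)
    };
    struct ToyStatus {
        std::vector<ToyVariant> variants;
        bool couldTakeSlowPath { false };
    };
    struct ToyExitBits {
        bool takesSlowPath;
        bool badFunction;
    };

    // Mirrors the ExitSiteData overload of computeFor() later in this patch.
    ToyStatus applyExitBits(ToyStatus status, ToyExitBits bits)
    {
        if (bits.badFunction) {
            // Frequent BadCell exits: the exact callee cell was unstable, so
            // keep only the executables (despecify to a "closure call").
            for (ToyVariant& v : status.variants)
                v.callee = nullptr;
        }
        if (bits.takesSlowPath) {
            // Frequent BadType/BadExecutable exits: whatever we specialize on,
            // keep a generic fallback path.
            status.couldTakeSlowPath = true;
        }
        return status;
    }
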
     UNUSED_PARAM(profiledBlock);
-    UNUSED_PARAM(bytecodeIndex);
-#if ENABLE(LLINT)
-    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
-    LLIntCallLinkInfo* callLinkInfo = instruction[4].u.callLinkInfo;
 
-    return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
-#else
-    return CallLinkStatus();
-#endif
+    CallLinkStatus result = computeFromCallLinkInfo(locker, callLinkInfo);
+    result.m_maxNumArguments = callLinkInfo.maxNumArguments();
+    return result;
 }
 
-CallLinkStatus CallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
+    const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo)
 {
-    UNUSED_PARAM(profiledBlock);
-    UNUSED_PARAM(bytecodeIndex);
-#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
-    if (!profiledBlock->numberOfCallLinkInfos())
-        return computeFromLLInt(profiledBlock, bytecodeIndex);
+    if (callLinkInfo.clearedByGC())
+        return takesSlowPath();
+
+    // Note that despite requiring that the locker is held, this code is racy with respect
+    // to the CallLinkInfo: it may get cleared while this code runs! This is because
+    // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns
+    // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns
+    // them. So, there is no way for either the caller of CallLinkInfo::unlink() or unlink()
+    // itself to figure out which lock to lock.
+    //
+    // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow
+    // path count, the stub, and the target - can all be asked racily. Stubs and targets can
+    // only be deleted at next GC, so if we load a non-null one, then it must contain data
+    // that is still marginally valid (i.e. the pointers ain't stale). This kind of raciness
+    // is probably OK for now.
 
-    if (profiledBlock->couldTakeSlowCase(bytecodeIndex))
-        return CallLinkStatus::takesSlowPath();
+    // PolymorphicCallStubRoutine is a GCAwareJITStubRoutine, so if non-null, it will stay alive
+    // until next GC even if the CallLinkInfo is concurrently cleared. Also, the variants list is
+    // never mutated after the PolymorphicCallStubRoutine is instantiated. We have some conservative
+    // fencing in place to make sure that we see the variants list after construction.
+    if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) {
+        WTF::loadLoadFence();
+
+        CallEdgeList edges = stub->edges();
+
+        // Now that we've loaded the edges list, there are no further concurrency concerns. We will
+        // just manipulate and prune this list to our liking - mostly removing entries that are too
+        // infrequent and ensuring that it's sorted in descending order of frequency.
+
+        RELEASE_ASSERT(edges.size());
+
+        std::sort(
+            edges.begin(), edges.end(),
+            [] (CallEdge a, CallEdge b) {
+                return a.count() > b.count();
+            });
+        RELEASE_ASSERT(edges.first().count() >= edges.last().count());
+
+        double totalCallsToKnown = 0;
+        double totalCallsToUnknown = callLinkInfo.slowPathCount();
+        CallVariantList variants;
+        for (size_t i = 0; i < edges.size(); ++i) {
+            CallEdge edge = edges[i];
+            // If the call is at the tail of the distribution, then we don't optimize it and we
+            // treat it as if it was a call to something unknown. We define the tail as being either
+            // a call that doesn't belong to the N most frequent callees (N =
+            // maxPolymorphicCallVariantsForInlining) or that has a total call count that is too
+            // small.
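
Concretely, suppose the stub recorded edges with counts {A: 10000, B: 2000, C: 40, D: 1} and 30 slow-path calls, and suppose, with made-up numbers (the real values come from Options), a variant cap of 5 and a per-callee frequency threshold of 100. Then A and B are kept as variants, C and D fold into the unknown bucket, and the known:unknown ratio is 12000/71, roughly 169, easily skewed enough to proceed. The toy program below reproduces that arithmetic outside JSC:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Toy re-run of the pruning pass. The thresholds are made up; the real
    // ones come from Options::maxPolymorphicCallVariantsForInlining() and
    // Options::frequentCallThreshold().
    int main()
    {
        struct Edge { const char* callee; double count; };
        std::vector<Edge> edges { { "A", 10000 }, { "B", 2000 }, { "C", 40 }, { "D", 1 } };
        double slowPathCount = 30;            // calls that already missed the stub entirely
        const size_t maxVariants = 5;         // stand-in cap on variants
        const double frequentThreshold = 100; // stand-in per-callee minimum

        std::sort(edges.begin(), edges.end(),
            [] (const Edge& a, const Edge& b) { return a.count > b.count; });

        double known = 0, unknown = slowPathCount;
        std::vector<const char*> variants;
        for (size_t i = 0; i < edges.size(); ++i) {
            if (i >= maxVariants || edges[i].count < frequentThreshold)
                unknown += edges[i].count; // tail: fold into the unknown bucket
            else {
                known += edges[i].count;
                variants.push_back(edges[i].callee);
            }
        }

        // A and B survive; C and D fold into the unknown bucket.
        std::printf("variants=%zu known=%.0f unknown=%.0f ratio=%.1f\n",
            variants.size(), known, unknown, known / unknown);
        // Prints: variants=2 known=12000 unknown=71 ratio=169.0
    }

Note that the could-take-slow-path bit stays set whenever the unknown bucket is non-empty, mirroring result.m_couldTakeSlowPath = !!totalCallsToUnknown in the hunk below.
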
+            if (i >= Options::maxPolymorphicCallVariantsForInlining()
+                || edge.count() < Options::frequentCallThreshold())
+                totalCallsToUnknown += edge.count();
+            else {
+                totalCallsToKnown += edge.count();
+                variants.append(edge.callee());
+            }
+        }
+
+        // Bail if we didn't find any calls that qualified.
+        RELEASE_ASSERT(!!totalCallsToKnown == !!variants.size());
+        if (variants.isEmpty())
+            return takesSlowPath();
+
+        // We require that the distribution of callees is skewed towards a handful of common ones.
+        if (totalCallsToKnown / totalCallsToUnknown < Options::minimumCallToKnownRate())
+            return takesSlowPath();
+
+        RELEASE_ASSERT(totalCallsToKnown);
+        RELEASE_ASSERT(variants.size());
+
+        CallLinkStatus result;
+        result.m_variants = variants;
+        result.m_couldTakeSlowPath = !!totalCallsToUnknown;
+        return result;
+    }
 
-    CallLinkInfo& callLinkInfo = profiledBlock->getCallLinkInfo(bytecodeIndex);
-    if (callLinkInfo.stub)
-        return CallLinkStatus(callLinkInfo.stub->executable(), callLinkInfo.stub->structure());
+    CallLinkStatus result;
 
-    JSFunction* target = callLinkInfo.lastSeenCallee.get();
-    if (!target)
-        return computeFromLLInt(profiledBlock, bytecodeIndex);
+    if (JSFunction* target = callLinkInfo.lastSeenCallee()) {
+        CallVariant variant(target);
+        if (callLinkInfo.hasSeenClosure())
+            variant = variant.despecifiedClosure();
+        result.m_variants.append(variant);
+    }
 
-    if (callLinkInfo.hasSeenClosure)
-        return CallLinkStatus(target->executable(), target->structure());
+    result.m_couldTakeSlowPath = !!callLinkInfo.slowPathCount();
 
-    return CallLinkStatus(target);
-#else
-    return CallLinkStatus();
+    return result;
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo,
+    ExitSiteData exitSiteData)
+{
+    CallLinkStatus result = computeFor(locker, profiledBlock, callLinkInfo);
+    if (exitSiteData.m_badFunction)
+        result.makeClosureCall();
+    if (exitSiteData.m_takesSlowPath)
+        result.m_couldTakeSlowPath = true;
+
+    return result;
+}
 #endif
+
+void CallLinkStatus::computeDFGStatuses(
+    CodeBlock* dfgCodeBlock, CallLinkStatus::ContextMap& map)
+{
+#if ENABLE(DFG_JIT)
+    RELEASE_ASSERT(dfgCodeBlock->jitType() == JITCode::DFGJIT);
+    CodeBlock* baselineCodeBlock = dfgCodeBlock->alternative();
+    for (auto iter = dfgCodeBlock->callLinkInfosBegin(); !!iter; ++iter) {
+        CallLinkInfo& info = **iter;
+        CodeOrigin codeOrigin = info.codeOrigin();
+
+        // Check if we had already previously made a terrible mistake in the FTL for this
+        // code origin. Note that this is approximate because we could have a monovariant
+        // inline in the FTL that ended up failing. We should fix that at some point by
+        // having data structures to track the context of frequent exits. This is currently
+        // challenging because it would require creating a CodeOrigin-based database in
+        // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the
+        // InlineCallFrames.
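
The subtle point in the loop below is that exit-site data is read from the baseline CodeBlock matching each call's code origin, which differs from the outer baseline block whenever the DFG inlined the call. A hypothetical simplification of what baselineCodeBlockForOriginAndBaselineCodeBlock() has to do (the real helper lives elsewhere in JSC and handles more cases):

    #include "CodeOrigin.h" // JSC-internal header, assumed reachable

    namespace JSC {

    // Hypothetical helper standing in for the real lookup of an inlined
    // callee's baseline CodeBlock.
    CodeBlock* baselineCodeBlockForInlinedFrame(InlineCallFrame*);

    // Simplified model: an inlined call site's bytecodeIndex is relative to
    // the inlined callee, so its exit sites live in that callee's baseline
    // CodeBlock, not in the outermost function's.
    CodeBlock* resolveBaseline(CodeOrigin origin, CodeBlock* outerBaseline)
    {
        if (!origin.inlineCallFrame)
            return outerBaseline; // call sits in the outermost function
        return baselineCodeBlockForInlinedFrame(origin.inlineCallFrame);
    }

    } // namespace JSC
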
+        CodeBlock* currentBaseline =
+            baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
+        ExitSiteData exitSiteData;
+        {
+            ConcurrentJITLocker locker(currentBaseline->m_lock);
+            exitSiteData = computeExitSiteData(
+                locker, currentBaseline, codeOrigin.bytecodeIndex);
+        }
+
+        {
+            ConcurrentJITLocker locker(dfgCodeBlock->m_lock);
+            map.add(info.codeOrigin(), computeFor(locker, dfgCodeBlock, info, exitSiteData));
+        }
+    }
+#else
+    UNUSED_PARAM(dfgCodeBlock);
+#endif // ENABLE(DFG_JIT)
+
+    if (verbose) {
+        dataLog("Context map:\n");
+        ContextMap::iterator iter = map.begin();
+        ContextMap::iterator end = map.end();
+        for (; iter != end; ++iter) {
+            dataLog("    ", iter->key, ":\n");
+            dataLog("        ", iter->value, "\n");
+        }
+    }
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+    CodeBlock* profiledBlock, CodeOrigin codeOrigin,
+    const CallLinkInfoMap& baselineMap, const CallLinkStatus::ContextMap& dfgMap)
+{
+    auto iter = dfgMap.find(codeOrigin);
+    if (iter != dfgMap.end())
+        return iter->value;
+
+    return computeFor(profiledBlock, codeOrigin.bytecodeIndex, baselineMap);
+}
+
+void CallLinkStatus::setProvenConstantCallee(CallVariant variant)
+{
+    m_variants = CallVariantList{ variant };
+    m_couldTakeSlowPath = false;
+    m_isProved = true;
+}
+
+bool CallLinkStatus::isClosureCall() const
+{
+    for (unsigned i = m_variants.size(); i--;) {
+        if (m_variants[i].isClosureCall())
+            return true;
+    }
+    return false;
+}
+
+void CallLinkStatus::makeClosureCall()
+{
+    m_variants = despecifiedVariantList(m_variants);
 }
 
 void CallLinkStatus::dump(PrintStream& out) const
@@ -138,14 +331,11 @@ void CallLinkStatus::dump(PrintStream& out) const
     if (m_couldTakeSlowPath)
         out.print(comma, "Could Take Slow Path");
 
-    if (m_callTarget)
-        out.print(comma, "Known target: ", m_callTarget);
-
-    if (m_executable)
-        out.print(comma, "Executable/CallHash: ", RawPointer(m_executable), "/", m_executable->hashFor(CodeForCall));
+    if (!m_variants.isEmpty())
+        out.print(comma, listDump(m_variants));
 
-    if (m_structure)
-        out.print(comma, "Structure: ", RawPointer(m_structure));
+    if (m_maxNumArguments)
+        out.print(comma, "maxNumArguments = ", m_maxNumArguments);
 }
 
 } // namespace JSC
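
The makeClosureCall() added in this patch rewrites every variant to its closure-call form. A toy model of that despecification, with simplified stand-in types (in JSC a closure-call variant pins only the executable, not the exact JSFunction cell, and the real despecifiedVariantList() also de-duplicates variants that collapse to the same executable):

    #include <algorithm>
    #include <vector>

    // Simplified stand-ins for CallVariant and its despecification.
    struct ToyVariant {
        const void* executable; // survives despecification
        const void* cell;       // exact function object; null once despecified
        bool isClosureCall() const { return !cell; }
        ToyVariant despecifiedClosure() const { return { executable, nullptr }; }
    };

    // Mirrors CallLinkStatus::isClosureCall(): any despecified variant counts.
    bool isClosureCall(const std::vector<ToyVariant>& variants)
    {
        return std::any_of(variants.begin(), variants.end(),
            [] (const ToyVariant& v) { return v.isClosureCall(); });
    }

    // Mirrors CallLinkStatus::makeClosureCall(), minus the de-duplication
    // performed by the real despecifiedVariantList().
    void makeClosureCall(std::vector<ToyVariant>& variants)
    {
        for (ToyVariant& v : variants)
            v = v.despecifiedClosure();
    }
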