/*
 * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
  27 #include "CallLinkStatus.h" 
  29 #include "CallLinkInfo.h" 
  30 #include "CodeBlock.h" 
  31 #include "DFGJITCode.h" 
  32 #include "LLIntCallLinkInfo.h" 
  33 #include "JSCInlines.h" 
  34 #include <wtf/CommaPrinter.h> 
  38 static const bool verbose 
= false; 
  40 CallLinkStatus::CallLinkStatus(JSValue value
) 
  44     , m_couldTakeSlowPath(false) 
  47     if (!value 
|| !value
.isCell()) 
  50     m_structure 
= value
.asCell()->structure(); 
  52     if (!value
.asCell()->inherits(JSFunction::info())) 
  55     m_executable 
= jsCast
<JSFunction
*>(value
.asCell())->executable(); 
  58 JSFunction
* CallLinkStatus::function() const 
  60     if (!m_callTarget 
|| !m_callTarget
.isCell()) 
  63     if (!m_callTarget
.asCell()->inherits(JSFunction::info())) 
  66     return jsCast
<JSFunction
*>(m_callTarget
.asCell()); 
  69 InternalFunction
* CallLinkStatus::internalFunction() const 
  71     if (!m_callTarget 
|| !m_callTarget
.isCell()) 
  74     if (!m_callTarget
.asCell()->inherits(InternalFunction::info())) 
  77     return jsCast
<InternalFunction
*>(m_callTarget
.asCell()); 
  80 Intrinsic 
CallLinkStatus::intrinsicFor(CodeSpecializationKind kind
) const 
  85     return m_executable
->intrinsicFor(kind
); 
  88 CallLinkStatus 
CallLinkStatus::computeFromLLInt(const ConcurrentJITLocker
& locker
, CodeBlock
* profiledBlock
, unsigned bytecodeIndex
) 
  90     UNUSED_PARAM(profiledBlock
); 
  91     UNUSED_PARAM(bytecodeIndex
); 
  93     if (profiledBlock
->hasExitSite(locker
, DFG::FrequentExitSite(bytecodeIndex
, BadFunction
))) { 
  94         // We could force this to be a closure call, but instead we'll just assume that it 
  96         return takesSlowPath(); 
 102     VM
& vm 
= *profiledBlock
->vm(); 
 104     Instruction
* instruction 
= profiledBlock
->instructions().begin() + bytecodeIndex
; 
 105     OpcodeID op 
= vm
.interpreter
->getOpcodeID(instruction
[0].u
.opcode
); 
 106     if (op 
!= op_call 
&& op 
!= op_construct
) 
 107         return CallLinkStatus(); 
 109     LLIntCallLinkInfo
* callLinkInfo 
= instruction
[5].u
.callLinkInfo
; 
 111     return CallLinkStatus(callLinkInfo
->lastSeenCallee
.get()); 
 114 CallLinkStatus 
CallLinkStatus::computeFor( 
 115     CodeBlock
* profiledBlock
, unsigned bytecodeIndex
, const CallLinkInfoMap
& map
) 
 117     ConcurrentJITLocker 
locker(profiledBlock
->m_lock
); 
 119     UNUSED_PARAM(profiledBlock
); 
 120     UNUSED_PARAM(bytecodeIndex
); 
 123     if (profiledBlock
->hasExitSite(locker
, DFG::FrequentExitSite(bytecodeIndex
, BadCache
)) 
 124         || profiledBlock
->hasExitSite(locker
, DFG::FrequentExitSite(bytecodeIndex
, BadCacheWatchpoint
)) 
 125         || profiledBlock
->hasExitSite(locker
, DFG::FrequentExitSite(bytecodeIndex
, BadExecutable
))) 
 126         return takesSlowPath(); 
 128     CallLinkInfo
* callLinkInfo 
= map
.get(CodeOrigin(bytecodeIndex
)); 
 130         return computeFromLLInt(locker
, profiledBlock
, bytecodeIndex
); 
 132     CallLinkStatus result 
= computeFor(locker
, *callLinkInfo
); 
 134         return computeFromLLInt(locker
, profiledBlock
, bytecodeIndex
); 
 136     if (profiledBlock
->hasExitSite(locker
, DFG::FrequentExitSite(bytecodeIndex
, BadFunction
))) 
 137         result
.makeClosureCall(); 
 141     return CallLinkStatus(); 
 146 CallLinkStatus 
CallLinkStatus::computeFor(const ConcurrentJITLocker
&, CallLinkInfo
& callLinkInfo
) 
 148     // Note that despite requiring that the locker is held, this code is racy with respect 
 149     // to the CallLinkInfo: it may get cleared while this code runs! This is because 
 150     // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns 
 151     // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns 
 152     // them. So, there is no way for either the caller of CallLinkInfo::unlock() or unlock() 
 153     // itself to figure out which lock to lock. 
 155     // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow 
 156     // path count, the stub, and the target - can all be asked racily. Stubs and targets can 
 157     // only be deleted at next GC, so if we load a non-null one, then it must contain data 
 158     // that is still marginally valid (i.e. the pointers ain't stale). This kind of raciness 
 159     // is probably OK for now. 
 161     if (callLinkInfo
.slowPathCount 
>= Options::couldTakeSlowCaseMinimumCount()) 
 162         return takesSlowPath(); 
 164     if (ClosureCallStubRoutine
* stub 
= callLinkInfo
.stub
.get()) 
 165         return CallLinkStatus(stub
->executable(), stub
->structure()); 
 167     JSFunction
* target 
= callLinkInfo
.lastSeenCallee
.get(); 
 169         return CallLinkStatus(); 
 171     if (callLinkInfo
.hasSeenClosure
) 
 172         return CallLinkStatus(target
->executable(), target
->structure()); 
 174     return CallLinkStatus(target
); 
 178 void CallLinkStatus::computeDFGStatuses( 
 179     CodeBlock
* dfgCodeBlock
, CallLinkStatus::ContextMap
& map
) 
 182     RELEASE_ASSERT(dfgCodeBlock
->jitType() == JITCode::DFGJIT
); 
 183     CodeBlock
* baselineCodeBlock 
= dfgCodeBlock
->alternative(); 
 184     for (auto iter 
= dfgCodeBlock
->callLinkInfosBegin(); !!iter
; ++iter
) { 
 185         CallLinkInfo
& info 
= **iter
; 
 186         CodeOrigin codeOrigin 
= info
.codeOrigin
; 
 191         // Check if we had already previously made a terrible mistake in the FTL for this 
 192         // code origin. Note that this is approximate because we could have a monovariant 
 193         // inline in the FTL that ended up failing. We should fix that at some point by 
 194         // having data structures to track the context of frequent exits. This is currently 
 195         // challenging because it would require creating a CodeOrigin-based database in 
 196         // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the 
 198         CodeBlock
* currentBaseline 
= 
 199             baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin
, baselineCodeBlock
); 
 201             ConcurrentJITLocker 
locker(currentBaseline
->m_lock
); 
 203                 currentBaseline
->hasExitSite(locker
, DFG::FrequentExitSite(codeOrigin
.bytecodeIndex
, BadCache
, ExitFromFTL
)) 
 204                 || currentBaseline
->hasExitSite(locker
, DFG::FrequentExitSite(codeOrigin
.bytecodeIndex
, BadCacheWatchpoint
, ExitFromFTL
)) 
 205                 || currentBaseline
->hasExitSite(locker
, DFG::FrequentExitSite(codeOrigin
.bytecodeIndex
, BadExecutable
, ExitFromFTL
)); 
 207                 currentBaseline
->hasExitSite(locker
, DFG::FrequentExitSite(codeOrigin
.bytecodeIndex
, BadFunction
, ExitFromFTL
)); 
 211             ConcurrentJITLocker 
locker(dfgCodeBlock
->m_lock
); 
 213                 map
.add(info
.codeOrigin
, takesSlowPath()); 
 215                 CallLinkStatus status 
= computeFor(locker
, info
); 
 216                 if (status
.isSet()) { 
 218                         status
.makeClosureCall(); 
 219                     map
.add(info
.codeOrigin
, status
); 
 225     UNUSED_PARAM(dfgCodeBlock
); 
 226 #endif // ENABLE(DFG_JIT) 
 229         dataLog("Context map:\n"); 
 230         ContextMap::iterator iter 
= map
.begin(); 
 231         ContextMap::iterator end 
= map
.end(); 
 232         for (; iter 
!= end
; ++iter
) { 
 233             dataLog("    ", iter
->key
, ":\n"); 
 234             dataLog("        ", iter
->value
, "\n"); 
 239 CallLinkStatus 
CallLinkStatus::computeFor( 
 240     CodeBlock
* profiledBlock
, CodeOrigin codeOrigin
, 
 241     const CallLinkInfoMap
& baselineMap
, const CallLinkStatus::ContextMap
& dfgMap
) 
 243     auto iter 
= dfgMap
.find(codeOrigin
); 
 244     if (iter 
!= dfgMap
.end()) 
 247     return computeFor(profiledBlock
, codeOrigin
.bytecodeIndex
, baselineMap
); 
 250 void CallLinkStatus::dump(PrintStream
& out
) const 
 253         out
.print("Not Set"); 
 260         out
.print(comma
, "Statically Proved"); 
 262     if (m_couldTakeSlowPath
) 
 263         out
.print(comma
, "Could Take Slow Path"); 
 266         out
.print(comma
, "Known target: ", m_callTarget
); 
 269         out
.print(comma
, "Executable/CallHash: ", RawPointer(m_executable
)); 
 270         if (!isCompilationThread()) 
 271             out
.print("/", m_executable
->hashFor(CodeForCall
)); 
 275         out
.print(comma
, "Structure: ", RawPointer(m_structure
));