/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGOSRExitCompilerCommon.h"

#if ENABLE(DFG_JIT)

#include "DFGJITCode.h"
#include "DFGOperations.h"
#include "JIT.h"
#include "JSCJSValueInlines.h"
#include "JSCInlines.h"

namespace JSC { namespace DFG {
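
// Emits the slow-path code that runs on every OSR exit: it bumps the per-exit and
// per-CodeBlock exit counters, then either triggers immediate reoptimization or backs
// off the baseline tier-up counter so we do not instantly re-enter the code we just
// bailed out of.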
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
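    // The execute counter counts up from a negative threshold toward zero; a
    // non-negative value means the baseline block has already decided it wants to
    // tier up, so in that case reoptimize right away.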
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // We want to figure out if there's a possibility that we're in a loop. For the outermost
    // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
    // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
    // problem is the inlined functions, which might also have loops, but whose baseline versions
    // don't know where to look for the exit count. Figure out if those loops are severe enough
    // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
    // Otherwise, we should use the normal reoptimization trigger.

    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->executable->addressOfDidTryToEnterInLoop())));
    }

    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }
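
    // regT2 holds the incremented exit count and regT1 the threshold chosen above; if
    // the count is still at or below the threshold, skip reoptimization and just back off.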
    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}
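
// Rebuilds, in memory, the call frames for every function the DFG inlined at this exit's
// code origin, walking from the innermost frame outward, so that the baseline JIT sees a
// stack of ordinary call frames when we land in it.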
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        void* jumpTarget = nullptr;
        void* trueReturnPC = nullptr;

        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;
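
        // Work out the return PC for this reified frame: normal calls return to the
        // call's return location in the caller's baseline code, while getter/setter
        // frames return through a thunk, with the true return PC stashed past the
        // arguments.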
        switch (inlineCallFrame->kind) {
        case InlineCallFrame::Call:
        case InlineCallFrame::Construct:
        case InlineCallFrame::CallVarargs:
        case InlineCallFrame::ConstructVarargs: {
            CallLinkInfo* callLinkInfo =
                baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
            RELEASE_ASSERT(callLinkInfo);

            jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
            break;
        }

        case InlineCallFrame::GetterCall:
        case InlineCallFrame::SetterCall: {
            StructureStubInfo* stubInfo =
                baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
            RELEASE_ASSERT(stubInfo);

            switch (inlineCallFrame->kind) {
            case InlineCallFrame::GetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineGetterReturnThunkGenerator).code().executableAddress();
                break;
            case InlineCallFrame::SetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineSetterReturnThunkGenerator).code().executableAddress();
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            trueReturnPC = stubInfo->callReturnLocation.labelAtOffset(
                stubInfo->patch.deltaCallToDone).executableAddress();
            break;
        } }

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        if (trueReturnPC)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(trueReturnPC), AssemblyHelpers::addressFor(inlineCallFrame->stackOffset + virtualRegisterForArgument(inlineCallFrame->arguments.size()).offset()));
.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock
), AssemblyHelpers::addressFor((VirtualRegister
)(inlineCallFrame
->stackOffset 
+ JSStack::CodeBlock
))); 
 200         if (!inlineCallFrame
->isVarargs()) 
 201             jit
.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame
->arguments
.size()), AssemblyHelpers::payloadFor((VirtualRegister
)(inlineCallFrame
->stackOffset 
+ JSStack::ArgumentCount
))); 
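        // On 64-bit, the bytecode location goes in the tag half of the ArgumentCount
        // slot and the callee is stored as one 64-bit JSValue; the 32-bit path below
        // writes the tag and payload halves separately.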
#if USE(JSVALUE64)
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#else // USE(JSVALUE64) // so this is the 32-bit part
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}
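
// Emits an out-of-line write barrier for the cell in `owner`: if it is not already
// remembered or in eden, call operationOSRWriteBarrier to remember it.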
#if ENABLE(GGC)
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(owner);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    ownerIsRememberedOrInEden.link(&jit);
}
#endif // ENABLE(GGC)
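
// Shifts the frame and stack pointers from the DFG frame to what the baseline code
// expects, then looks up the machine code address for the exit's bytecode index in the
// baseline CodeBlock and jumps there.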
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
#if ENABLE(GGC)
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()->ownerExecutable()), GPRInfo::nonArgGPR0);
    osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            ScriptExecutable* ownerExecutable = inlineCallFrame->executable.get();
            jit.move(AssemblyHelpers::TrustedImmPtr(ownerExecutable), GPRInfo::nonArgGPR0);
            osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
        }
    }
#endif // ENABLE(GGC)

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);
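
    // The decoded code map is sorted by bytecode index, so we can binary-search it to
    // map the exit's bytecode index to a machine code offset in the baseline code.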
    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);
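
    // Baseline code keeps the stack pointer at a fixed offset from the call frame
    // register, so recompute it before jumping.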
    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(baselineCodeBlock) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);

    jit.jitAssertTagsInPlace();

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)