/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT)

#include "CallFrame.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"

namespace JSC { namespace DFG {

extern "C" {

void compileOSRExit(ExecState* exec)
{
    CodeBlock* codeBlock = exec->codeBlock();
    
    ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
    
    JSGlobalData* globalData = &exec->globalData();
    
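    // The failed speculation check jumped to an exit-generation thunk, which
    // records the index of the exit that fired in globalData->osrExitIndex
    // before calling into this compiler.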
    uint32_t exitIndex = globalData->osrExitIndex;
    OSRExit& exit = codeBlock->osrExit(exitIndex);
    
    // Make sure all code on our inline stack is JIT compiled. This is necessary since
    // we may opt to inline a code block even before it had ever been compiled by the
    // JIT, but our OSR exit infrastructure currently only works if the target of the
    // OSR exit is JIT code. This could be changed since there is nothing particularly
    // hard about doing an OSR exit into the interpreter, but for now this seems to make
    // sense in that if we're OSR exiting from inlined code of a DFG code block, then
    // probably it's a good sign that the thing we're exiting into is hot. Even more
    // interestingly, since the code was inlined, it may never otherwise get JIT
    // compiled since the act of inlining it may ensure that it otherwise never runs.
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        static_cast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())
            ->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct)
            ->jitCompile(*globalData);
    }
    
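    // A speculative operation (e.g. an integer add that assumed no overflow) may
    // need to be undone before values can be restored; m_recoveryIndex is biased
    // by one so that zero can mean "no recovery necessary".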
    SpeculationRecovery* recovery = 0;
    if (exit.m_recoveryIndex)
        recovery = &codeBlock->speculationRecovery(exit.m_recoveryIndex - 1);
    
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("Generating OSR exit #%u (bc#%u, @%u, %s) for code block %p.\n", exitIndex, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, exitKindToString(exit.m_kind), codeBlock);
#endif
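    
    // Compile the exit ramp itself: compileExit() emits code that rebuilds the
    // baseline JIT's view of the frame from the DFG's register and spill state,
    // then jumps into the baseline code block.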
    {
        AssemblyHelpers jit(globalData, codeBlock);
        OSRExitCompiler exitCompiler(jit);
        
        jit.jitAssertHasValidCallFrame();
        exitCompiler.compileExit(exit, recovery);
        
        LinkBuffer patchBuffer(*globalData, &jit, codeBlock);
        exit.m_code = patchBuffer.finalizeCode();
        
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("OSR exit code at [%p, %p).\n", patchBuffer.debugAddress(), static_cast<char*>(patchBuffer.debugAddress()) + patchBuffer.debugSize());
#endif
    }
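    
    // Relink the jump for the failed speculation check so that future failures
    // go straight to the ramp we just generated, bypassing this compiler.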
    {
        RepatchBuffer repatchBuffer(codeBlock);
        repatchBuffer.relink(exit.m_check.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
    }
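    
    // The thunk that called us finishes by jumping to this address, so the
    // current (first) failure also lands on the newly generated ramp.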
    globalData->osrExitJumpDestination = exit.m_code.code().executableAddress();
}

} // extern "C"
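
// Emits the part of the exit ramp that maintains the counters driving the
// decision to reoptimize. Note that everything below generates instructions
// that run each time the exit is taken; none of the counter updates happen
// now, at compile time.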
void OSRExitCompiler::handleExitCounts(const OSRExit& exit)
{
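    // Count how many times this particular exit has been taken; exit.m_count
    // lives in the OSRExit record, so its address is baked in as an immediate.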
    m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
    
    m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
    
    AssemblyHelpers::JumpList tooFewFails;
    
    if (exit.m_kind == InadequateCoverage) {
        // Proceed based on the assumption that we can profitably optimize this code once
        // it has executed enough times.
        
        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()), GPRInfo::regT2);
        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
        m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
        m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
        m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()));
        m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
        
        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(Options::forcedOSRExitCountForReoptimization)));
    } else {
        // Proceed based on the assumption that we can handle these exits so long as they
        // don't get too frequent.
        
        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
        m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
        m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
        m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
        m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
        
        m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
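        
        // regT0 now points at the baseline code block; the execution counter
        // adjusted below belongs to it, not to the DFG code block.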
        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold())));
        m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
        
        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1));
    }
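    
    // If no tooFewFails branch was taken, the counters say this code block is
    // misbehaving badly enough that continuing to run it speculatively is a loss.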
    // Reoptimize as soon as possible.
    m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
    
    tooFewFails.link(&m_jit);
    
    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t targetValue =
        ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
            m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(),
            m_jit.baselineCodeBlock());
    m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
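    
    // The execute counter is stored negated and counts up toward zero. Note that
    // targetValue is computed now, when the ramp is compiled, and baked into the
    // generated stores as immediates.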
    doneAdjusting.link(&m_jit);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)