/*
 * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGOSRExitCompilerCommon.h"

#if ENABLE(DFG_JIT)

#include "Arguments.h"
#include "DFGJITCode.h"
#include "DFGOperations.h"
#include "JIT.h"
#include "JSCJSValueInlines.h"
#include "JSCInlines.h"

namespace JSC { namespace DFG {
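
// Emitted at every OSR exit site: bumps the exit counters and decides between
// reoptimizing right away and backing off. Both the exit site's own count
// (exit.m_count) and the optimized CodeBlock's aggregate OSR exit counter are
// incremented; if the baseline code block's execute counter is already
// non-negative, or the aggregate exit count exceeds
// exitCountThresholdForReoptimization(), we call triggerReoptimizationNow().
// Otherwise we wind back the baseline code block's tier-up counters so the
// next optimization attempt happens only after a long warm-up.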
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;
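
    // regT0 holds the optimized CodeBlock; regT2 will hold its incremented
    // OSR exit counter. tooFewFails is filled in once that counter has been
    // compared against the reoptimization threshold below.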
    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()));

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    ASSERT(GPRInfo::argumentGPR0 != GPRInfo::regT1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::regT1);
    jit.call(GPRInfo::regT1);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
        break;
    }
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}
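
// Rebuilds the stack frames that the DFG inlined away. Walking from the
// exit's code origin out to the machine frame, this writes each inlined
// frame's CodeBlock, scope chain, caller-frame and return-PC slots, argument
// count, and callee, so that the stack looks exactly as the baseline JIT
// expects when we land in it.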
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;
        CallLinkInfo* callLinkInfo =
            baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
        RELEASE_ASSERT(callLinkInfo);

        void* jumpTarget = callLinkInfo->callReturnLocation.executableAddress();

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;
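
        // The two paths below store the same logical frame slots; they differ
        // only in value representation: one 64-bit word per JSValue on
        // JSVALUE64, separate 32-bit tag/payload words on JSVALUE32_64.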
#if USE(JSVALUE64)
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));

        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::addressFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#else // USE(JSVALUE64): the JSVALUE32_64 (32-bit) path.
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));

        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::payloadFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#endif // USE(JSVALUE64)

        if (baselineCodeBlock->usesArguments()) {
            AssemblyHelpers::Jump noArguments = jit.branchTestPtr(AssemblyHelpers::Zero, GPRInfo::regT3);
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
            jit.storePtr(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT3, Arguments::offsetOfRegisters()));
            noArguments.link(&jit);
        }
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}
#if ENABLE(GGC)
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerNotMarkedOrAlreadyRemembered = jit.checkMarkByte(owner);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    ownerNotMarkedOrAlreadyRemembered.link(&jit);
}
#endif // ENABLE(GGC)
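
// Final step of every OSR exit: write-barrier the executables we are about to
// start running (under GGC), shift the call frame register if the exit
// originated in an inlined frame, locate the baseline machine code address
// for the exit's bytecode index via the decoded code map, restore the
// baseline stack pointer, and jump there.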
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
#if ENABLE(GGC)
    // Write barrier the owner executables because we're jumping into a different block.
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()->ownerExecutable()), GPRInfo::nonArgGPR0);
    osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            ScriptExecutable* ownerExecutable = inlineCallFrame->executable.get();
            jit.move(AssemblyHelpers::TrustedImmPtr(ownerExecutable), GPRInfo::nonArgGPR0);
            osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
        }
    }
#endif

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(baselineCodeBlock) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);

    jit.jitAssertTagsInPlace();

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
}
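
// Recovers arguments objects that the DFG optimized out. generateFor() is
// invoked for each operand that should hold an arguments object; the
// m_didCreateArgumentsObject set guarantees the object is created only once
// per inline call frame, after which it is simply copied into every operand
// that refers to it.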
ArgumentsRecoveryGenerator::ArgumentsRecoveryGenerator() { }
ArgumentsRecoveryGenerator::~ArgumentsRecoveryGenerator() { }

void ArgumentsRecoveryGenerator::generateFor(
    int operand, CodeOrigin codeOrigin, CCallHelpers& jit)
{
    // Find the right inline call frame.
    InlineCallFrame* inlineCallFrame = 0;
    for (InlineCallFrame* current = codeOrigin.inlineCallFrame;
        current;
        current = current->caller.inlineCallFrame) {
        if (current->stackOffset >= operand) {
            inlineCallFrame = current;
            break;
        }
    }

    if (!jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
        return;
    VirtualRegister argumentsRegister = jit.baselineArgumentsRegisterFor(inlineCallFrame);
    if (m_didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
        // We know this call frame optimized out an arguments object that
        // the baseline JIT would have created. Do that creation now.
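        // Note the asymmetry between the two paths below: on JSVALUE64 an
        // ExecState for the inlined frame is, in effect, faked by offsetting
        // the call frame register, so one operation serves both cases, while
        // the 32_64 path passes the InlineCallFrame* to a dedicated operation.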
#if USE(JSVALUE64)
        if (inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
            jit.setupArguments(GPRInfo::regT0);
        } else
            jit.setupArgumentsExecState();
        jit.move(
            AssemblyHelpers::TrustedImmPtr(
                bitwise_cast<void*>(operationCreateArgumentsDuringOSRExit)),
            GPRInfo::nonArgGPR0);
        jit.call(GPRInfo::nonArgGPR0);
        jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
        jit.store64(
            GPRInfo::returnValueGPR,
            AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
        jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
#else // USE(JSVALUE64): the JSVALUE32_64 (32-bit) path.
        if (inlineCallFrame) {
            jit.setupArgumentsWithExecState(
                AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
            jit.move(
                AssemblyHelpers::TrustedImmPtr(
                    bitwise_cast<void*>(operationCreateInlinedArgumentsDuringOSRExit)),
                GPRInfo::nonArgGPR0);
        } else {
            jit.setupArgumentsExecState();
            jit.move(
                AssemblyHelpers::TrustedImmPtr(
                    bitwise_cast<void*>(operationCreateArgumentsDuringOSRExit)),
                GPRInfo::nonArgGPR0);
        }
        jit.call(GPRInfo::nonArgGPR0);
        jit.store32(
            AssemblyHelpers::TrustedImm32(JSValue::CellTag),
            AssemblyHelpers::tagFor(argumentsRegister));
        jit.store32(
            GPRInfo::returnValueGPR,
            AssemblyHelpers::payloadFor(argumentsRegister));
        jit.store32(
            AssemblyHelpers::TrustedImm32(JSValue::CellTag),
            AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister)));
        jit.store32(
            GPRInfo::returnValueGPR,
            AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister)));
        jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
#endif // USE(JSVALUE64)
    }

#if USE(JSVALUE64)
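    // The arguments object (a full 64-bit JSValue here) now lives in the
    // arguments register's stack slot; copy it into the operand being recovered.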
    jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
    jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
#else // USE(JSVALUE64): the JSVALUE32_64 (32-bit) path.
    jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0);
    jit.store32(
        AssemblyHelpers::TrustedImm32(JSValue::CellTag),
        AssemblyHelpers::tagFor(operand));
    jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand));
#endif // USE(JSVALUE64)
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)