/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompilerCommon.h"

#if ENABLE(DFG_JIT)

#include "DFGJITCode.h"
#include "DFGOperations.h"
#include "JIT.h"
#include "JSCJSValueInlines.h"
#include "JSCInlines.h"

namespace JSC { namespace DFG {

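// Emitted into each OSR exit stub: bump the per-exit-site counter and the
// CodeBlock's aggregate OSR exit counter, then either trigger reoptimization
// of the baseline CodeBlock or push the baseline execution counters back so
// the code re-warms before the next tier-up attempt. In rough pseudo-C (an
// explanatory sketch, not what is literally emitted; 'baseline' stands for
// jit.baselineCodeBlock() and 'threshold' is chosen below):
//
//     exit.m_count++;
//     codeBlock->osrExitCounter++;
//     if (baseline->jitExecuteCounter >= 0 || codeBlock->osrExitCounter > threshold)
//         triggerReoptimizationNow(baseline, &exit);
//     else
//         /* re-arm the baseline counters for a long warm-up */;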
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

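    // The JIT execute counter counts up from a negative threshold toward zero,
    // so a non-negative value means the baseline CodeBlock has already crossed
    // its threshold; in that case reoptimize now, regardless of exit counts.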
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // We want to figure out if there's a possibility that we're in a loop. For the outermost
    // code block in the inline stack this is handled naturally: the loop OSR-entry trigger
    // checks the exit count of the replacement of the CodeBlock from which we are exiting.
    // The problem is the inlined functions, which might also contain loops but whose baseline
    // versions don't know where to look for the exit count. So instead we check whether any
    // inlined function's loops ran hot enough that we tried to OSR-enter them. If so, we use
    // the loop reoptimization trigger; otherwise we use the normal reoptimization trigger.

    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->executable->addressOfDidTryToEnterInLoop())));
    }

    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }

    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
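    // No argument registers on this target (e.g. 32-bit x86): pass both
    // arguments to triggerReoptimizationNow on the stack via poke.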
    jit.poke(GPRInfo::regT0);
    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
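    // Store the negated threshold: the counter counts up from -clippedValue
    // toward zero, so the baseline code must run roughly clippedValue more
    // times before the next tier-up attempt is made.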
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}

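// Rebuilds, in the machine frame, the call frames for every function the DFG
// inlined at the exit's code origin. Walking from the exit site outward, this
// writes the baseline CodeBlock, caller frame pointer, return PC, argument
// count, bytecode location, and (for non-closure calls) the callee constant
// into the slots the baseline JIT expects.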
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        void* jumpTarget = nullptr;
        void* trueReturnPC = nullptr;

        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;

        switch (inlineCallFrame->kind) {
        case InlineCallFrame::Call:
        case InlineCallFrame::Construct:
        case InlineCallFrame::CallVarargs:
        case InlineCallFrame::ConstructVarargs: {
            CallLinkInfo* callLinkInfo =
                baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
            RELEASE_ASSERT(callLinkInfo);

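            // For ordinary (and varargs) calls, resume the baseline caller at
            // the return location of the original call instruction.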
            jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
            break;
        }

        case InlineCallFrame::GetterCall:
        case InlineCallFrame::SetterCall: {
            StructureStubInfo* stubInfo =
                baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
            RELEASE_ASSERT(stubInfo);

            switch (inlineCallFrame->kind) {
            case InlineCallFrame::GetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineGetterReturnThunkGenerator).code().executableAddress();
                break;
            case InlineCallFrame::SetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineSetterReturnThunkGenerator).code().executableAddress();
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

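            // Getter/setter calls return through a thunk; the thunk needs the
            // "true" return PC (the baseline access's done label, stored below
            // just past the arguments) to resume the property access.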
            trueReturnPC = stubInfo->callReturnLocation.labelAtOffset(
                stubInfo->patch.deltaCallToDone).executableAddress();
            break;
        } }

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        if (trueReturnPC)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(trueReturnPC), AssemblyHelpers::addressFor(inlineCallFrame->stackOffset + virtualRegisterForArgument(inlineCallFrame->arguments.size()).offset()));

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isVarargs())
            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
#if USE(JSVALUE64)
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#else // not USE(JSVALUE64): the 32-bit path
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#endif // USE(JSVALUE64); the code directly above is the 32-bit path
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
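    // Finally, record the bytecode location of the exit in the outermost
    // frame's ArgumentCount tag slot, just as we did for each inlined frame.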
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}

#if ENABLE(GGC)
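// Emits an inlined generational write barrier over 'owner': skip the slow path
// entirely if the object is already remembered or in eden; otherwise call out
// to operationOSRWriteBarrier to add it to the remembered set.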
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(owner);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    ownerIsRememberedOrInEden.link(&jit);
}
#endif // ENABLE(GGC)

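// The final step of an OSR exit: barrier the executables we are exiting into
// (under GGC), rebase the call frame register for an inlined exit origin, look
// up the baseline machine code address for the exit's bytecode index in the
// decoded code map, restore the stack pointer, and jump into baseline code.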
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
#if ENABLE(GGC)
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()->ownerExecutable()), GPRInfo::nonArgGPR0);
    osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            ScriptExecutable* ownerExecutable = inlineCallFrame->executable.get();
            jit.move(AssemblyHelpers::TrustedImmPtr(ownerExecutable), GPRInfo::nonArgGPR0);
            osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
        }
    }
#endif

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(baselineCodeBlock) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);

    jit.jitAssertTagsInPlace();

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)