/*
 * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)

#include "DFGOperations.h"
#include "DFGOSRExitCompilerCommon.h"
#include "DFGSpeculativeJIT.h"
#include "JSCInlines.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        debugInfo->kind = exit.m_kind;
        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit.
    m_jit.addPtr(
        CCallHelpers::TrustedImm32(
            -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
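    // In effect: sp = fp - requiredRegisterCountForExit * sizeof(Register). The
    // immediate is negative because the stack grows down from the frame pointer.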

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
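            // The speculatively executed add has already clobbered dest, so undo
            // it (dest -= src) to recover the pre-add value before exiting.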
            m_jit.sub32(recovery->src(), recovery->dest());
            break;

        case BooleanSpeculationCheck:
            break;

        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell since both BadCache and BadIndexingType exits occur after
            // the cell check would have already happened.

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);

#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
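                // Net effect: lastSeenStructureID = value->structureID() and
                // arrayModes |= (1 << indexingType), i.e. the profile's bit set
                // gains the mode bit for the structure we actually saw.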

#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
                m_jit.popToRestore(scratch1);
#else
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
#endif
            }
        }

        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
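            // The bucket records the tag/payload we actually saw at exit time, so
            // the baseline profile learns about the value that defeated speculation.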

            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
                GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());

#if CPU(ARM64)
                m_jit.pushToSave(scratch);
#else
                m_jit.push(scratch);
#endif

                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);

#if CPU(ARM64)
                m_jit.popToRestore(scratch);
#else
                m_jit.pop(scratch);
#endif
            } else if (exit.m_jsValueSource.hasKnownTag()) {
                m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            } else {
                m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            }
        }
    }

    // Do a simplified OSR exit. See DFGOSRExitCompiler64.cpp's comment regarding how and why we
    // do this simple approach.

    // 4) Save all state from GPRs into the scratch buffer.

    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
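    // scratch[index] stages the { tag, payload } pair for operands[index]. On
    // 32_64 every recovery is two 32-bit words, so all values are parked here
    // first and only written to their final stack slots in step 7, after every
    // source has been read.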

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
        case UnboxedCellInGPR:
            m_jit.store32(
                recovery.gpr(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        case InPair:
            m_jit.store32(
                recovery.tagGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                recovery.payloadGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        default:
            break;
        }
    }

    // Now all GPRs are free to reuse.

    // 5) Save all state from FPRs into the scratch buffer.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case InFPR:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
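            // Step 4 freed every GPR, so regT0 can safely hold the address of
            // the scratch slot that storeDouble needs as a base register.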
            m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
            break;

        default:
            break;
        }
    }

    // Now all FPRs are free to reuse.

    // 6) Save all state from the stack into the scratch buffer. For simplicity we
    //    do this even for state that's already in the right place on the stack.
    //    It makes things simpler later.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                AssemblyHelpers::tagFor(recovery.virtualRegister()),
                GPRInfo::regT0);
            m_jit.load32(
                AssemblyHelpers::payloadFor(recovery.virtualRegister()),
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                GPRInfo::regT1,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        default:
            break;
        }
    }

    // 7) Do all data format conversions and store the results into the stack.

    bool haveArguments = false;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        switch (recovery.technique()) {
        case InPair:
        case DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
                GPRInfo::regT0);
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT1,
                AssemblyHelpers::payloadFor(operand));
            break;

        case InFPR:
        case DoubleDisplacedInJSStack:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
            m_jit.purifyNaN(FPRInfo::fpRegT0);
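            // On 32_64 a double is boxed by its raw bits, so an impure NaN whose
            // high word happens to match a JSValue tag would be misread as a
            // non-double. purifyNaN canonicalizes any NaN to the single pure NaN
            // bit pattern before the value is written back.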
            m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
            break;

        case UnboxedInt32InGPR:
        case Int32DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::Int32Tag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;

        case UnboxedCellInGPR:
        case CellDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;

        case UnboxedBooleanInGPR:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::BooleanTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;

        case Constant:
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().tag()),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().payload()),
                AssemblyHelpers::payloadFor(operand));
            break;

        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue().tag()),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue().payload()),
                AssemblyHelpers::payloadFor(operand));
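            // JSValue() is the empty value; it is a placeholder here, and the
            // real arguments object is materialized in step 10 below.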
            break;

        default:
            break;
        }
    }

    // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //    that all new calls into this code will go to the new JIT, so the execute
    //    counter only affects call frames that performed OSR exit and call frames
    //    that were still executing the old JIT at the time of another call frame's
    //    OSR exit. We want to ensure that the following is true:
    //
    //    (a) Code that performs an OSR exit gets a chance to reenter optimized
    //        code eventually, since optimized code is faster. But we don't
    //        want to do such reentry too aggressively (see (c) below).
    //
    //    (b) If there is code on the call stack that is still running the old
    //        JIT's code and has never OSR'd, then it should get a chance to
    //        perform OSR entry despite the fact that we've exited.
    //
    //    (c) Code that performs an OSR exit should not immediately retry OSR
    //        entry, since both forms of OSR are expensive. OSR entry is
    //        particularly expensive.
    //
    //    (d) Frequent OSR failures, even those that do not result in the code
    //        running in a hot loop, result in recompilation getting triggered.
    //
    //    To ensure (c), we'd like to set the execute counter to
    //    counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //    (a) and (b), since then every OSR exit would delay the opportunity for
    //    every call frame to perform OSR entry. Essentially, if OSR exit happens
    //    frequently and the function has few loops, then the counter will never
    //    become non-negative and OSR entry will never be triggered. OSR entry
    //    will only happen if a loop gets hot in the old JIT, which does a pretty
    //    good job of ensuring (a) and (b). But that doesn't take care of (d),
    //    since each speculation failure would reset the execute counter.
    //    So we check here if the number of speculation failures is significantly
    //    larger than the number of successes (we want a 90% success rate), and if
    //    there have been a large enough number of failures. If so, we set the
    //    counter to 0; otherwise we set the counter to
    //    counterValueForOptimizeAfterWarmUp().
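    //
    //    In pseudocode, the policy described above is roughly (a sketch only;
    //    the authoritative logic lives in handleExitCounts() in
    //    DFGOSRExitCompilerCommon.cpp):
    //
    //        if (failures greatly outnumber successes && failures >= threshold)
    //            executeCounter = 0; // retry optimization soon
    //        else
    //            executeCounter = counterValueForOptimizeAfterWarmUp();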

    handleExitCounts(m_jit, exit);

    // 9) Reify inlined call frames.

    reifyInlinedCallFrames(m_jit, exit);

    // 10) Create arguments if necessary and place them into the appropriate aliased
    //     registers.

    if (haveArguments) {
        ArgumentsRecoveryGenerator argumentsRecovery;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            argumentsRecovery.generateFor(
                operands.operandForIndex(index), exit.m_codeOrigin, m_jit);
        }
    }

    // 11) And finish.
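    // adjustAndJumpToTarget() (in DFGOSRExitCompilerCommon) fixes up the call
    // frame and stack pointer for the baseline code block and then jumps to the
    // exit's target bytecode in baseline code.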
    adjustAndJumpToTarget(m_jit, exit);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)