/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)

#include "DFGOperations.h"

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("OSR exit for Node @%d (", (int)exit.m_nodeIndex);
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLog("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLog(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLog(") at JIT offset 0x%x ", m_jit.debugOffset());
    exit.dump(WTF::dataFile());
#endif
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
    SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
    debugInfo->codeBlock = m_jit.codeBlock();
    debugInfo->nodeIndex = exit.m_nodeIndex;

    m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
#endif

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            break;

        case BooleanSpeculationCheck:
            break;

        default:
            break;
        }
    }
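
    // For example (an illustrative scenario, not taken from this file): a
    // speculative integer add may compute "dest += src" and only then branch
    // on the overflow flag. If that branch lands here, the sub32 above undoes
    // the mutation; since 32-bit add and sub wrap around, dest is restored to
    // its exact prior value even when the add overflowed.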

    // 3) Refine some value profile, if appropriate.

    if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
        EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

        if (exit.m_jsValueSource.isAddress()) {
            // Save a register so we can use it.
            GPRReg scratch = GPRInfo::regT0;
            if (scratch == exit.m_jsValueSource.base())
                scratch = GPRInfo::regT1;
            ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(uint32_t));
            EncodedJSValue* scratchDataBuffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
            m_jit.store32(scratch, scratchDataBuffer);
            m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
            m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
            m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
            m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            m_jit.load32(scratchDataBuffer, scratch);
        } else if (exit.m_jsValueSource.hasKnownTag()) {
            m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
            m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
        } else {
            m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
            m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
        }
    }
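
    // On JSVALUE32_64, an EncodedJSValue is a pair of 32-bit words, so the
    // value that defeated the speculation is copied into the profile bucket
    // one word at a time. Roughly (a sketch; field order assumes the
    // little-endian layout):
    //
    //     union EncodedValueDescriptor {
    //         struct { int32_t payload; int32_t tag; } asBits;
    //     };
    //
    // Feeding the offending value back into the value profile lets a later
    // DFG compile of this code block speculate more accurately here.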

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32InRegisterFile = false;
    bool haveUnboxedCellInRegisterFile = false;
    bool haveUnboxedBooleanInRegisterFile = false;
    bool haveUInt32s = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;

    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        switch (recovery.technique()) {
        case DisplacedInRegisterFile:
        case Int32DisplacedInRegisterFile:
        case CellDisplacedInRegisterFile:
        case BooleanDisplacedInRegisterFile:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
                switch (exit.m_variables[recovery.virtualRegister()].technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UnboxedBooleanInGPR:
                case UInt32InGPR:
                case InPair:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case AlreadyInRegisterFileAsUnboxedInt32:
            haveUnboxedInt32InRegisterFile = true;
            break;

        case AlreadyInRegisterFileAsUnboxedCell:
            haveUnboxedCellInRegisterFile = true;
            break;

        case AlreadyInRegisterFileAsUnboxedBoolean:
            haveUnboxedBooleanInRegisterFile = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        default:
            break;
        }
    }

    unsigned scratchBufferLengthBeforeUInt32s = numberOfPoisonedVirtualRegisters + ((numberOfDisplacedVirtualRegisters * 2) <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters);
    ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * (scratchBufferLengthBeforeUInt32s + (haveUInt32s ? 2 : 0)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
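
    // Worked sizing example (assumed values, not from this file): with 3
    // poisoned virtual registers, 5 displaced virtual registers, and
    // GPRInfo::numberOfRegisters == 6, the fast shuffling path is ruled out
    // because 5 * 2 > 6, so the displaced values need scratch slots too:
    // 3 + 5 = 8 slots, plus 2 more if any UInt32InGPR recovery is present,
    // for 10 * sizeof(EncodedJSValue) = 80 bytes.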

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers and cells, except for those in registers.

    if (haveUnboxedInt32InRegisterFile || haveUnboxedCellInRegisterFile || haveUnboxedBooleanInRegisterFile) {
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            switch (recovery.technique()) {
            case AlreadyInRegisterFileAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;

            case AlreadyInRegisterFileAsUnboxedCell:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;

            case AlreadyInRegisterFileAsUnboxedBoolean:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index))));
                break;

            default:
                break;
            }
        }
    }
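
    // Reboxing a value that is already in its register file slot is a single
    // store: the 32-bit payload is in place, so writing the matching tag word
    // (Int32Tag, CellTag, or BooleanTag) beside it is enough to reconstitute
    // a full JSValue that the baseline JIT can read.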

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    initializePoisoned(exit.m_variables.size());
    unsigned currentPoisonIndex = 0;

    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        int operand = exit.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
        case InPair:
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                m_jit.store32(recovery.tagGPR(), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.payloadGPR(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
        case UInt32InGPR: {
            EncodedJSValue* myScratch = scratchDataBuffer + scratchBufferLengthBeforeUInt32s;

            GPRReg addressGPR = GPRInfo::regT0;
            if (addressGPR == recovery.gpr())
                addressGPR = GPRInfo::regT1;

            m_jit.storePtr(addressGPR, myScratch);
            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);

            AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));

            m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
            m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + currentPoisonIndex), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
            } else
                m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor((VirtualRegister)operand));

            AssemblyHelpers::Jump done = m_jit.jump();

            positive.link(&m_jit);

            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
            } else {
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
            }

            done.link(&m_jit);

            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
            m_jit.loadPtr(myScratch, addressGPR);

            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            }
            break;
        }
        default:
            break;
        }
    }
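
    // The UInt32InGPR case deals with a GPR holding an unsigned 32-bit value.
    // If the sign bit is clear, the value fits in an Int32 and is boxed with
    // Int32Tag. Otherwise the true value is recovery.gpr() interpreted as
    // unsigned, which only a double can represent; interpreting the bits as
    // signed and adding 2^32 yields it. For example, 0xFFFFFFFF converts as
    // -1, and -1.0 + 4294967296.0 = 4294967295.0, the intended value.
    // addressGPR and fpRegT0 are parked in the two extra scratch slots first
    // and reloaded at the end, so the detour clobbers no register.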

    // 7) Dump all doubles into the register file, or to the scratch storage if the
    //    destination virtual register is poisoned.
    if (haveFPRs) {
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != InFPR)
                continue;
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
                m_jit.storeDouble(recovery.fpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }

    // At this point all GPRs are available for scratch use.

    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);

    // 8) Reshuffle displaced virtual registers. Optimize for the case that
    //    the number of displaced virtual registers is not more than the number
    //    of available physical registers.

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters * 2 <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

            unsigned displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;
                case Int32DisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), GPRInfo::toRegister(displacementIndex++));
                    break;
                case CellDisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::toRegister(displacementIndex++));
                    break;
                case BooleanDisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), GPRInfo::toRegister(displacementIndex++));
                    break;
                default:
                    break;
                }
            }

            displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                case Int32DisplacedInRegisterFile:
                case CellDisplacedInRegisterFile:
                case BooleanDisplacedInRegisterFile:
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
                    break;
                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // super fast. In particular, it correctly handles cases where for
            // example the displacements are a permutation of the destination
            // values, like
            //
            // 1 -> 2
            // 2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.

            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                    scratchIndex++;
                    break;
                case Int32DisplacedInRegisterFile:
                case CellDisplacedInRegisterFile:
                case BooleanDisplacedInRegisterFile:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    break;
                default:
                    break;
                }
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                switch (recovery.technique()) {
                case DisplacedInRegisterFile:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
                    scratchIndex++;
                    break;
                case Int32DisplacedInRegisterFile:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                    break;
                case CellDisplacedInRegisterFile:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                    break;
                case BooleanDisplacedInRegisterFile:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                    break;
                default:
                    break;
                }
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }
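
    // The "* 2" in the fast-path test above reflects that each displaced
    // JSValue occupies two GPRs here (payload plus tag), so at most
    // GPRInfo::numberOfRegisters / 2 values can be shuffled without touching
    // memory. The fast path needs no scratch slots because it is a strict
    // load-all-then-store-all sequence: every source slot is read in the
    // first loop before the second loop overwrites any destination slot.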

    // 9) Dump all poisoned virtual registers.

    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = exit.m_variables[virtualRegister];
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UnboxedBooleanInGPR: {
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                break;
            }

            case InFPR:
            case InPair:
            case UInt32InGPR:
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }
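
    // With the displaced values now in place, it is safe to fill the poisoned
    // slots. poisonIndex(virtualRegister) recovers the scratch slot recorded
    // in m_poisonScratchIndices when the value was parked in steps 6 and 7,
    // so each poisoned register file slot is written only after everything
    // that needed its old contents has read it.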

    // 10) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

    if (haveConstants) {
        if (haveUndefined) {
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().payload()), GPRInfo::regT0);
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().tag()), GPRInfo::regT1);
        }

        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined()) {
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
            } else {
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index)));
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index)));
            }
        }
    }
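
    // Hoisting jsUndefined()'s payload and tag into regT0/regT1 once means
    // each undefined slot costs two register stores rather than two immediate
    // stores, which is typically smaller code when undefined recurs; other
    // constants are assumed rare enough that immediate stores are fine.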

    // 11) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().

    handleExitCounts(exit);
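
    // To make the 90% target concrete (illustrative numbers only): 20 exits
    // against 100 successful speculations is a failure rate above 10%, so the
    // counter would be set to 0; 5 exits against 100 successes stays within
    // budget, and the counter goes back to
    // counterValueForOptimizeAfterWarmUp() so reentry waits for warm-up.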

    // 12) Load the result of the last bytecode operation into regT0.

    if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
        m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
        m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
    }

    // 13) Fix call frame(s).

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock));

    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), returnBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.add32(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
        m_jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee)));
    }
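
    // Each iteration of the loop above reifies one call frame that the DFG
    // had inlined away: the CodeBlock, ScopeChain, CallerFrame, ReturnPC,
    // ArgumentCount, and Callee header slots are written at the frame's
    // stackOffset, so the stack looks to the baseline JIT as if the inlined
    // call had really been made. The ReturnPC is the caller's baseline
    // machine address for the bytecode just after the op_call, found through
    // the caller's decoded code map.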

    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    // 14) Jump into the corresponding baseline JIT code.

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);
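
    // The decoded code map is assumed sorted by bytecode index, which is what
    // lets binarySearch locate the machine-code offset the baseline JIT
    // emitted for exit.m_codeOrigin.bytecodeIndex; jumpTarget is then the
    // absolute address of that instruction within the baseline JIT code.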

    ASSERT(GPRInfo::regT2 != GPRInfo::cachedResultRegister && GPRInfo::regT2 != GPRInfo::cachedResultRegister2);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    m_jit.jump(GPRInfo::regT2);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog(" -> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)