/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)

#include "DFGOperations.h"
#include "Operations.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("OSR exit (");
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLogF(") at JIT offset 0x%x ", m_jit.debugOffset());
    dumpOperands(operands, WTF::dataFile());
#endif

    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

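    // The only recovery that currently requires real work is SpeculativeAdd: the
    // add had already executed in place when its overflow check failed, so the
    // source operand is subtracted back out of the destination to restore the
    // pre-add value that the baseline JIT expects to find.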
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            break;

        case BooleanSpeculationCheck:
            break;

        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell since both BadCache and BadIndexingType exits occur after
            // the cell check would have already happened.

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);

#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
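                // The profile's array-modes field is a bitvector with one bit
                // per indexing type. Record the mode we just observed by
                // computing (1 << indexingType) and OR-ing it in.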
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
                m_jit.popToRestore(scratch1);
#else
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
#endif
            }
        }

        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

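            // On JSVALUE32_64 an EncodedJSValue is a 64-bit tag/payload pair,
            // so the value that triggered the exit is written into the profile
            // bucket one 32-bit half at a time, letting later profiling see the
            // value that caused this exit.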
            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
                GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());

#if CPU(ARM64)
                m_jit.pushToSave(scratch);
#else
                m_jit.push(scratch);
#endif

                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);

#if CPU(ARM64)
                m_jit.popToRestore(scratch);
#else
                m_jit.pop(scratch);
#endif
            } else if (exit.m_jsValueSource.hasKnownTag()) {
                m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            } else {
                m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            }
        }
    }

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32InJSStack = false;
    bool haveUnboxedCellInJSStack = false;
    bool haveUnboxedBooleanInJSStack = false;
    bool haveUInt32s = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    bool haveArguments = false;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            if (recovery.virtualRegister() < (int)operands.numberOfLocals()) {
                switch (operands.local(recovery.virtualRegister()).technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UnboxedBooleanInGPR:
                case UInt32InGPR:
                case InPair:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case AlreadyInJSStackAsUnboxedInt32:
            haveUnboxedInt32InJSStack = true;
            break;

        case AlreadyInJSStackAsUnboxedCell:
            haveUnboxedCellInJSStack = true;
            break;

        case AlreadyInJSStackAsUnboxedBoolean:
            haveUnboxedBooleanInJSStack = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            break;

        default:
            break;
        }
    }

    unsigned scratchBufferLengthBeforeUInt32s = numberOfPoisonedVirtualRegisters + ((numberOfDisplacedVirtualRegisters * 2) <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters);
    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * (scratchBufferLengthBeforeUInt32s + (haveUInt32s ? 2 : 0)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers and cells, except for those in registers.

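    // These values are already sitting in their stack slots as raw 32-bit
    // payloads; reboxing on JSVALUE32_64 just means writing the matching tag
    // into each slot's tag word.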
    if (haveUnboxedInt32InJSStack || haveUnboxedCellInJSStack || haveUnboxedBooleanInJSStack) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            switch (recovery.technique()) {
            case AlreadyInJSStackAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case AlreadyInJSStackAsUnboxedCell:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case AlreadyInJSStackAsUnboxedBoolean:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            default:
                break;
            }
        }
    }

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    initializePoisoned(operands.numberOfLocals());
    unsigned currentPoisonIndex = 0;

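    // Each poisoned local is parked in its own scratch-buffer slot;
    // m_poisonScratchIndices records which slot, so that step 9 can move the
    // value into its real stack slot once the displaced registers have been
    // shuffled out of the way.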
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
        case InPair:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                m_jit.store32(recovery.tagGPR(), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.payloadGPR(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
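
        // A value recovered as UInt32InGPR may not fit in an int32. If it does
        // not (the register reads as negative when treated as signed), it must
        // be reboxed as a double: convert the int32 reading and add 2^32 to
        // compensate. fpRegT0 and one GPR are borrowed for this, and are saved
        // to and restored from the scratch buffer around the conversion.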
        case UInt32InGPR: {
            EncodedJSValue* myScratch = scratchDataBuffer + scratchBufferLengthBeforeUInt32s;

            GPRReg addressGPR = GPRInfo::regT0;
            if (addressGPR == recovery.gpr())
                addressGPR = GPRInfo::regT1;

            m_jit.storePtr(addressGPR, myScratch);
            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);

            AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));

            m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
            m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + currentPoisonIndex), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
            } else
                m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor((VirtualRegister)operand));

            AssemblyHelpers::Jump done = m_jit.jump();

            positive.link(&m_jit);

            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
            } else {
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
            }

            done.link(&m_jit);

            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
            m_jit.loadPtr(myScratch, addressGPR);

            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            }
            break;
        }
        default:
            break;
        }
    }

    // 7) Dump all doubles into the stack, or to the scratch storage if the
    //    destination virtual register is poisoned.
    if (haveFPRs) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.storeDouble(recovery.fpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // At this point all GPRs are available for scratch use.

    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);

    // 8) Reshuffle displaced virtual registers. Optimize for the case that
    //    the number of displaced virtual registers is not more than the number
    //    of available physical registers.

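    // When every displaced value fits in a (payload, tag) pair of GPRs, all
    // the loads are emitted before all the stores, so no slot is overwritten
    // before it has been read and permutations are handled for free. Otherwise
    // everything is staged through the scratch buffer instead.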
    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters * 2 <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

            unsigned displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;
                case Int32DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), GPRInfo::toRegister(displacementIndex++));
                    break;
                case CellDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::toRegister(displacementIndex++));
                    break;
                case BooleanDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), GPRInfo::toRegister(displacementIndex++));
                    break;
                default:
                    break;
                }
            }

            displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case CellDisplacedInJSStack:
                case BooleanDisplacedInJSStack:
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // optimal. In particular, it correctly handles cases where, for example,
            // the displacements are a permutation of the destination values, like
            //
            // 1 -> 2
            // 2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.

            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                    scratchIndex++;
                    break;
                case Int32DisplacedInJSStack:
                case CellDisplacedInJSStack:
                case BooleanDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    break;
                default:
                    break;
                }
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    scratchIndex++;
                    break;
                case Int32DisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                case CellDisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                case BooleanDisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                default:
                    break;
                }
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }

    // 9) Dump all poisoned virtual registers.

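    // The values parked in the scratch buffer during steps 6 and 7 can now be
    // copied into their real stack slots: the displaced values that would have
    // been clobbered by those stores have been moved out of the way.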
    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = operands.local(virtualRegister);
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UnboxedBooleanInGPR: {
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                break;
            }

            case InFPR:
            case InPair:
            case UInt32InGPR:
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }

    // 10) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

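    // jsUndefined()'s payload and tag are materialized once into regT0/regT1,
    // so each undefined slot costs two register stores rather than two
    // immediate stores.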
    if (haveConstants) {
        if (haveUndefined) {
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().payload()), GPRInfo::regT0);
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().tag()), GPRInfo::regT1);
        }

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined()) {
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
            } else {
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
            }
        }
    }

    // 11) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want a 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().

    handleExitCounts(exit);

    // 12) Reify inlined call frames.

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

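    // The DFG elided the call frame headers of inlined callees. Walk from the
    // deepest inline frame outward and materialize each header slot (CodeBlock,
    // ScopeChain, CallerFrame, ReturnPC, ArgumentCount, Callee) that the
    // baseline JIT expects, with the return PC pointing just past the call in
    // the caller's baseline code.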
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), returnBytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.add32(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        m_jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
    }

    // 13) Create arguments if necessary and place them into the appropriate aliased
    //     registers.

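    // A recovery of ArgumentsThatWereNotCreated means the DFG proved that the
    // arguments object was never observed and skipped allocating it. Since we
    // are returning to the baseline JIT, create it now by calling into the
    // runtime, once per call frame (tracked via didCreateArgumentsObject), and
    // then alias the resulting cell into each operand that expects it.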
    if (haveArguments) {
        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
            NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            int operand = operands.operandForIndex(index);
            // Find the right inline call frame.
            InlineCallFrame* inlineCallFrame = 0;
            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
                 current;
                 current = current->caller.inlineCallFrame) {
                if (current->stackOffset <= operand) {
                    inlineCallFrame = current;
                    break;
                }
            }

            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
                continue;
            int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
                // We know this call frame optimized out an arguments object that
                // the baseline JIT would have created. Do that creation now.
                if (inlineCallFrame) {
                    m_jit.setupArgumentsWithExecState(
                        AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
                    m_jit.move(
                        AssemblyHelpers::TrustedImmPtr(
                            bitwise_cast<void*>(operationCreateInlinedArguments)),
                        GPRInfo::nonArgGPR0);
                } else {
                    m_jit.setupArgumentsExecState();
                    m_jit.move(
                        AssemblyHelpers::TrustedImmPtr(
                            bitwise_cast<void*>(operationCreateArguments)),
                        GPRInfo::nonArgGPR0);
                }
                m_jit.call(GPRInfo::nonArgGPR0);
                m_jit.store32(
                    AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                    AssemblyHelpers::tagFor(argumentsRegister));
                m_jit.store32(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::payloadFor(argumentsRegister));
                m_jit.store32(
                    AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                    AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.store32(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
            }

            m_jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand));
        }
    }

    // 14) Load the result of the last bytecode operation into the cached result
    //     registers (payload in cachedResultRegister, tag in cachedResultRegister2).

    if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
        m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
        m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
    }

    // 15) Adjust the call frame pointer.

    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    // 16) Jump into the corresponding baseline JIT code.

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT2 != GPRInfo::cachedResultRegister && GPRInfo::regT2 != GPRInfo::cachedResultRegister2);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    m_jit.jump(GPRInfo::regT2);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF(" -> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)