/*
 * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "VM.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
{
    if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
        m_disassembler = std::make_unique<Disassembler>(dfg);
}

JITCompiler::~JITCompiler()
{
}

void JITCompiler::linkOSRExits()
{
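    // When profiling is enabled, record one label per exit site so that link() can later
    // report each exit's machine-code address to the Profiler::Compilation object.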
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (m_graph.compilation()) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

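    // Bind each exit's failure jumps (or record the replacement destination for exits that
    // have no jumps of their own), note which exit fired in vm->osrExitIndex, and plant a
    // patchable jump that link() will point at the OSR exit generation thunk.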
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();
        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        exit.setPatchableCodeOffset(patchableJump());
    }
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save the return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
    jitAssertTagsInPlace();
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

void JITCompiler::compileExceptionHandlers()
{
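    // Two flavors of exception check land here. Checks added via
    // callOperationWithCallFrameRollbackOnException (the stack-overflow and arity-check slow
    // paths in this file) look up their handler starting from the caller's frame; ordinary
    // exception checks use lookupExceptionHandler on the current frame. Both end by jumping
    // to the selected handler.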
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));

        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));

        jumpToExceptionHandler();
    }
}

void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTF::move(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

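    // Fill in the integer and character switch jump tables: every slot starts out pointing at
    // the fall-through block, then each case is overwritten with the address of its target
    // block. String switches are handled separately below, and tables that ended up unused
    // are cleared.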
    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

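    // Finalize the inline caches: the patchable GetById/PutById accesses, plus the structure
    // stub info recorded for the m_ins records (the 'in' checks), which stores the offsets
    // from the slow-path call back to the done label, the patchable jump, and the slow-path
    // entry.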
    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
        info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
        info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
        info.callReturnLocation = callReturnLocation;
        info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }

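    // Link the JS call sites: each slow call is pointed at the link thunk chosen for its call
    // kind, and the CallLinkInfo records the locations of the fast call, the slow call, and
    // the callee check so they can be repatched later.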
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        JSCallRecord& record = m_jsCalls[i];
        CallLinkInfo& info = *record.m_info;
        ThunkGenerator generator = linkThunkGeneratorFor(
            info.specializationKind(),
            RegisterPreservationNotRequired);
        linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(generator).code().executableAddress()));
        info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
            linkBuffer.locationOf(record.m_targetToCheck),
            linkBuffer.locationOfNearCall(record.m_fastCall));
    }

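    // Point every OSR exit's patchable jump at the OSR exit generation thunk. Exits that were
    // emitted without a branch of their own (m_replacementSource is set) are additionally
    // registered as jump replacements, so they can be activated later by patching the
    // original code.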
    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }

    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();
}

void JITCompiler::compile()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    m_speculative = std::make_unique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
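    // The frame grows toward lower addresses, so regT1 now holds the lowest address this
    // frame may touch; if the stack limit is above that address, the check fails and we take
    // the stack overflow path generated below.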

    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

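    // Slow-path operation calls may need scratch space beyond the frame proper, so drop the
    // stack pointer by maxFrameExtentForSlowPathCall (when it is non-zero) before calling out.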
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer));
}

void JITCompiler::compileFunction()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check, we will already have moved the return address,
    // so we enter after this point.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function header fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check that the correct number of arguments
    // has been passed to the call (we only use the fast entry point where we can statically
    // determine that the correct number of arguments has been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
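    // regT0 holds the result of the arity check operation: zero means no fixup is needed and
    // we can enter at fromArityCheck directly. Otherwise regT0 indexes the table of
    // arity-check-fail return thunks; the selected entry is loaded into thunkReg, the shared
    // arity fixup thunk (linked below as m_callArityFixup) is called, and we then enter the
    // function.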
    branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    GPRReg thunkReg;
#if USE(JSVALUE64)
    thunkReg = GPRInfo::regT7;
#else
    thunkReg = GPRInfo::regT5;
#endif
    CodeLocationLabel* arityThunkLabels =
        m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
    move(TrustedImmPtr(arityThunkLabels), thunkReg);
    loadPtr(BaseIndex(thunkReg, GPRInfo::regT0, timesPtr()), thunkReg);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer), withArityCheck);
}

void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldShowDisassembly()) {
        m_disassembler->dump(linkBuffer);
        linkBuffer.didAlreadyDisassemble();
    }

    if (m_graph.m_plan.compilation)
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}

#if USE(JSVALUE32_64)
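// Under JSVALUE32_64, double constants are accessed through a pointer into a side constant
// pool. This returns a stable address for the node's value, allocating a slot on first use;
// the map is keyed by the value's bit pattern, so e.g. +0.0 and -0.0 get distinct slots.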
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
    double value = node->asNumber();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->second;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif

void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
    if (!basicBlock.intersectionOfCFAHasVisited)
        return;

    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));

    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;

    // Fix the expected values: in our protocol, a dead variable will have an expected
    // value of (None, []). But the old JIT may stash some values there. So we really
    // need (Top, TOP).
    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
        Node* node = basicBlock.variablesAtHead.argument(argument);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.argument(argument).makeHeapTop();
    }
    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
        Node* node = basicBlock.variablesAtHead.local(local);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.local(local).makeHeapTop();
        else {
            VariableAccessData* variable = node->variableAccessData();
            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());

            switch (variable->flushFormat()) {
            case FlushedDouble:
                entry->m_localsForcedDouble.set(local);
                break;
            case FlushedInt52:
                entry->m_localsForcedMachineInt.set(local);
                break;
            default:
                break;
            }

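            // If the variable's bytecode location and its machine stack slot differ, record a
            // reshuffling so that OSR entry can move the value into the slot the DFG code
            // expects.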
            if (variable->local() != variable->machineLocal()) {
                entry->m_reshufflings.append(
                    OSREntryReshuffling(
                        variable->local().offset(), variable->machineLocal().offset()));
            }
        }
    }

    entry->m_reshufflings.shrinkToFit();
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)