/*
 * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
{
    if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
        m_disassembler = std::make_unique<Disassembler>(dfg);
}

JITCompiler::~JITCompiler()
{
}

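// linkOSRExits() makes two passes over the OSR exits. The first pass only runs when this
// compilation is being profiled: it records the label of every branch that can trigger
// each exit (or the exit's replacement source) so the exit sites can be reported later.
// The second pass links each exit's failure jumps to code that stores the exit's index
// into vm()->osrExitIndex and ends in a patchable jump; link() later points that jump at
// the OSR exit generation thunk.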
void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (m_graph.compilation()) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();
        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        exit.setPatchableCodeOffset(patchableJump());
    }
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
    jitAssertTagsInPlace();
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

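// There are two lists of exception checks. Checks recorded via
// m_exceptionChecksWithCallFrameRollback (used by operations such as the stack overflow
// path, where this frame is not fully usable) look the handler up from the caller frame
// via lookupExceptionHandlerFromCallerFrame; ordinary checks go through
// lookupExceptionHandler. Both paths pass the VM and the CallFrame* and then jump to the
// exception handler.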
void JITCompiler::compileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));

        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));

        jumpToExceptionHandler();
    }
}

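// link() populates the CodeBlock-side data structures once code generation is complete:
// it fills in switch jump tables (and clears unused ones), links all outgoing calls,
// finalizes the inline caches recorded in m_getByIds, m_putByIds, and m_ins, binds JS
// call link info, points every OSR exit's patchable jump at the exit generation thunk,
// and reports the exit site labels when this compilation is being profiled.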
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTF::move(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
        info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
        info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
        info.callReturnLocation = callReturnLocation;
        info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }

    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        JSCallRecord& record = m_jsCalls[i];
        CallLinkInfo& info = *record.m_info;
        ThunkGenerator generator = linkThunkGeneratorFor(
            info.specializationKind(),
            RegisterPreservationNotRequired);
        linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(generator).code().executableAddress()));
        info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
            linkBuffer.locationOf(record.m_targetToCheck),
            linkBuffer.locationOfNearCall(record.m_fastCall));
    }

    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }

    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();
}

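// compile() generates code for non-function (program/eval) code blocks: there is a
// single entry point and no arity check. compileFunction() below handles function code
// blocks, which additionally need an arity-checking entry point.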
void JITCompiler::compile()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    m_speculative = std::make_unique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer));
}

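// compileFunction() emits the same body as compile() plus a second entry point,
// m_arityCheck, for calls that may have passed the wrong number of arguments. That path
// calls operationCallArityCheck/operationConstructArityCheck; if fixup is required
// (regT0 is nonzero), it loads the matching arity-check-fail return thunk address,
// calls the arity fixup thunk, and then jumps back to the normal entry point.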
void JITCompiler::compileFunction()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
    branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    GPRReg thunkReg;
#if USE(JSVALUE64)
    thunkReg = GPRInfo::regT7;
#else
    thunkReg = GPRInfo::regT5;
#endif
    CodeLocationLabel* arityThunkLabels =
        m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
    move(TrustedImmPtr(arityThunkLabels), thunkReg);
    loadPtr(BaseIndex(thunkReg, GPRInfo::regT0, timesPtr()), thunkReg);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer), withArityCheck);
}

void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldShowDisassembly()) {
        m_disassembler->dump(linkBuffer);
        linkBuffer.didAlreadyDisassemble();
    }

    if (m_graph.m_plan.compilation)
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}

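// On 32-bit platforms double constants are kept in a side table (a Bag<double> owned by
// the graph) and loaded from memory rather than materialized inline. The map below
// deduplicates constants by their bit pattern; link() transfers ownership of the table
// to the JITCode's common data.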
#if USE(JSVALUE32_64)
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
    double value = node->asNumber();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->value;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif

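// noticeOSREntry() records, for each block head that can be entered via OSR, the
// abstract values the DFG expects for every argument and local, plus which machine
// stack slots are live, which locals are flushed as double or Int52, and any
// local-to-machine-local reshufflings. The OSR entry machinery consults this data
// before allowing entry from the baseline JIT.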
void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
    if (!basicBlock.intersectionOfCFAHasVisited)
        return;

    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));

    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;

    // Fix the expected values: in our protocol, a dead variable will have an expected
    // value of (None, []). But the old JIT may stash some values there. So we really
    // need (Top, TOP).
    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
        Node* node = basicBlock.variablesAtHead.argument(argument);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.argument(argument).makeHeapTop();
    }
    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
        Node* node = basicBlock.variablesAtHead.local(local);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.local(local).makeHeapTop();
        else {
            VariableAccessData* variable = node->variableAccessData();
            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());

            switch (variable->flushFormat()) {
            case FlushedDouble:
                entry->m_localsForcedDouble.set(local);
                break;
            case FlushedInt52:
                entry->m_localsForcedMachineInt.set(local);
                break;
            default:
                break;
            }

            if (variable->local() != variable->machineLocal()) {
                entry->m_reshufflings.append(
                    OSREntryReshuffling(
                        variable->local().offset(), variable->machineLocal().offset()));
            }
        }
    }

    entry->m_reshufflings.shrinkToFit();
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)