/*
 * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "VM.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
{
    if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
        m_disassembler = std::make_unique<Disassembler>(dfg);
}

JITCompiler::~JITCompiler()
{
}

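// Stitch the recorded OSR exits into the generated code: each exit's failure
// jumps (or its jump-replacement site) are routed to a patchable jump that
// will later be pointed at the OSR exit thunk. When profiling, the labels of
// all exit sites are also collected so they can be reported.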
void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (m_graph.compilation()) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();
        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        exit.setPatchableCodeOffset(patchableJump());
    }
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
    jitAssertTagsInPlace();
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

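// Emit the out-of-line paths for exception checks recorded during code
// generation. Each path passes the VM and the CallFrame to the appropriate
// lookup function and then jumps to the handler it finds; the rollback
// variant first resets the stack pointer so the caller's frame can be
// unwound.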
void JITCompiler::compileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));

        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));

        jumpToExceptionHandler();
    }
}

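// Runs once code generation has succeeded: populates the JITCode and
// CodeBlock data structures, rewrites the switch jump tables to point at DFG
// block heads, binds outgoing calls and inline caches, and routes every OSR
// exit through the exit generation thunk.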
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTF::move(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

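    // Populate the switch jump tables with the addresses of the DFG blocks
    // they branch to. Immediate/character tables the DFG did not use are
    // cleared; string tables cannot be cleared here (see the NOTE below).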
    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

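    // Finalize the property access inline caches, and record in each
    // StructureStubInfo the code offsets it needs for later repatching.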
    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
        info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
        info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
        info.callReturnLocation = callReturnLocation;
        info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }

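    // Bind each JS call site: point the slow path at the appropriate link
    // thunk and record the call locations so the call can later be linked to
    // a specific callee.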
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        JSCallRecord& record = m_jsCalls[i];
        CallLinkInfo& info = *record.m_info;
        ThunkGenerator generator = linkThunkGeneratorFor(
            info.specializationKind(),
            RegisterPreservationNotRequired);
        linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(generator).code().executableAddress()));
        info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
            linkBuffer.locationOf(record.m_targetToCheck),
            linkBuffer.locationOfNearCall(record.m_fastCall));
    }

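    // Point each OSR exit's patchable jump at the exit generation thunk; the
    // actual exit ramp is compiled lazily, the first time the exit is taken.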
    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }

    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();
}

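// Compiles code that has a single entry point and needs no arity check
// (program and eval code), as opposed to compileFunction() below.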
void JITCompiler::compile()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    m_speculative = std::make_unique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer));
}

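// Compiles function code, which has two entry points: the fast path, used
// when the caller is known to pass the right number of arguments, and the
// arity-check entry point generated in the footer below.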
void JITCompiler::compileFunction()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals.
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
    branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    GPRReg thunkReg;
#if USE(JSVALUE64)
    thunkReg = GPRInfo::regT7;
#else
    thunkReg = GPRInfo::regT5;
#endif
    CodeLocationLabel* arityThunkLabels =
        m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
    move(TrustedImmPtr(arityThunkLabels), thunkReg);
    loadPtr(BaseIndex(thunkReg, GPRInfo::regT0, timesPtr()), thunkReg);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTF::move(linkBuffer), withArityCheck);
}

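// Dump the generated code if disassembly was requested, and report it to the
// profiler if this compilation is being profiled.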
void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldShowDisassembly()) {
        m_disassembler->dump(linkBuffer);
        linkBuffer.didAlreadyDisassemble();
    }

    if (m_graph.m_plan.compilation)
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}

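// On 32-bit platforms, double constants live in an out-of-line pool. This
// returns a stable address for the given node's numeric value, adding it to
// the pool (and to the deduplication map) on first use.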
#if USE(JSVALUE32_64)
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
    double value = node->asNumber();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->second;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif

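// Record a potential OSR entry point at the head of a basic block: capture
// the abstract values the DFG expects for each argument and local, so an
// attempted entry from the baseline JIT can be validated at runtime, and
// note any locals that must be reshuffled or converted on the way in.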
void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
    if (!basicBlock.intersectionOfCFAHasVisited)
        return;

    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));

    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;

    // Fix the expected values: in our protocol, a dead variable will have an expected
    // value of (None, []). But the old JIT may stash some values there. So we really
    // need (Top, TOP).
    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
        Node* node = basicBlock.variablesAtHead.argument(argument);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.argument(argument).makeHeapTop();
    }
    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
        Node* node = basicBlock.variablesAtHead.local(local);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.local(local).makeHeapTop();
        else {
            VariableAccessData* variable = node->variableAccessData();
            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());

            switch (variable->flushFormat()) {
            case FlushedDouble:
                entry->m_localsForcedDouble.set(local);
                break;
            case FlushedInt52:
                entry->m_localsForcedMachineInt.set(local);
                break;
            default:
                break;
            }

            if (variable->local() != variable->machineLocal()) {
                entry->m_reshufflings.append(
                    OSREntryReshuffling(
                        variable->local().offset(), variable->machineLocal().offset()));
            }
        }
    }

    entry->m_reshufflings.shrinkToFit();
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)