/*
 * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
namespace JSC { namespace DFG {
JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
{
    if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new Disassembler(dfg));
}
JITCompiler::~JITCompiler()
{
}
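// For each speculation-failure site recorded during code generation, this binds the
// failure jumps (or remembers the jump-replacement source when there are none), stores
// the exit's index into VM::osrExitIndex, and emits the patchable jump that link() will
// later point at the OSR exit generation thunk.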
void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (m_graph.compilation()) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();
        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        exit.setPatchableCodeOffset(patchableJump());
    }
}
void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
    jitAssertTagsInPlace();
}
void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}
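// Emits the shared out-of-line landing pads for exception checks: one variant that first
// restores the caller's frame (for operations that must roll the call frame back before
// unwinding) and one that does not. Both end by calling lookupExceptionHandler and then
// jumping to whatever handler it finds.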
void JITCompiler::compileExceptionHandlers()
{
    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
        return;

    Jump doLookup;

    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);
        emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR1);
        doLookup = jump();
    }

    if (!m_exceptionChecks.empty())
        m_exceptionChecks.link(this);

    // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

    if (doLookup.isSet())
        doLookup.link(this);

    move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);

#if CPU(X86)
    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
    poke(GPRInfo::argumentGPR0);
    poke(GPRInfo::argumentGPR1, 1);
#endif
    m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
    jumpToExceptionHandler();
}
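// Once all code has been generated into the LinkBuffer, back-patch everything that could
// not be resolved at emit time: switch jump tables, outgoing C calls, inline caches,
// JS call link info, and the jumps into the OSR exit thunk.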
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;

    m_jitCode->common.machineCaptureStart = m_graph.m_machineCaptureStart;
    m_jitCode->common.slowArguments = WTF::move(m_graph.m_slowArguments);

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTF::move(m_graph.m_doubleConstants);
#endif
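    // Resolve Imm/Char switch jump tables: every slot is pointed at the fall-through block
    // first, then the slots that have explicit cases are overwritten. String switches are
    // handled separately below, and unused tables are cleared so they do not pin stale code.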
    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue() - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }
    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }
    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);
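    // For each inline cache (StructureStubInfo), record the offsets from the slow-path
    // call's return address to the done label, the patchable jump, and the slow-path
    // entry, so the cache can be repatched later.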
    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
        info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
        info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
        info.callReturnLocation = callReturnLocation;
        info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }
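    // Point each JS call's slow path at the call-linking thunk and record the code
    // locations (slow call, hot-path check, hot-path call) that the CallLinkInfo needs
    // for linking and unlinking the callee later.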
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        JSCallRecord& record = m_jsCalls[i];
        CallLinkInfo& info = *record.m_info;
        ThunkGenerator generator = linkThunkGeneratorFor(
            info.callType == CallLinkInfo::Construct ? CodeForConstruct : CodeForCall,
            RegisterPreservationNotRequired);
        linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(generator).code().executableAddress()));
        info.callReturnLocation = linkBuffer.locationOfNearCall(record.m_slowCall);
        info.hotPathBegin = linkBuffer.locationOf(record.m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(record.m_fastCall);
    }
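    // Link every patchable OSR exit jump to the exit generation thunk, and register any
    // recorded jump replacements so they can be activated later.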
    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }
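    // If this compilation is being profiled, resolve the per-exit labels recorded in
    // linkOSRExits() to executable addresses and report them as OSR exit sites.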
    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();
}
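// Top-level code generation for non-function (program/eval) code: emit the prologue, run
// the speculative JIT over the whole graph, then emit slow paths, exception handlers, and
// OSR entry trampolines.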
void JITCompiler::compile()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    m_speculative = adoptPtr(new SpeculativeJIT(*this));
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileBody();
    setEndOfMainPath();

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}
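// Allocates the LinkBuffer for non-function code, bailing out with a FailedFinalizer if
// executable memory could not be allocated, then performs the linking pass above and
// hands the finished code to a JITFinalizer.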
void JITCompiler::link()
{
    OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, *this, m_codeBlock, JITCompilationCanFail));
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
        m_graph.m_plan, m_jitCode.release(), linkBuffer.release()));
}
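// Code generation for function code: in addition to the main (arity-checked-elsewhere)
// entry point, this emits the stack overflow handler and the with-arity-check entry point
// that fixes up the frame before falling through to the fast path.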
void JITCompiler::compileFunction()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals.
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    // === Function body code generation ===
    m_speculative = adoptPtr(new SpeculativeJIT(*this));
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
    branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    GPRReg thunkReg;
#if USE(JSVALUE64)
    thunkReg = GPRInfo::regT7;
#else
    thunkReg = GPRInfo::regT5;
#endif
    move(TrustedImmPtr(m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters())), thunkReg);
    loadPtr(BaseIndex(thunkReg, GPRInfo::regT0, timesPtr()), thunkReg);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}
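// Linking for function code: same as link() above, but also links the arity fixup call to
// the arityFixup thunk and records the with-arity-check entry point for the finalizer.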
void JITCompiler::linkFunction()
{
    // === Link ===
    OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, *this, m_codeBlock, JITCompilationCanFail));
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixup)).code().executableAddress()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
        m_graph.m_plan, m_jitCode.release(), linkBuffer.release(), withArityCheck));
}
void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);

    if (m_graph.m_plan.compilation)
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}
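// On 32-bit (JSVALUE32_64) builds, double constants are materialized from an out-of-line
// pool owned by the Graph; this returns the pooled slot for the node's double value,
// creating the pool and the slot on first use.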
#if USE(JSVALUE32_64)
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
    ASSERT(m_graph.isNumberConstant(node));
    JSValue jsvalue = node->valueOfJSConstant(codeBlock());
    ASSERT(jsvalue.isDouble());

    double value = jsvalue.asDouble();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->second;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif
} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)