/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGJITCodeGenerator.h"
#include "DFGNonSpeculativeJIT.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSpeculativeJIT.h"
#include "JSGlobalData.h"
#include "LinkBuffer.h"

namespace JSC { namespace DFG {

// This method is used to fill a numeric value into an FPR when linking speculative -> non-speculative.
void JITCompiler::fillNumericToDouble(NodeIndex nodeIndex, FPRReg fpr, GPRReg temporary)
{
    Node& node = graph()[nodeIndex];

    if (node.isConstant()) {
        ASSERT(node.op == DoubleConstant);
        move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfDoubleConstant(nodeIndex)))), temporary);
        movePtrToDouble(temporary, fpr);
    } else {
        loadPtr(addressFor(node.virtualRegister()), temporary);
        Jump isInteger = branchPtr(MacroAssembler::AboveOrEqual, temporary, GPRInfo::tagTypeNumberRegister);
        jitAssertIsJSDouble(temporary);
        // Unbox the double (see the note on the value representation below).
        addPtr(GPRInfo::tagTypeNumberRegister, temporary);
        movePtrToDouble(temporary, fpr);
        Jump hasUnboxedDouble = jump();
        isInteger.link(this);
        convertInt32ToDouble(temporary, fpr);
        hasUnboxedDouble.link(this);
    }
}
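
// Note on the 64-bit number representation relied on above: a boxed int32 has
// the high tag bits (the TagTypeNumber pattern) set, so any boxed value that is
// unsigned greater-or-equal to tagTypeNumberRegister is an int32. A boxed
// double is the raw IEEE754 bit pattern plus a constant encode offset; because
// the tag pattern equals 2^64 minus that offset, adding tagTypeNumberRegister
// (mod 2^64) strips the offset to unbox, and subtracting it re-boxes.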

// This method is used to fill an integer value into a GPR when linking speculative -> non-speculative.
void JITCompiler::fillInt32ToInteger(NodeIndex nodeIndex, GPRReg gpr)
{
    Node& node = graph()[nodeIndex];

    if (node.isConstant()) {
        ASSERT(node.op == Int32Constant);
        move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
    } else {
#if DFG_JIT_ASSERT
        // Redundant load, just so we can check the tag!
        loadPtr(addressFor(node.virtualRegister()), gpr);
        jitAssertIsJSInt32(gpr);
#endif
        load32(addressFor(node.virtualRegister()), gpr);
    }
}
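
// Note: a plain load32 of the register slot yields the int32 payload directly,
// since an int32 is boxed with its payload in the low 32 bits and the tag in
// the high bits (little-endian layout assumed).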

// This method is used to fill a JSValue into a GPR when linking speculative -> non-speculative.
void JITCompiler::fillToJS(NodeIndex nodeIndex, GPRReg gpr)
{
    Node& node = graph()[nodeIndex];

    if (node.isConstant()) {
        if (isInt32Constant(nodeIndex)) {
            JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex));
            move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
        } else if (isDoubleConstant(nodeIndex)) {
            JSValue jsValue(JSValue::EncodeAsDouble, valueOfDoubleConstant(nodeIndex));
            move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
        } else {
            ASSERT(isJSConstant(nodeIndex));
            JSValue jsValue = valueOfJSConstant(nodeIndex);
            move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
        }
        return;
    }

    loadPtr(addressFor(node.virtualRegister()), gpr);
}

void JITCompiler::jumpFromSpeculativeToNonSpeculative(const SpeculationCheck& check, const EntryLocation& entry, SpeculationRecovery* recovery)
{
    ASSERT(check.m_nodeIndex == entry.m_nodeIndex);

    // Link the jump from the Speculative path to here.
    check.m_check.link(this);

    // Does this speculation check require any additional recovery to be performed,
    // to restore any state that has been overwritten before we enter back into the
    // non-speculative path?
    if (recovery) {
        // The only additional recovery we currently support is for the integer add operation.
        ASSERT(recovery->type() == SpeculativeAdd);
        // Revert the add by subtracting the operand back out of the destination.
        sub32(recovery->src(), recovery->dest());
    }

    // FIXME: This is hideously inefficient!
    // Where a value is live in a register in the speculative path, and is required in a register
    // on the non-speculative path, we should not need to be spilling it and reloading (we may
    // need to spill anyway, if the value is marked as spilled on the non-speculative path).
    // This may also be spilling values that don't need spilling, e.g. are already spilled,
    // are constants, or are arguments.

    // Spill all GPRs in use by the speculative path.
    for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index) {
        NodeIndex nodeIndex = check.m_gprInfo[index].nodeIndex;
        if (nodeIndex == NoNode)
            continue;

        DataFormat dataFormat = check.m_gprInfo[index].format;
        VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister();

        ASSERT(dataFormat == DataFormatInteger || dataFormat == DataFormatCell || dataFormat & DataFormatJS);
        if (dataFormat == DataFormatInteger)
            orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::toRegister(index));
        storePtr(GPRInfo::toRegister(index), addressFor(virtualRegister));
    }

    // Spill all FPRs in use by the speculative path.
    for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index) {
        NodeIndex nodeIndex = check.m_fprInfo[index];
        if (nodeIndex == NoNode)
            continue;

        VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister();

        moveDoubleToPtr(FPRInfo::toRegister(index), GPRInfo::regT0);
        // Re-box the double by subtracting the tag (adding the encode offset back in).
        subPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
        storePtr(GPRInfo::regT0, addressFor(virtualRegister));
    }
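
    // Note: GPRInfo::regT0 is free to use as a scratch register here and below:
    // every GPR live on the speculative path has already been spilled above, and
    // if the non-speculative path wants regT0 it is refilled by the loops below.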

    // Fill all FPRs in use by the non-speculative path.
    for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index) {
        NodeIndex nodeIndex = entry.m_fprInfo[index];
        if (nodeIndex == NoNode)
            continue;

        fillNumericToDouble(nodeIndex, FPRInfo::toRegister(index), GPRInfo::regT0);
    }

    // Fill all GPRs in use by the non-speculative path.
    for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index) {
        NodeIndex nodeIndex = entry.m_gprInfo[index].nodeIndex;
        if (nodeIndex == NoNode)
            continue;

        DataFormat dataFormat = entry.m_gprInfo[index].format;
        if (dataFormat == DataFormatInteger)
            fillInt32ToInteger(nodeIndex, GPRInfo::toRegister(index));
        else {
            ASSERT(dataFormat & DataFormatJS || dataFormat == DataFormatCell); // Treat cell as JSValue for now!
            fillToJS(nodeIndex, GPRInfo::toRegister(index));
            // FIXME: For subtypes of DataFormatJS, should jitAssert the subtype?
        }
    }

    // Jump into the non-speculative path.
    jump(entry.m_entry);
}

void JITCompiler::linkSpeculationChecks(SpeculativeJIT& speculative, NonSpeculativeJIT& nonSpeculative)
{
    // Iterators to walk over the set of bail-outs & corresponding entry points.
    SpeculationCheckVector::Iterator checksIter = speculative.speculationChecks().begin();
    SpeculationCheckVector::Iterator checksEnd = speculative.speculationChecks().end();
    NonSpeculativeJIT::EntryLocationVector::Iterator entriesIter = nonSpeculative.entryLocations().begin();
    NonSpeculativeJIT::EntryLocationVector::Iterator entriesEnd = nonSpeculative.entryLocations().end();
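
    // Both vectors are generated in node order, so linking is a linear merge of
    // the two: walk the speculation checks, advancing the entry-point iterator
    // once every check for its node has been linked.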

    // Iterate over the speculation checks.
    while (checksIter != checksEnd) {
        // For every bail-out from the speculative path, we must have provided an entry point
        // into the non-speculative one.
        ASSERT(checksIter->m_nodeIndex == entriesIter->m_nodeIndex);

        // There may be multiple bail-outs that map to the same entry point!
        do {
            ASSERT(checksIter != checksEnd);
            ASSERT(entriesIter != entriesEnd);

            // Plant code to link this speculation failure.
            const SpeculationCheck& check = *checksIter;
            const EntryLocation& entry = *entriesIter;
            jumpFromSpeculativeToNonSpeculative(check, entry, speculative.speculationRecovery(check.m_recoveryIndex));
            ++checksIter;
        } while (checksIter != checksEnd && checksIter->m_nodeIndex == entriesIter->m_nodeIndex);
        ++entriesIter;
    }

    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56289
    ASSERT(!(checksIter != checksEnd));
    ASSERT(!(entriesIter != entriesEnd));
}

void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    // === Stage 1 - Function header code generation ===
    //
    // This code currently matches the old JIT. In the function header we need to
    // pop the return address (since we do not allow any recursion on the machine
    // stack), and perform a fast register file check.

    // This is the main entry point, without performing an arity check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the register file
    // check), which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler.)
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);

    // Set up a pointer to the codeblock in the CallFrameHeader.
    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
    // Return here after the register file check.
    Label fromRegisterFileCheck = label();

    // === Stage 2 - Function body code generation ===
    //
    // We generate the speculative code path, followed by the non-speculative
    // code for the function. Next we need to link the two together, making
    // bail-outs from the speculative path jump to the corresponding point on
    // the non-speculative one (and generating any code necessary to juggle
    // register values around, rebox values, and ensure spilled, to match the
    // non-speculative path's requirements).

#if DFG_JIT_BREAK_ON_EVERY_FUNCTION
    breakpoint();
#endif

    // First generate the speculative path.
    Label speculativePathBegin = label();
    SpeculativeJIT speculative(*this);
#if !DFG_DEBUG_LOCAL_DISBALE_SPECULATIVE
    bool compiledSpeculative = speculative.compile();
#else
    bool compiledSpeculative = false;
#endif

    // Next, generate the non-speculative path. We pass this a SpeculationCheckIndexIterator
    // to allow it to check which nodes in the graph may bail out, and may need to reenter the
    // non-speculative path.
    if (compiledSpeculative) {
        SpeculationCheckIndexIterator checkIterator(speculative.speculationChecks());
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);

        // Link the bail-outs from the speculative path to the corresponding entry points into the non-speculative one.
        linkSpeculationChecks(speculative, nonSpeculative);
    } else {
        // If compilation through the SpeculativeJIT failed, throw away the code we generated,
        // discarding any call records planted for it, and rewind the assembler.
        m_calls.clear();
        rewindToLabel(speculativePathBegin);

        SpeculationCheckVector noChecks;
        SpeculationCheckIndexIterator checkIterator(noChecks);
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);
    }

    // === Stage 3 - Function footer code generation ===
    //
    // Generate code to look up and jump to exception handlers, to perform the slow
    // register file check (if the fast one in the function header fails), and to
    // generate the entry point with arity check.

    // Iterate over the m_calls vector, checking for exception checks,
    // and linking them to here.
    unsigned exceptionCheckCount = 0;
    for (unsigned i = 0; i < m_calls.size(); ++i) {
        Jump& exceptionCheck = m_calls[i].m_exceptionCheck;
        if (exceptionCheck.isSet()) {
            exceptionCheck.link(this);
            ++exceptionCheckCount;
        }
    }

    // If any exception checks were linked, generate code to look up a handler.
    if (exceptionCheckCount) {
        // lookupExceptionHandler is passed two arguments: exec (the CallFrame*), and
        // an identifier for the operation that threw the exception, which we can use
        // to look up handler information. The identifier we use is the return address
        // of the call out from JIT code that threw the exception; this is still
        // available on the stack, just below the stack pointer!
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        peek(GPRInfo::argumentGPR1, -1);
        m_calls.append(CallRecord(call(), lookupExceptionHandler));
        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
        // and the address of the handler in returnValueGPR2.
        jump(GPRInfo::returnValueGPR2);
    }

    // Generate the register file check; if the fast check in the function header fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callRegisterFileCheck = call();
    jump(fromRegisterFileCheck);

    // The fast entry point into a function does not check that the correct number of
    // arguments has been passed to the call (we only use the fast entry point where we
    // can statically determine that the correct number of arguments has been passed,
    // or where it has already been checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callArityCheck = call();
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);
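
    // Note: this stanza assumes the caller leaves the argument count in regT1 at
    // the arity-check entry point, and that the arity-check helper returns the
    // (possibly relocated) callee CallFrame* in regT0; both appear to follow the
    // conventions of the existing JIT stubs.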

    // === Stage 4 - Link ===
    //
    // Link the code, and populate data in CodeBlock data structures.

    LinkBuffer linkBuffer(*m_globalData, this, m_globalData->executableAllocator);

#if DFG_DEBUG_VERBOSE
    fprintf(stderr, "JIT code start at %p\n", linkBuffer.debugAddress());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(exceptionCheckCount);
        for (unsigned i = 0; i < m_calls.size(); ++i) {
            if (m_calls[i].m_exceptionCheck.isSet()) {
                unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_calls[i].m_call);
                unsigned exceptionInfo = m_calls[i].m_exceptionInfo;
                m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
            }
        }
    }

    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = linkBuffer.finalizeCode();
}

#if DFG_JIT_ASSERT
void JITCompiler::jitAssertIsInt32(GPRReg gpr)
{
    Jump checkInt32 = branchPtr(BelowOrEqual, gpr, TrustedImmPtr(reinterpret_cast<void*>(static_cast<uintptr_t>(0xFFFFFFFFu))));
    breakpoint();
    checkInt32.link(this);
}

void JITCompiler::jitAssertIsJSInt32(GPRReg gpr)
{
    Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
    breakpoint();
    checkJSInt32.link(this);
}

void JITCompiler::jitAssertIsJSNumber(GPRReg gpr)
{
    Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
    breakpoint();
    checkJSNumber.link(this);
}

void JITCompiler::jitAssertIsJSDouble(GPRReg gpr)
{
    Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
    Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
    checkJSInt32.link(this);
    breakpoint();
    checkJSNumber.link(this);
}
#endif
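
// A quick key to the tag tests above, given the 64-bit value encoding: any
// boxed value unsigned greater-or-equal to tagTypeNumberRegister is an int32
// (the AboveOrEqual test), any value with one of the tag bits set is a number
// (the NonZero mask test), and a double is a number that is not an int32 -
// jitAssertIsJSDouble breakpoints on int32s and on non-numbers, accepting only
// boxed doubles.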

#if ENABLE(SAMPLING_COUNTERS) && CPU(X86_64) // Or any other 64-bit platform!
void JITCompiler::emitCount(AbstractSamplingCounter& counter, uint32_t increment)
{
    addPtr(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
}
#endif

#if ENABLE(SAMPLING_COUNTERS) && CPU(X86) // Or any other little-endian 32-bit platform!
void JITCompiler::emitCount(AbstractSamplingCounter& counter, uint32_t increment)
{
    intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
    add32(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
}
#endif
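
// On 32-bit platforms the 64-bit counter cannot be bumped in one instruction:
// the low word is incremented with add32, then the carry is propagated into
// the high word (at address counter + sizeof(int32_t), hence the little-endian
// requirement) with addWithCarry32.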

#if ENABLE(SAMPLING_FLAGS)
void JITCompiler::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

void JITCompiler::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif

} } // namespace JSC::DFG

#endif