/*
 * Copyright (C) 2008, 2009, 2012-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "ProfilerDatabase.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>

namespace JSC {
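// The ctiPatch* helpers below locate an already-emitted (near) call by its return
// address and re-point it at a new target, using a RepatchBuffer scoped to the
// owning CodeBlock.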
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
{
}
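// Entry-point tier-up check: bump the CodeBlock's execution counter and, if it
// crosses its threshold, call operationOptimize. A non-zero result is an entry
// point into optimized code; we install the returned stack pointer and jump there.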
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);
    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
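// Only take the slow path if the watchpoint set is still watchable; an invalidated
// (or absent) set needs no notification on write.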
void JIT::emitNotifyWrite(WatchpointSet* set)
{
    if (!set || set->state() == IsInvalidated)
        return;

    addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
}
void JIT::assertStackPointerOffset()
{
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
    breakpoint();
    ok.link(this);
}
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name);

#define DEFINE_SLOWCASE_OP(name) \
    case name: \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name);
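// First pass: walk the bytecode in order, emitting the fast path for each opcode and
// recording a label per bytecode offset for the link and slow-case passes to target.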
void JIT::privateCompileMainPass()
{
    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        switch (opcodeID) {
        DEFINE_SLOW_OP(del_by_val)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object_or_null)
        DEFINE_SLOW_OP(typeof)

        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_create_direct_arguments)
        DEFINE_OP(op_create_scoped_arguments)
        DEFINE_OP(op_create_out_of_band_arguments)
        DEFINE_OP(op_check_tdz)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_create_lexical_environment)
        DEFINE_OP(op_get_scope)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_is_object)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_profile_type)
        DEFINE_OP(op_profile_control_flow)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_by_id)
        DEFINE_OP(op_put_setter_by_id)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_string)
        DEFINE_OP(op_to_primitive)
        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        DEFINE_OP(op_get_from_arguments)
        DEFINE_OP(op_put_to_arguments)
        DEFINE_OP(op_get_enumerable_length)
        DEFINE_OP(op_has_generic_property)
        DEFINE_OP(op_has_structure_property)
        DEFINE_OP(op_has_indexed_property)
        DEFINE_OP(op_get_direct_pname)
        DEFINE_OP(op_get_property_enumerator)
        DEFINE_OP(op_enumerator_structure_pname)
        DEFINE_OP(op_enumerator_generic_pname)
        DEFINE_OP(op_to_index_string)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
}
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}
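// Emit the out-of-line slow paths recorded via addSlowCase() during the main pass;
// each slow-case sequence rejoins the fast path through emitJumpSlowToHot().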
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path executions of that
    // instruction and the slow-path executions. Furthermore, if the slow-path code created
    // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_check_tdz)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_string)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        DEFINE_SLOWCASE_OP(op_has_indexed_property)
        DEFINE_SLOWCASE_OP(op_has_structure_property)
        DEFINE_SLOWCASE_OP(op_get_direct_pname)
        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
}
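// Top-level baseline compile: pick the profiling/optimization policy from the DFG
// capability level, emit the prologue, the main/link/slow-case passes, the stack and
// arity checks, then link everything and install the finished code on the CodeBlock.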
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }
    // This ensures that we have the most up to date type information when performing typecheck optimizations for op_profile_type.
    if (m_vm->typeProfiler())
        m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen
    if (m_randomGenerator.getUint32() & 1)
        nop();

    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
    }
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);

    move(regT1, stackPointerRegister);
    checkStackPointerAlignment();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

    stackOverflow.link(this);
    m_bytecodeOffset = 0;
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
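    // Arity-check entry point: re-enter at beginLabel if enough arguments were passed;
    // otherwise call the arity-check operation and, if it asks for fixup, run the
    // arity fixup thunk before re-entering the function body.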
    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        if (returnValueGPR != regT0)
            move(returnValueGPR, regT0);
        branchTest32(Zero, regT0).linkTo(beginLabel, this);
        GPRReg thunkReg;
#if USE(JSVALUE64)
        thunkReg = GPRInfo::regT7;
#else
        thunkReg = GPRInfo::regT5;
#endif
        CodeLocationLabel* failThunkLabels =
            m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
        move(TrustedImmPtr(failThunkLabels), thunkReg);
        loadPtr(BaseIndex(thunkReg, regT0, timesPtr()), thunkReg);
        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());

        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.

        jump(beginLabel);
    }
    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;
    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }
    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }
    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);
    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.setCallLocations(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation),
            patchBuffer.locationOf(compilationInfo.hotPathBegin),
            patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
    }
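    // Record a compact map from bytecode offset to machine-code offset for every
    // emitted instruction and hand it to the CodeBlock.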
    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);
    if (Options::showDisassembly()) {
        m_disassembler->dump(patchBuffer);
        patchBuffer.didAlreadyDisassemble();
    }
    if (m_compilation) {
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    CodeRef result = FINALIZE_CODE(
        patchBuffer,
        ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}
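// Shared landing pads for the exception checks emitted above: pass the VM and the
// CallFrame* to the lookup operation, then jump to whatever handler it finds.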
void JIT::privateCompileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).

        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
        jumpToExceptionHandler();
    }
}
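// Frame size for a baseline frame: the callee registers (already rounded up for stack
// alignment) plus headroom for the largest slow-path call.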
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters);
}
int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

} // namespace JSC

#endif // ENABLE(JIT)