/*
 * Copyright (C) 2008, 2009, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif
#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "ProfilerDatabase.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include <wtf/CryptographicallyRandomNumber.h>

namespace JSC {
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}
void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}
void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
{
}
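
// Emitted at entry points that tier-up cares about: bump the CodeBlock's execution counter
// and, once it crosses its threshold, call operationOptimize. A non-zero result is treated
// as the entry address of optimized code, so we restore the stack pointer and jump there;
// otherwise we fall through and keep running baseline code.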
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);
    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
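
// The following macros expand to the switch cases used by privateCompileMainPass() and
// privateCompileSlowCases(): each case either calls a generic slow-path operation
// (DEFINE_SLOW_OP), the per-opcode fast-path emitter (DEFINE_OP), or the per-opcode
// slow-case emitter (DEFINE_SLOWCASE_OP), then advances m_bytecodeOffset past the opcode.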
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name);

#define DEFINE_SLOWCASE_OP(name) \
    case name: \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name);
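
// The main pass walks the bytecode stream once, records a label for every bytecode offset,
// and emits the fast-path code for each opcode via the DEFINE_OP/DEFINE_SLOW_OP cases below.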
void JIT::privateCompileMainPass()
{
    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        switch (opcodeID) {
        DEFINE_SLOW_OP(del_by_val)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object)
        DEFINE_SLOW_OP(typeof)
        DEFINE_OP(op_touch_entry)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_captured_mov)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_captured_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_primitive)
        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}
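
// The link pass binds the intra-method jumps recorded during the main pass to the labels of
// their target bytecode offsets.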
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}
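
// The slow-case pass walks m_slowCases (populated by the fast paths) and emits, for each
// bytecode offset that registered slow cases, the out-of-line code that handles them before
// jumping back to the fast path.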
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_captured_mov)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_callee)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}
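
// privateCompile() drives the whole baseline compilation: it decides how much profiling to
// emit based on the DFG's capability level, emits the prologue, stack check, main pass, slow
// cases, arity check, and exception handlers, then links everything into a LinkBuffer and
// installs the finished code on the CodeBlock.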
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanInline:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }
    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen.
    if (m_randomGenerator.getUint32() & 1)
        nop();

    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif
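
    // For function code, emit value-profiling sites for the incoming arguments (when
    // profiling is on) and a stack-overflow check against the VM's stack limit before
    // committing to the new frame.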
    Jump stackOverflow;
    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }

        addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
        stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);
    }

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();
    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());
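
    // For function code there are two out-of-line entry paths: the stack-overflow slow path
    // linked from the check above, and the arityCheck entry point used when a caller passes
    // the wrong number of arguments; the latter fixes up the frame (via the arityFixup thunk)
    // and then jumps back to beginLabel.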
    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        stackOverflow.link(this);
        m_bytecodeOffset = 0;
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        if (returnValueGPR != regT0)
            move(returnValueGPR, regT0);
        branchTest32(Zero, regT0).linkTo(beginLabel, this);
        GPRReg thunkReg;
#if USE(JSVALUE64)
        thunkReg = GPRInfo::regT7;
#else
        thunkReg = GPRInfo::regT5;
#endif
        move(TrustedImmPtr(m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters())), thunkReg);
        loadPtr(BaseIndex(thunkReg, regT0, timesPtr()), thunkReg);
        emitNakedCall(m_vm->getCTIStub(arityFixup).code());

        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.

        jump(beginLabel);
    }
    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;
    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }
    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }
    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);
    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.callReturnLocation = patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(compilationInfo.hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(compilationInfo.hotPathOther);
    }
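
    // Record a compact mapping from bytecode offsets to machine-code offsets so that the
    // runtime can later translate a bytecode offset back to its location in this code.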
    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly())
        m_disassembler->dump(patchBuffer);
    if (m_compilation) {
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}
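
// Exception checks recorded during codegen all funnel into this common tail: it materializes
// the VM and CallFrame arguments and calls lookupExceptionHandler (checks that require
// call-frame rollback pass the caller's frame instead of the current one), then jumps to
// whatever handler that lookup selected.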
void JIT::privateCompileExceptionHandlers()
{
    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
        return;

    Jump doLookup;

    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);
        emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR1);
        doLookup = jump();
    }

    if (!m_exceptionChecks.empty())
        m_exceptionChecks.link(this);

    // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

    if (doLookup.isSet())
        doLookup.link(this);

    move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);

#if CPU(X86)
    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
    poke(GPRInfo::argumentGPR0);
    poke(GPRInfo::argumentGPR1, 1);
#endif
    m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
    jumpToExceptionHandler();
}
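
// The frame must cover the CodeBlock's callee registers plus the maximum scratch space any
// slow-path call needs, rounded so that the resulting frame keeps the stack pointer aligned.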
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters);
}
int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

} // namespace JSC

#endif // ENABLE(JIT)