/*
 * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#include "CodeBlock.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include "DFGNode.h" // for DFG_SUCCESS_STATS
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "Operations.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

namespace JSC {
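
// The ctiPatch* helpers below repatch a call (or near call) in already-generated JIT code,
// identified by its return address, so that it targets a new trampoline or function.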
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : m_interpreter(vm->interpreter)
    , m_vm(vm)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
    , m_propertyAccessInstructionIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_globalResolveInfoIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeOffset((unsigned)-1)
    , m_mappedVirtualRegisterIndex(JSStack::ReturnPC)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
    , m_randomGenerator(cryptographicallyRandomNumber())
#if ENABLE(VALUE_PROFILER)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
#endif
{
}
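
// Emits the tier-up check: adds Options::executionCounterIncrementForReturn() to the
// CodeBlock's JIT execution counter and calls cti_optimize once the counter becomes
// non-negative; while it stays negative the stub call is skipped.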
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    Jump skipOptimize = branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForReturn()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
    JITStubCall stubCall(this, cti_optimize);
    stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
    ASSERT(!m_bytecodeOffset);
    stubCall.call();
    skipOptimize.link(this);
}

#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#if USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.addArgument(currentInstruction[3].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#endif // USE(JSVALUE32_64)

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }
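
// DEFINE_OP(op_foo) expands to a switch case that calls the corresponding emit_op_foo()
// (or emitSlow_op_foo() for DEFINE_SLOWCASE_OP) and then advances m_bytecodeOffset by the
// opcode's length via NEXT_OPCODE.
//
// The main pass walks the bytecode stream once, records a label for each instruction in
// m_labels, and emits the fast-path code for every opcode.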
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        killLastResultRegister();

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation && opcodeID != op_call_put_result) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        switch (opcodeID) {
        DEFINE_BINARY_OP(op_del_by_val)
        DEFINE_BINARY_OP(op_in)
        DEFINE_BINARY_OP(op_less)
        DEFINE_BINARY_OP(op_lesseq)
        DEFINE_BINARY_OP(op_greater)
        DEFINE_BINARY_OP(op_greatereq)
        DEFINE_UNARY_OP(op_is_function)
        DEFINE_UNARY_OP(op_is_object)
        DEFINE_UNARY_OP(op_typeof)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_convert_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)
        DEFINE_OP(op_init_global_const_check)
        case op_resolve_global_property:
        case op_resolve_global_var:
        case op_resolve_scoped_var:
        case op_resolve_scoped_var_on_top_scope:
        case op_resolve_scoped_var_with_top_scope_check:
        DEFINE_OP(op_resolve)
        case op_resolve_base_to_global:
        case op_resolve_base_to_global_dynamic:
        case op_resolve_base_to_scope:
        case op_resolve_base_to_scope_with_top_scope_check:
        DEFINE_OP(op_resolve_base)
        case op_put_to_base_variable:
        DEFINE_OP(op_put_to_base)
        DEFINE_OP(op_resolve_with_base)
        DEFINE_OP(op_resolve_with_this)
        DEFINE_OP(op_call_put_result)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_primitive)
        DEFINE_OP(op_get_scoped_var)
        DEFINE_OP(op_put_scoped_var)
        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_self:
        case op_get_by_id_getter_chain:
        case op_get_by_id_getter_proto:
        case op_get_by_id_getter_self:
        case op_get_by_id_custom_chain:
        case op_get_by_id_custom_proto:
        case op_get_by_id_custom_self:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());

    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
}
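
// The link pass resolves the intra-procedural jumps recorded in m_jmpTable during the main
// pass, binding each one to the label of its target bytecode offset.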
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
}
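
// The slow-case pass emits the out-of-line paths for every SlowCaseEntry recorded by the
// main pass; each slow path ends by jumping back to the corresponding fast path via
// emitJumpSlowToHot().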
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
    m_byValInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

#if ENABLE(VALUE_PROFILER)
    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path executions of that
    // instruction and the slow-path executions. Furthermore, if the slow-path code created
    // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
#endif

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        killLastResultRegister();

        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

#if ENABLE(VALUE_PROFILER)
        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
#endif

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_convert_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_init_global_const_check);
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        case op_resolve_global_property:
        case op_resolve_global_var:
        case op_resolve_scoped_var:
        case op_resolve_scoped_var_on_top_scope:
        case op_resolve_scoped_var_with_top_scope_check:
        DEFINE_SLOWCASE_OP(op_resolve)
        case op_resolve_base_to_global:
        case op_resolve_base_to_global_dynamic:
        case op_resolve_base_to_scope:
        case op_resolve_base_to_scope_with_top_scope_check:
        DEFINE_SLOWCASE_OP(op_resolve_base)
        DEFINE_SLOWCASE_OP(op_resolve_with_base)
        DEFINE_SLOWCASE_OP(op_resolve_with_this)
        case op_put_to_base_variable:
        DEFINE_SLOWCASE_OP(op_put_to_base)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

#if ENABLE(VALUE_PROFILER)
        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
#endif

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif

    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
}
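
// Copies the labels recorded while emitting a get_by_id/put_by_id fast path into the
// CodeBlock's StructureStubInfo, as offsets relative to hotPathBegin, so the inline cache
// can be repatched later.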
ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo& info, LinkBuffer &linkBuffer)
{
    ASSERT(bytecodeIndex != std::numeric_limits<unsigned>::max());
    info.bytecodeIndex = bytecodeIndex;
    info.callReturnLocation = linkBuffer.locationOf(callReturnLocation);
    info.hotPathBegin = linkBuffer.locationOf(hotPathBegin);

    switch (m_type) {
    case GetById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare));
        info.patch.baseline.u.get.structureCheck = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureCheck));
        info.patch.baseline.u.get.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
#if USE(JSVALUE64)
        info.patch.baseline.u.get.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel));
#else
        info.patch.baseline.u.get.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel1));
        info.patch.baseline.u.get.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel2));
#endif
        info.patch.baseline.u.get.putResult = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getPutResult));
        info.patch.baseline.u.get.coldPathBegin = MacroAssembler::differenceBetweenCodePtr(linkBuffer.locationOf(getColdPathBegin), linkBuffer.locationOf(callReturnLocation));
        break;
    }
    case PutById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.put.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putStructureToCompare));
        info.patch.baseline.u.put.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
#if USE(JSVALUE64)
        info.patch.baseline.u.put.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel));
#else
        info.patch.baseline.u.put.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel1));
        info.patch.baseline.u.put.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel2));
#endif
        break;
    }
    }
}
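
// Top-level driver for baseline compilation: emits the prologue (plus stack and arity checks
// for function code), runs the main, link, and slow-case passes, then links the result with a
// LinkBuffer and fills in the CodeBlock's jump tables, exception handlers, stub infos, and
// call link infos before finalizing the JITCode.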
JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort effort)
{
#if ENABLE(JIT_VERBOSE_OSR)
    printf("Compiling JIT code!\n");
#endif

#if ENABLE(VALUE_PROFILER)
    DFG::CapabilityLevel level = m_codeBlock->canCompileWithDFG();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::MayInline:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
#endif

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = m_vm->m_perBytecodeProfiler->newCompilation(m_codeBlock, Profiler::Baseline);
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen
    if (m_randomGenerator.getUint32() & 1)
        nop();

    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    Jump stackCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
#if DFG_ENABLE(SUCCESS_STATS)
        static SamplingCounter counter("orignalJIT");
        emitCount(counter);
#endif

#if ENABLE(VALUE_PROFILER)
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
#endif

        addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
        stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT1);
    }

    Label functionBody = label();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        stackCheck.link(this);
        m_bytecodeOffset = 0;
        JITStubCall(this, cti_stack_check).call();
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
        jump(functionBody);

        arityCheck = label();
        preserveReturnAddressAfterCall(regT2);
        emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;
        JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return JITCode();

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
        m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
    for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
        m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_callStructureStubCompilationInfo[i].callType;
        info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
        info.calleeGPR = regT0;
    }

#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    if (canBeOptimizedOrInlined()) {
        CompactJITCodeMap::Encoder jitCodeMapEncoder;
        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
            if (m_labels[bytecodeOffset].isSet())
                jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
        }
        m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
    }
#endif

    if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
        *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly())
        m_disassembler->dump(patchBuffer);
    if (m_compilation)
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);

    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return JITCode(result, JITCode::BaselineJIT);
}
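
// Called once a call site has resolved its callee: records the callee in the CallLinkInfo,
// repatches the hot path to jump straight to the callee's code, and points the slow path at
// the appropriate virtual-call (or closure-call) thunk so linking is not attempted again.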
void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    ASSERT(!callLinkInfo->isLinked());
    callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
    callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee);
    repatchBuffer.relink(callLinkInfo->hotPathOther, code);

    if (calleeCodeBlock)
        calleeCodeBlock->linkIncomingCall(callLinkInfo);

    // Patch the slow patch so we do not continue to try to link.
    if (kind == CodeForCall) {
        ASSERT(callLinkInfo->callType == CallLinkInfo::Call
            || callLinkInfo->callType == CallLinkInfo::CallVarargs);
        if (callLinkInfo->callType == CallLinkInfo::Call) {
            repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallGenerator).code());
            return;
        }

        repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallGenerator).code());
        return;
    }

    ASSERT(kind == CodeForConstruct);
    repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructGenerator).code());
}
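
// Points a call's slow path at the generic virtual call thunk.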
void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallGenerator).code());
}

} // namespace JSC

#endif // ENABLE(JIT)