/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif
#include "CodeBlock.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include "DFGNode.h" // for DFG_SUCCESS_STATS
#include "Interpreter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

namespace JSC {
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}
void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}
void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
    : m_interpreter(globalData->interpreter)
    , m_globalData(globalData)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeOffset((unsigned)-1)
    , m_mappedVirtualRegisterIndex(RegisterFile::ReturnPC)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
#if USE(OS_RANDOMNESS)
    , m_randomGenerator(cryptographicallyRandomNumber())
#else
    , m_randomGenerator(static_cast<unsigned>(randomNumber() * 0xFFFFFFF))
#endif
{
}
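// The (unsigned)-1 / -1 sentinels above mark state that is only meaningful while a
// compile pass is running; the passes below set these fields before use and restore
// the sentinels afterwards so that stale values trip ASSERTs instead of being
// silently reused.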
#if ENABLE(DFG_JIT)
void JIT::emitOptimizationCheck(OptimizationCheckKind kind)
{
    if (!shouldEmitProfiling())
        return;

    Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? Options::executionCounterIncrementForLoop : Options::executionCounterIncrementForReturn), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
    JITStubCall stubCall(this, kind == LoopOptimizationCheck ? cti_optimize_from_loop : cti_optimize_from_ret);
    if (kind == LoopOptimizationCheck)
        stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
    stubCall.call();
    skipOptimize.link(this);
}
#endif
#if USE(JSVALUE64)
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), AbsoluteAddress(&m_globalData->m_timeoutCount));
    JITStubCall stubCall(this, cti_timeout_check);
    stubCall.addArgument(regT1, regT0); // save last result registers.
    stubCall.call(regT0);
    store32(regT0, &m_globalData->m_timeoutCount);
    stubCall.getArgument(0, regT1, regT0); // reload last result registers.
    skipTimeout.link(this);
}
#elif USE(JSVALUE32_64)
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall stubCall(this, cti_timeout_check);
    stubCall.addArgument(regT1, regT0); // save last result registers.
    stubCall.call(timeoutCheckRegister);
    stubCall.getArgument(0, regT1, regT0); // reload last result registers.
    skipTimeout.link(this);
}
#else
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
    skipTimeout.link(this);

    killLastResultRegister();
}
#endif
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.addArgument(currentInstruction[3].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#endif // USE(JSVALUE32_64)

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }
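// For reference, each DEFINE_OP(op_foo) entry in the switches below expands to
// roughly the following case (DEFINE_SLOWCASE_OP calls emitSlow_op_foo instead),
// so every entry compiles one bytecode instruction and advances m_bytecodeOffset
// past it:
//
//     case op_foo: {
//         emit_op_foo(currentInstruction);
//         m_bytecodeOffset += OPCODE_LENGTH(op_foo);
//         break;
//     }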
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

#if USE(JSVALUE64)
        killLastResultRegister();
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLog("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif
        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_BINARY_OP(op_del_by_val)
        DEFINE_BINARY_OP(op_in)
        DEFINE_BINARY_OP(op_less)
        DEFINE_BINARY_OP(op_lesseq)
        DEFINE_BINARY_OP(op_greater)
        DEFINE_BINARY_OP(op_greatereq)
        DEFINE_UNARY_OP(op_is_function)
        DEFINE_UNARY_OP(op_is_object)
        DEFINE_UNARY_OP(op_typeof)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_convert_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_global_var)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_get_scoped_var)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jmp_scopes)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_loop_if_less)
        DEFINE_OP(op_loop_if_lesseq)
        DEFINE_OP(op_loop_if_greater)
        DEFINE_OP(op_loop_if_greatereq)
        DEFINE_OP(op_loop_if_true)
        DEFINE_OP(op_loop_if_false)
        DEFINE_OP(op_method_check)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_post_dec)
        DEFINE_OP(op_post_inc)
        DEFINE_OP(op_pre_dec)
        DEFINE_OP(op_pre_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_new_scope)
        DEFINE_OP(op_push_scope)
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        DEFINE_OP(op_put_global_var)
        DEFINE_OP(op_put_scoped_var)
        DEFINE_OP(op_resolve)
        DEFINE_OP(op_resolve_base)
        DEFINE_OP(op_ensure_property_exists)
        DEFINE_OP(op_resolve_global)
        DEFINE_OP(op_resolve_global_dynamic)
        DEFINE_OP(op_resolve_skip)
        DEFINE_OP(op_resolve_with_base)
        DEFINE_OP(op_resolve_with_this)
        DEFINE_OP(op_call_put_result)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw_reference_error)
        DEFINE_OP(op_to_jsnumber)
        DEFINE_OP(op_to_primitive)

        case op_get_array_length:
        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_self:
        case op_get_by_id_getter_chain:
        case op_get_by_id_getter_proto:
        case op_get_by_id_getter_self:
        case op_get_by_id_custom_chain:
        case op_get_by_id_custom_proto:
        case op_get_by_id_custom_self:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            ASSERT_NOT_REACHED();
        }
    }

    ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());

    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
}
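// The main pass records every forward jump it emits in m_jmpTable; the link pass
// below simply binds each recorded jump to the label captured for its target
// bytecode offset.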
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
}
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

#if !ASSERT_DISABLED && ENABLE(VALUE_PROFILER)
    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
#endif

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
#if USE(JSVALUE64)
        killLastResultRegister();
#endif

        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

#if ENABLE(VALUE_PROFILER)
        RareCaseProfile* rareCaseProfile = 0;
        if (m_canBeOptimized)
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
#endif

#if ENABLE(JIT_VERBOSE)
        dataLog("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif
        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_convert_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_if_less)
        DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
        DEFINE_SLOWCASE_OP(op_loop_if_greater)
        DEFINE_SLOWCASE_OP(op_loop_if_greatereq)
        DEFINE_SLOWCASE_OP(op_loop_if_true)
        DEFINE_SLOWCASE_OP(op_loop_if_false)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_method_check)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_array)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_new_func)
        DEFINE_SLOWCASE_OP(op_new_func_exp)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_post_dec)
        DEFINE_SLOWCASE_OP(op_post_inc)
        DEFINE_SLOWCASE_OP(op_pre_dec)
        DEFINE_SLOWCASE_OP(op_pre_inc)
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_resolve_global)
        DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_jsnumber)
        DEFINE_SLOWCASE_OP(op_to_primitive)

        default:
            ASSERT_NOT_REACHED();
        }
        ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

#if ENABLE(VALUE_PROFILER)
        if (m_canBeOptimized)
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
#endif

        emitJumpSlowToHot(jump(), 0);
    }

    ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
    ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
    ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif

    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
}
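// copyToStubInfo() transfers the labels recorded for a single property access during
// code generation into the CodeBlock's StructureStubInfo, storing most of them as
// offsets relative to hotPathBegin so the repatching code can locate them later.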
ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo& info, LinkBuffer& linkBuffer)
{
    ASSERT(bytecodeIndex != std::numeric_limits<unsigned>::max());
    info.bytecodeIndex = bytecodeIndex;
    info.callReturnLocation = linkBuffer.locationOf(callReturnLocation);
    info.hotPathBegin = linkBuffer.locationOf(hotPathBegin);

    switch (m_type) {
    case MethodCheck: {
        CodeLocationDataLabelPtr structureToCompareLocation = linkBuffer.locationOf(methodCheckStructureToCompare);
        info.patch.baseline.methodCheckProtoObj = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckProtoObj));
        info.patch.baseline.methodCheckProtoStructureToCompare = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckProtoStructureToCompare));
        info.patch.baseline.methodCheckPutFunction = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckPutFunction));
        // No break - fall through to GetById.
    }
    case GetById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare));
        info.patch.baseline.u.get.structureCheck = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureCheck));
#if USE(JSVALUE64)
        info.patch.baseline.u.get.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel));
#else
        info.patch.baseline.u.get.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel1));
        info.patch.baseline.u.get.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel2));
#endif
        info.patch.baseline.u.get.putResult = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getPutResult));
        info.patch.baseline.u.get.coldPathBegin = MacroAssembler::differenceBetweenCodePtr(linkBuffer.locationOf(getColdPathBegin), linkBuffer.locationOf(callReturnLocation));
        break;
    }
    case PutById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.put.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putStructureToCompare));
#if USE(JSVALUE64)
        info.patch.baseline.u.put.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel));
#else
        info.patch.baseline.u.put.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel1));
        info.patch.baseline.u.put.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel2));
#endif
        break;
    }
    }
}
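// privateCompile() drives the whole baseline compilation: emit the prologue and
// register file check, run the main/link/slow-case passes, then use a LinkBuffer to
// resolve switch tables, exception handlers, calls, and property-access/call link
// info before handing back the finished JITCode.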
JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort effort)
{
#if ENABLE(JIT_VERBOSE_OSR)
    printf("Compiling JIT code!\n");
#endif

#if ENABLE(VALUE_PROFILER)
    m_canBeOptimized = m_codeBlock->canCompileWithDFG();
#endif

    // Just add a little bit of randomness to the codegen
    if (m_randomGenerator.getUint32() & 1)
        nop();

    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    Jump registerFileCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
#if DFG_ENABLE(SUCCESS_STATS)
        static SamplingCounter counter("originalJIT");
        emitCount(counter);
#endif

#if ENABLE(VALUE_PROFILER)
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                loadPtr(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
#endif

        addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
        registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
    }

    Label functionBody = label();

#if ENABLE(VALUE_PROFILER)
    if (m_canBeOptimized)
        add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->m_executionEntryCount));
#endif

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        registerFileCheck.link(this);
        m_bytecodeOffset = 0;
        JITStubCall(this, cti_register_file_check).call();
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.

        arityCheck = label();
        preserveReturnAddressAfterCall(regT2);
        emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
        emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

        load32(payloadFor(RegisterFile::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;
        JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());
    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return JITCode();

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->second.branchOffset;
                it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
            m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
    }

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
    for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
        m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_callStructureStubCompilationInfo[i].callType;
        info.bytecodeIndex = m_callStructureStubCompilationInfo[i].bytecodeIndex;
        info.callReturnLocation = CodeLocationLabel(patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation));
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
    }
    unsigned methodCallCount = m_methodCallCompilationInfo.size();
    m_codeBlock->addMethodCallLinkInfos(methodCallCount);
    for (unsigned i = 0; i < methodCallCount; ++i) {
        MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
        info.bytecodeIndex = m_methodCallCompilationInfo[i].bytecodeIndex;
        info.cachedStructure.setLocation(patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare));
        info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
    }

#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
#endif

    if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
        *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);

    CodeRef result = patchBuffer.finalizeCode();

    m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

#if ENABLE(JIT_VERBOSE)
    dataLog("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return JITCode(result, JITCode::BaselineJIT);
}
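// linkFor() is called once a call site's callee is known: it caches the callee on the
// CallLinkInfo, points the hot path call at the callee's entry point, and repatches
// the slow path back to the generic virtual call/construct thunk so that linking is
// not attempted again.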
void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, JSGlobalData* globalData, CodeSpecializationKind kind)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    ASSERT(!callLinkInfo->isLinked());
    callLinkInfo->callee.set(*globalData, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
    callLinkInfo->lastSeenCallee.set(*globalData, callerCodeBlock->ownerExecutable(), callee);
    repatchBuffer.relink(callLinkInfo->hotPathOther, code);

    if (calleeCodeBlock)
        calleeCodeBlock->linkIncomingCall(callLinkInfo);

    // Patch the slow path so we do not continue to try to link.
    if (kind == CodeForCall) {
        repatchBuffer.relink(CodeLocationNearCall(callLinkInfo->callReturnLocation), globalData->jitStubs->ctiVirtualCall());
        return;
    }

    ASSERT(kind == CodeForConstruct);
    repatchBuffer.relink(CodeLocationNearCall(callLinkInfo->callReturnLocation), globalData->jitStubs->ctiVirtualConstruct());
}
} // namespace JSC

#endif // ENABLE(JIT)