/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif
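// The definition above provides out-of-line storage for MacroAssemblerX86Common's static SSE2
// availability flag; NotCheckedSSE2 means the runtime CPU-feature probe has not been run yet.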
37 #include "CodeBlock.h"
38 #include "CryptographicallyRandomNumber.h"
39 #include "Interpreter.h"
40 #include "JITInlineMethods.h"
41 #include "JITStubCall.h"
43 #include "JSFunction.h"
44 #include "LinkBuffer.h"
45 #include "RepatchBuffer.h"
46 #include "ResultType.h"
47 #include "SamplingTool.h"
48 #include "dfg/DFGNode.h" // for DFG_SUCCESS_STATS
namespace JSC {

void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}
void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}
void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
    : m_interpreter(globalData->interpreter)
    , m_globalData(globalData)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
    , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
    , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
    , m_bytecodeOffset((unsigned)-1)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeOffset((unsigned)-1)
    , m_mappedVirtualRegisterIndex((unsigned)-1)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
#if USE(OS_RANDOMNESS)
    , m_randomGenerator(cryptographicallyRandomNumber())
#else
    , m_randomGenerator(static_cast<unsigned>(randomNumber() * 0xFFFFFFF))
#endif
{
}
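// emitTimeoutCheck() decrements timeoutCheckRegister and, once it reaches zero, calls the
// cti_timeout_check stub (which can interrupt long-running scripts; its return value refills
// timeoutCheckRegister). The two variants below differ only in how the cached "last result"
// registers are preserved or invalidated across the stub call.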
#if USE(JSVALUE32_64)
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall stubCall(this, cti_timeout_check);
    stubCall.addArgument(regT1, regT0); // save last result registers.
    stubCall.call(timeoutCheckRegister);
    stubCall.getArgument(0, regT1, regT0); // reload last result registers.
    skipTimeout.link(this);
}
#else
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
    skipTimeout.link(this);

    killLastResultRegister();
}
#endif
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;
#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.addArgument(currentInstruction[3].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }
#endif // USE(JSVALUE32_64)

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }
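// Compilation runs as a series of passes over the bytecode: privateCompileMainPass() emits the
// fast paths (recording slow-case jumps as it goes), privateCompileLinkPass() binds intra-function
// jumps, and privateCompileSlowCases() emits the out-of-line slow paths. The DEFINE_* macros above
// expand to the switch cases used by these passes.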
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_propertyAccessInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

#if !USE(JSVALUE32_64)
        if (m_labels[m_bytecodeOffset].isUsed())
            killLastResultRegister();
#endif

        // Record the machine-code label for this bytecode offset; the link pass and the
        // switch-table fix-up below resolve jumps against m_labels.
        m_labels[m_bytecodeOffset] = label();

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_BINARY_OP(op_del_by_val)
        DEFINE_BINARY_OP(op_in)
        DEFINE_BINARY_OP(op_less)
        DEFINE_BINARY_OP(op_lesseq)
        DEFINE_UNARY_OP(op_is_boolean)
        DEFINE_UNARY_OP(op_is_function)
        DEFINE_UNARY_OP(op_is_number)
        DEFINE_UNARY_OP(op_is_object)
        DEFINE_UNARY_OP(op_is_string)
        DEFINE_UNARY_OP(op_is_undefined)
        DEFINE_UNARY_OP(op_negate)
        DEFINE_UNARY_OP(op_typeof)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_convert_this)
        DEFINE_OP(op_convert_this_strict)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_global_var)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_get_scoped_var)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jmp_scopes)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_load_varargs)
        DEFINE_OP(op_loop_if_less)
        DEFINE_OP(op_loop_if_lesseq)
        DEFINE_OP(op_loop_if_true)
        DEFINE_OP(op_loop_if_false)
        DEFINE_OP(op_method_check)
#if USE(JSVALUE32_64)
        DEFINE_OP(op_negate)
#endif
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_post_dec)
        DEFINE_OP(op_post_inc)
        DEFINE_OP(op_pre_dec)
        DEFINE_OP(op_pre_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_new_scope)
        DEFINE_OP(op_push_scope)
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter)
        DEFINE_OP(op_put_global_var)
        DEFINE_OP(op_put_scoped_var)
        DEFINE_OP(op_put_setter)
        DEFINE_OP(op_resolve)
        DEFINE_OP(op_resolve_base)
        DEFINE_OP(op_ensure_property_exists)
        DEFINE_OP(op_resolve_global)
        DEFINE_OP(op_resolve_global_dynamic)
        DEFINE_OP(op_resolve_skip)
        DEFINE_OP(op_resolve_with_base)
        DEFINE_OP(op_call_put_result)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw_reference_error)
        DEFINE_OP(op_to_jsnumber)
        DEFINE_OP(op_to_primitive)
        case op_get_array_length:
        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_proto_list:
        case op_get_by_id_self:
        case op_get_by_id_self_list:
        case op_get_by_id_getter_chain:
        case op_get_by_id_getter_proto:
        case op_get_by_id_getter_proto_list:
        case op_get_by_id_getter_self:
        case op_get_by_id_getter_self_list:
        case op_get_by_id_custom_chain:
        case op_get_by_id_custom_proto:
        case op_get_by_id_custom_proto_list:
        case op_get_by_id_custom_self:
        case op_get_by_id_custom_self_list:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            ASSERT_NOT_REACHED();
        }
    }
    ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
    ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());

    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
}
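// Bind every jump recorded during the main pass to the label of the bytecode offset it targets.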
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}
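// Emit the out-of-line slow paths. Each group of slow-case jumps recorded for a bytecode op is
// linked by its emitSlow_* handler, and every slow path ends by jumping back to the fast path.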
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
#if !USE(JSVALUE32_64)
        killLastResultRegister();
#endif

        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitnot)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_convert_this)
        DEFINE_SLOWCASE_OP(op_convert_this_strict)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_load_varargs)
        DEFINE_SLOWCASE_OP(op_loop_if_less)
        DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
        DEFINE_SLOWCASE_OP(op_loop_if_true)
        DEFINE_SLOWCASE_OP(op_loop_if_false)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_method_check)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
#if USE(JSVALUE32_64)
        DEFINE_SLOWCASE_OP(op_negate)
#endif
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_post_dec)
        DEFINE_SLOWCASE_OP(op_post_inc)
        DEFINE_SLOWCASE_OP(op_pre_dec)
        DEFINE_SLOWCASE_OP(op_pre_inc)
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_resolve_global)
        DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_jsnumber)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        default:
            ASSERT_NOT_REACHED();
        }

        ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        emitJumpSlowToHot(jump(), 0);
    }
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
#endif
    ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());

    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
}
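// Top-level compilation entry point: emit the prologue and stack/arity checks, run the three
// passes above, then use a LinkBuffer to copy the code into executable memory and fill in the
// switch tables, exception handlers and inline-cache metadata before finalizing.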
JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
{
    // Just add a little bit of randomness to the codegen
    if (m_randomGenerator.getUint32() & 1)
        nop();

    // Could use a pop_m, but would need to offset the following instruction if so.
    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif
    Jump registerFileCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
#if DFG_SUCCESS_STATS
        static SamplingCounter counter("originalJIT");
        emitCount(counter);
#endif

        // In the case of a fast linked call, we do not set this up in the caller.
        emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

        // registerFileCheck is taken when this frame's registers would run past the end of the
        // RegisterFile; it is linked below to the cti_register_file_check slow path.
        addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
        registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
    }
    Label functionBody = label();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();
    if (m_codeBlock->codeType() == FunctionCode) {
        registerFileCheck.link(this);
        m_bytecodeOffset = 0;
        JITStubCall(this, cti_register_file_check).call();
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
    }

    // Calls that may pass the wrong number of arguments enter at arityCheck: if the count in
    // regT1 already matches, jump straight to beginLabel; otherwise call the arity-check stub
    // to fix up the frame before re-entering.
    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        arityCheck = label();
        preserveReturnAddressAfterCall(regT2);
        emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
        branch32(Equal, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
        restoreArgumentReference();

        JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);

        jump(beginLabel);
    }
    ASSERT(m_jmpTable.isEmpty());

    LinkBuffer patchBuffer(*m_globalData, this, m_globalData->executableAllocator);
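    // From here on, patchBuffer.locationOf()/locationOfNearCall() translate assembler labels into
    // their final addresses within the executable allocation created above.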
    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->second.branchOffset;
                it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }
    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }
    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }
    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
            m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
    }
    // Link absolute addresses for jsr
    for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
        patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
    }
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.isCall = m_callStructureStubCompilationInfo[i].isCall;
        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
    }
#endif
    unsigned methodCallCount = m_methodCallCompilationInfo.size();
    m_codeBlock->addMethodCallLinkInfos(methodCallCount);
    for (unsigned i = 0; i < methodCallCount; ++i) {
        MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
        info.cachedStructure.setLocation(patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare));
        info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
    }
    if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
        *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);

    return patchBuffer.finalizeCode();
}
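// The helpers below are used once a call site's callee is known: they cache the callee on the
// CallLinkInfo and repatch the site so subsequent calls jump straight to the compiled code,
// falling back to the generic virtual call/construct trampolines when linking is not possible.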
#if ENABLE(JIT_OPTIMIZE_CALL)
void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    // Currently we only link calls with the exact number of arguments.
    // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
    if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
        ASSERT(!callLinkInfo->isLinked());
        callLinkInfo->callee.set(*globalData, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
        repatchBuffer.relink(callLinkInfo->hotPathOther, code);
    }

    // Patch the call so we do not continue to try to link.
    repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualCall());
}
void JIT::linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    // Currently we only link calls with the exact number of arguments.
    // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
    if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
        ASSERT(!callLinkInfo->isLinked());
        callLinkInfo->callee.set(*globalData, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
        repatchBuffer.relink(callLinkInfo->hotPathOther, code);
    }

    // Patch the call so we do not continue to try to link.
    repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualConstruct());
}
#endif // ENABLE(JIT_OPTIMIZE_CALL)

} // namespace JSC

#endif // ENABLE(JIT)