/*
 * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#include "CodeBlock.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include "DFGNode.h" // for DFG_SUCCESS_STATS
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "Operations.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

using namespace std;

namespace JSC {

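// These helpers repatch an already-generated call instruction, identified by the
// return address it pushes, so that it targets a new callee. They are how a call
// site is swapped between specialized stubs and generic trampolines at runtime.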
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : m_interpreter(vm->interpreter)
    , m_vm(vm)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
    , m_propertyAccessInstructionIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_globalResolveInfoIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeOffset((unsigned)-1)
    , m_mappedVirtualRegisterIndex(JSStack::ReturnPC)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
    , m_randomGenerator(cryptographicallyRandomNumber())
#if ENABLE(VALUE_PROFILER)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
#endif
{
}

#if ENABLE(DFG_JIT)
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

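    // The execution counter counts up toward zero. branchAdd32(Signed, ...) performs
    // the add in place and branches while the result is still negative, so the call
    // to cti_optimize (which can trigger DFG compilation) is skipped until the
    // counter crosses zero.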
    Jump skipOptimize = branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForReturn()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
    JITStubCall stubCall(this, cti_optimize);
    stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
    ASSERT(!m_bytecodeOffset);
    stubCall.call();
    skipOptimize.link(this);
}
#endif

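// Each DEFINE_* macro below emits the code for one opcode's switch case and then
// advances m_bytecodeOffset by that opcode's length, so the compile loops can walk
// the bytecode stream without a separate increment.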
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.addArgument(currentInstruction[3].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }
#endif // USE(JSVALUE32_64)
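
// For example, on JSVALUE64 targets DEFINE_BINARY_OP(op_less) expands to a switch
// case that calls the cti_op_less stub with operands 2 and 3, stores the result in
// operand 1, and advances past the instruction:
//
//     case op_less: {
//         JITStubCall stubCall(this, cti_op_less);
//         stubCall.addArgument(currentInstruction[2].u.operand, regT2);
//         stubCall.addArgument(currentInstruction[3].u.operand, regT2);
//         stubCall.call(currentInstruction[1].u.operand);
//         m_bytecodeOffset += OPCODE_LENGTH(op_less);
//         break;
//     }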

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

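// Baseline compilation proceeds in three passes: privateCompileMainPass() emits the
// fast-path code for every bytecode instruction, privateCompileLinkPass() resolves
// intra-function jumps against the labels recorded for each bytecode offset, and
// privateCompileSlowCases() emits the out-of-line slow paths that the fast paths
// branch to when their assumptions fail.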
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

#if USE(JSVALUE64)
        if (atJumpTarget())
            killLastResultRegister();
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

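        // When a per-bytecode profiler compilation is active, bump this bytecode's
        // execution counter. op_call_put_result is skipped, presumably because it
        // executes as part of the preceding call and counting it separately would
        // double-count the call site.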
        if (m_compilation && opcodeID != op_call_put_result) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        switch (opcodeID) {
        DEFINE_BINARY_OP(op_del_by_val)
        DEFINE_BINARY_OP(op_in)
        DEFINE_BINARY_OP(op_less)
        DEFINE_BINARY_OP(op_lesseq)
        DEFINE_BINARY_OP(op_greater)
        DEFINE_BINARY_OP(op_greatereq)
        DEFINE_UNARY_OP(op_is_function)
        DEFINE_UNARY_OP(op_is_object)
        DEFINE_UNARY_OP(op_typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_convert_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)
        DEFINE_OP(op_init_global_const_check)

        case op_resolve_global_property:
        case op_resolve_global_var:
        case op_resolve_scoped_var:
        case op_resolve_scoped_var_on_top_scope:
        case op_resolve_scoped_var_with_top_scope_check:
        DEFINE_OP(op_resolve)

        case op_resolve_base_to_global:
        case op_resolve_base_to_global_dynamic:
        case op_resolve_base_to_scope:
        case op_resolve_base_to_scope_with_top_scope_check:
        DEFINE_OP(op_resolve_base)

        case op_put_to_base_variable:
        DEFINE_OP(op_put_to_base)

        DEFINE_OP(op_resolve_with_base)
        DEFINE_OP(op_resolve_with_this)
        DEFINE_OP(op_ret)
        DEFINE_OP(op_call_put_result)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_get_scoped_var)
        DEFINE_OP(op_put_scoped_var)

        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_self:
        case op_get_by_id_getter_chain:
        case op_get_by_id_getter_proto:
        case op_get_by_id_getter_self:
        case op_get_by_id_custom_chain:
        case op_get_by_id_custom_proto:
        case op_get_by_id_custom_self:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

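// Fast paths record their guard jumps in m_slowCases, keyed by the bytecode offset
// they belong to. This pass walks those entries in order, links every jump for a
// given instruction to freshly emitted slow-path code, and ends each slow path with
// a jump back to the corresponding fast path (emitJumpSlowToHot).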
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
    m_byValInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

#if ENABLE(VALUE_PROFILER)
    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
#endif

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
#if USE(JSVALUE64)
        killLastResultRegister();
#endif

        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

#if ENABLE(VALUE_PROFILER)
        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
#endif

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_convert_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_init_global_const_check)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_primitive)

        case op_resolve_global_property:
        case op_resolve_global_var:
        case op_resolve_scoped_var:
        case op_resolve_scoped_var_on_top_scope:
        case op_resolve_scoped_var_with_top_scope_check:
        DEFINE_SLOWCASE_OP(op_resolve)

        case op_resolve_base_to_global:
        case op_resolve_base_to_global_dynamic:
        case op_resolve_base_to_scope:
        case op_resolve_base_to_scope_with_top_scope_check:
        DEFINE_SLOWCASE_OP(op_resolve_base)
        DEFINE_SLOWCASE_OP(op_resolve_with_base)
        DEFINE_SLOWCASE_OP(op_resolve_with_this)

        case op_put_to_base_variable:
        DEFINE_SLOWCASE_OP(op_put_to_base)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

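        // Each emitSlow_* handler must consume exactly the slow-case entries recorded
        // for its instruction: if iter still points at an entry for the same bytecode
        // offset, some jumps were left unlinked; if the previous entry is for a
        // different offset, the handler consumed entries belonging to a later one.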
        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

#if ENABLE(VALUE_PROFILER)
        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
#endif

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

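// Once the LinkBuffer has fixed the final location of the generated code, record
// each property-access patch point in its StructureStubInfo as a delta from
// hotPathBegin, so repatching code can later find the structure check, the property
// storage load, and the displacement to overwrite.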
ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo& info, LinkBuffer& linkBuffer)
{
    ASSERT(bytecodeIndex != std::numeric_limits<unsigned>::max());
    info.bytecodeIndex = bytecodeIndex;
    info.callReturnLocation = linkBuffer.locationOf(callReturnLocation);
    info.hotPathBegin = linkBuffer.locationOf(hotPathBegin);

    switch (m_type) {
    case GetById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare));
        info.patch.baseline.u.get.structureCheck = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureCheck));
        info.patch.baseline.u.get.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
#if USE(JSVALUE64)
        info.patch.baseline.u.get.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel));
#else
        info.patch.baseline.u.get.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel1));
        info.patch.baseline.u.get.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel2));
#endif
        info.patch.baseline.u.get.putResult = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getPutResult));
        info.patch.baseline.u.get.coldPathBegin = MacroAssembler::differenceBetweenCodePtr(linkBuffer.locationOf(getColdPathBegin), linkBuffer.locationOf(callReturnLocation));
        break;
    }
    case PutById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.put.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putStructureToCompare));
        info.patch.baseline.u.put.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
#if USE(JSVALUE64)
        info.patch.baseline.u.put.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel));
#else
        info.patch.baseline.u.put.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel1));
        info.patch.baseline.u.put.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel2));
#endif
        break;
    }
    }
}

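// privateCompile() drives the whole baseline pipeline: decide optimization and
// profiling policy, emit the prologue and stack check, run the three compile
// passes, then link the result into executable memory and populate the CodeBlock's
// patching metadata (switch tables, exception handlers, stub infos, call link infos).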
JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort effort)
{
#if ENABLE(JIT_VERBOSE_OSR)
    printf("Compiling JIT code!\n");
#endif

#if ENABLE(VALUE_PROFILER)
    DFG::CapabilityLevel level = m_codeBlock->canCompileWithDFG();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::MayInline:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
#endif

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = m_vm->m_perBytecodeProfiler->newCompilation(m_codeBlock, Profiler::Baseline);
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen
    if (m_randomGenerator.getUint32() & 1)
        nop();

    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

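    // For functions, verify that the frame fits on the JS stack before executing the
    // body: compute the new frame top from the callee register count and branch to a
    // slow path (linked below) if it would pass the end of the stack. Argument value
    // profiling sites are also emitted here so the DFG has type information for
    // parameters.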
    Jump stackCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
#if ENABLE(DFG_JIT)
#if DFG_ENABLE(SUCCESS_STATS)
        static SamplingCounter counter("originalJIT");
        emitCount(counter);
#endif
#endif

#if ENABLE(VALUE_PROFILER)
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
#endif

        addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
        stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT1);
    }

    Label functionBody = label();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        stackCheck.link(this);
        m_bytecodeOffset = 0;
        JITStubCall(this, cti_stack_check).call();
#ifndef NDEBUG
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
        jump(functionBody);

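        // The arity check is a second entry point, used when the caller passes a
        // different number of arguments than the function declares. If the count is
        // already sufficient we jump back to the normal prologue at beginLabel;
        // otherwise a stub call fixes up the frame before re-entering there.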
        arityCheck = label();
        preserveReturnAddressAfterCall(regT2);
        emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;
        JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);
#if !ASSERT_DISABLED
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return JITCode();

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
        m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
    for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
        m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_callStructureStubCompilationInfo[i].callType;
        info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
        info.calleeGPR = regT0;
    }

#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    if (canBeOptimizedOrInlined()
#if ENABLE(LLINT)
        || true
#endif
        ) {
        CompactJITCodeMap::Encoder jitCodeMapEncoder;
        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
            if (m_labels[bytecodeOffset].isSet())
                jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
        }
        m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
    }
#endif

    if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
        *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly())
        m_disassembler->dump(patchBuffer);
    if (m_compilation)
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);

    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return JITCode(result, JITCode::BaselineJIT);
}

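// Called once a linkable call site knows its callee: record the callee in the
// CallLinkInfo and repatch the near call to jump straight to the callee's code.
// The slow path is then repatched to a virtual-call (or closure-call) stub so the
// expensive linking stub is not hit again for this site.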
void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    ASSERT(!callLinkInfo->isLinked());
    callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
    callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee);
    repatchBuffer.relink(callLinkInfo->hotPathOther, code);

    if (calleeCodeBlock)
        calleeCodeBlock->linkIncomingCall(callLinkInfo);

    // Patch the slow path so we do not continue to try to link.
    if (kind == CodeForCall) {
        ASSERT(callLinkInfo->callType == CallLinkInfo::Call
            || callLinkInfo->callType == CallLinkInfo::CallVarargs);
        if (callLinkInfo->callType == CallLinkInfo::Call) {
            repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallGenerator).code());
            return;
        }

        repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallGenerator).code());
        return;
    }

    ASSERT(kind == CodeForConstruct);
    repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructGenerator).code());
}

void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallGenerator).code());
}

} // namespace JSC

#endif // ENABLE(JIT)