/*
 * Copyright (C) 2008, 2009, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "ProfilerDatabase.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include <wtf/CryptographicallyRandomNumber.h>

using namespace std;

namespace JSC {

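// These helpers repatch an existing call site, identified by the return address it pushes,
// so that it targets a new callee. They are used when an already-linked call needs to be
// redirected to a different trampoline or C function.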
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
{
}

#if ENABLE(DFG_JIT)
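// Emitted at function entry when the code block can tier up to the DFG. It bumps the
// execution counter and, once the counter goes non-negative, calls operationOptimize;
// if that returns an entry point into optimized code, we install the new stack pointer
// and jump straight into it, otherwise we fall through to the baseline code.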
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);
    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
#endif

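// The macros below build the opcode dispatch switches in privateCompileMainPass() and
// privateCompileSlowCases(). DEFINE_OP dispatches to the per-opcode emit_* member
// function, DEFINE_SLOWCASE_OP to the emitSlow_* variant, and DEFINE_SLOW_OP routes the
// opcode through a shared JITSlowPathCall. NEXT_OPCODE advances m_bytecodeOffset past
// the instruction just compiled. For example, DEFINE_OP(op_add) expands to:
//
//     case op_add: {
//         emit_op_add(currentInstruction);
//         m_bytecodeOffset += OPCODE_LENGTH(op_add);
//         break;
//     }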
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

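// First pass: walk the bytecode stream linearly, record a label for every bytecode offset
// (jump targets and the JIT code map are resolved against these later), and emit the
// fast-path code for each opcode. Slow-path jumps taken by the emitted code are queued in
// m_slowCases and compiled out of line by privateCompileSlowCases().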
void JIT::privateCompileMainPass()
{
    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        switch (opcodeID) {
        DEFINE_SLOW_OP(del_by_val)
        DEFINE_SLOW_OP(in)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object)
        DEFINE_SLOW_OP(typeof)

        DEFINE_OP(op_touch_entry)
        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_captured_mov)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_captured_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

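// Second pass: now that every bytecode offset has a label, bind the intra-block jumps
// recorded during the main pass to the labels of their target offsets.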
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

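// Third pass: emit the out-of-line slow paths. Each group of SlowCaseEntry jumps recorded
// for a single bytecode offset during the main pass is compiled here via the matching
// emitSlow_* function, and ends by jumping back to the fast path of the next instruction.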
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_captured_mov)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_callee)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_primitive)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

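// Drives a full baseline compile of the code block: decide the optimization and profiling
// policy from the DFG capability level, emit the prologue and stack check, run the main,
// link, and slow-case passes, then link everything into executable memory with a
// LinkBuffer and publish the finished JITCode on the CodeBlock.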
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanInline:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen
    if (m_randomGenerator.getUint32() & 1)
        nop();

    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    Jump stackOverflow;
    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }

        addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
        stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);
    }

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

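    // Function code gets a second entry point, arityCheck (published as withArityCheck
    // below), which re-checks the incoming argument count, runs the arityFixup thunk when
    // too few arguments were passed, and then rejoins the normal path at beginLabel. The
    // stackOverflow branch recorded above is also resolved here, rolling back the call
    // frame and throwing the stack overflow error.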
    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        stackOverflow.link(this);
        m_bytecodeOffset = 0;
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        if (returnValueGPR != regT0)
            move(returnValueGPR, regT0);
        branchTest32(Zero, regT0).linkTo(beginLabel, this);
        GPRReg thunkReg;
#if USE(JSVALUE64)
        thunkReg = GPRInfo::regT7;
#else
        thunkReg = GPRInfo::regT5;
#endif
        move(TrustedImmPtr(m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters())), thunkReg);
        loadPtr(BaseIndex(thunkReg, regT0, timesPtr()), thunkReg);
        emitNakedCall(m_vm->getCTIStub(arityFixup).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);

    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.callReturnLocation = patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(compilationInfo.hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(compilationInfo.hotPathOther);
    }

    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly())
        m_disassembler->dump(patchBuffer);
    if (m_compilation) {
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

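// Emits the shared tail that every exception check branches to: it loads the frame to
// search (either the current call frame, or the caller's frame for checks that must roll
// the call frame back first), calls lookupExceptionHandler, and then jumps to the handler
// it finds.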
void JIT::privateCompileExceptionHandlers()
{
    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
        return;

    Jump doLookup;

    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);
        emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR1);
        doLookup = jump();
    }

    if (!m_exceptionChecks.empty())
        m_exceptionChecks.link(this);

    // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

    if (doLookup.isSet())
        doLookup.link(this);

    move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);

#if CPU(X86)
    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
    poke(GPRInfo::argumentGPR0);
    poke(GPRInfo::argumentGPR1, 1);
#endif
    m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
    jumpToExceptionHandler();
}

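// Frame sizing helpers: frameRegisterCountFor() returns the number of registers the
// baseline frame reserves (the callee registers plus the worst-case slow-path call area,
// rounded for stack alignment), and stackPointerOffsetFor() converts that count into the
// stack-pointer offset used by the prologue above.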
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters);
}

int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

} // namespace JSC

#endif // ENABLE(JIT)