/*
 * Copyright (C) 2008, 2009, 2012-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "ProfilerDatabase.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>

using namespace std;

namespace JSC {

void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

JIT::JIT(VM* vm, CodeBlock* codeBlock)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
    , m_getByIdIndex(UINT_MAX)
    , m_putByIdIndex(UINT_MAX)
    , m_byValInstructionIndex(UINT_MAX)
    , m_callLinkInfoIndex(UINT_MAX)
    , m_randomGenerator(cryptographicallyRandomNumber())
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
{
}

#if ENABLE(DFG_JIT)
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);
    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    move(returnValueGPR2, stackPointerRegister);
    jump(returnValueGPR);
    skipOptimize.link(this);
}
#endif

void JIT::emitNotifyWrite(WatchpointSet* set)
{
    if (!set || set->state() == IsInvalidated)
        return;

    addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::assertStackPointerOffset()
{
    if (ASSERT_DISABLED)
        return;

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
    breakpoint();
    ok.link(this);
}

#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
        slowPathCall.call(); \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

void JIT::privateCompileMainPass()
{
    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %u", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_compilation) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        switch (opcodeID) {
        DEFINE_SLOW_OP(del_by_val)
        DEFINE_SLOW_OP(in)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object_or_null)
        DEFINE_SLOW_OP(typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_create_direct_arguments)
        DEFINE_OP(op_create_scoped_arguments)
        DEFINE_OP(op_create_out_of_band_arguments)
        DEFINE_OP(op_check_tdz)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_lexical_environment)
        DEFINE_OP(op_get_scope)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_is_object)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_profile_type)
        DEFINE_OP(op_profile_control_flow)
        DEFINE_OP(op_push_name_scope)
        DEFINE_OP(op_push_with_scope)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        case op_put_by_val_direct:
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_by_id)
        DEFINE_OP(op_put_setter_by_id)
        DEFINE_OP(op_put_getter_setter)
        case op_init_global_const_nop:
            NEXT_OPCODE(op_init_global_const_nop);
        DEFINE_OP(op_init_global_const)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_static_error)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_string)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        DEFINE_OP(op_get_from_arguments)
        DEFINE_OP(op_put_to_arguments)

        DEFINE_OP(op_get_enumerable_length)
        DEFINE_OP(op_has_generic_property)
        DEFINE_OP(op_has_structure_property)
        DEFINE_OP(op_has_indexed_property)
        DEFINE_OP(op_get_direct_pname)
        DEFINE_OP(op_get_property_enumerator)
        DEFINE_OP(op_enumerator_structure_pname)
        DEFINE_OP(op_enumerator_generic_pname)
        DEFINE_OP(op_to_index_string)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_getByIdIndex = 0;
    m_putByIdIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path executions of that
    // instruction and the slow-path executions. Furthermore, if the slow-path code created
    // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

#if ENABLE(JIT_VERBOSE)
        dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_to_this)
        DEFINE_SLOWCASE_OP(op_check_tdz)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        case op_get_by_id_out_of_line:
        case op_get_array_length:
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_dec)
        DEFINE_SLOWCASE_OP(op_inc)
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_unsigned)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_number)
        DEFINE_SLOWCASE_OP(op_to_string)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        DEFINE_SLOWCASE_OP(op_has_indexed_property)
        DEFINE_SLOWCASE_OP(op_has_structure_property)
        DEFINE_SLOWCASE_OP(op_get_direct_pname)

        DEFINE_SLOWCASE_OP(op_resolve_scope)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
    RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    // This ensures that we have the most up-to-date type information when performing typecheck optimizations for op_profile_type.
    if (m_vm->typeProfiler())
        m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));

    if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
        m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
    if (m_vm->m_perBytecodeProfiler) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());

    // Just add a little bit of randomness to the codegen
    if (m_randomGenerator.getUint32() & 1)
        nop();

    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
    }

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);

    move(regT1, stackPointerRegister);
    checkStackPointerAlignment();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());

    stackOverflow.link(this);
    m_bytecodeOffset = 0;
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);

        load32(payloadFor(JSStack::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        if (returnValueGPR != regT0)
            move(returnValueGPR, regT0);
        branchTest32(Zero, regT0).linkTo(beginLabel, this);
        GPRReg thunkReg;
#if USE(JSVALUE64)
        thunkReg = GPRInfo::regT7;
#else
        thunkReg = GPRInfo::regT5;
#endif
        CodeLocationLabel* failThunkLabels =
            m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters());
        move(TrustedImmPtr(failThunkLabels), thunkReg);
        loadPtr(BaseIndex(thunkReg, regT0, timesPtr()), thunkReg);
        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());

#if !ASSERT_DISABLED
        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->value.branchOffset;
                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(patchBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(patchBuffer);

    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);

        m_codeBlock->byValInfo(i) = ByValInfo(
            m_byValCompilationInfo[i].bytecodeIndex,
            badTypeJump,
            m_byValCompilationInfo[i].arrayMode,
            differenceBetweenCodePtr(badTypeJump, doneTarget),
            differenceBetweenCodePtr(returnAddress, slowPathTarget));
    }
    for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
        CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.setCallLocations(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation),
            patchBuffer.locationOf(compilationInfo.hotPathBegin),
            patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
    }

    CompactJITCodeMap::Encoder jitCodeMapEncoder;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
    }
    m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());

    MacroAssemblerCodePtr withArityCheck;
    if (m_codeBlock->codeType() == FunctionCode)
        withArityCheck = patchBuffer.locationOf(arityCheck);

    if (Options::showDisassembly()) {
        m_disassembler->dump(patchBuffer);
        patchBuffer.didAlreadyDisassemble();
    }
    if (m_compilation) {
        m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
    }

    CodeRef result = FINALIZE_CODE(
        patchBuffer,
        ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));

#if ENABLE(JIT_VERBOSE)
    dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return CompilationSuccessful;
}

void JIT::privateCompileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).

        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
        jumpToExceptionHandler();
    }
}

unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters)));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters);
}

int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

} // namespace JSC

#endif // ENABLE(JIT)