# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.

# Work-around for the fact that the toolchain's awareness of armv7s results in
# a separate slab in the fat binary, yet the offlineasm doesn't know to expect
# it.

# First come the common protocols that both interpreters use. Note that each
# of these must have an ASSERT() in LLIntData.cpp.
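#
# Illustratively, the checks in LLIntData.cpp are C++ assertions of roughly
# this shape (a sketch, not the verbatim source):
#
#     ASSERT(RegisterFile::CallFrameHeaderSize * 8 == CallFrameHeaderSize);
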
# These declarations must match interpreter/RegisterFile.h.
const CallFrameHeaderSize = 48
const ArgumentCount = -48
const CallerFrame = -40
const ReturnPC = -32
const ScopeChain = -24
const Callee = -16
const CodeBlock = -8

const ThisArgumentOffset = -CallFrameHeaderSize - 8
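
# Worked example: with a 48-byte header and 8-byte registers, 'this' lives
# just below the header at -CallFrameHeaderSize - 8 = -56 bytes from cfr, and
# argument i (counting from 'this') lives at ThisArgumentOffset - i * 8.
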
# Some register conventions.
#
# - Use a pair of registers to represent the PC: one register for the
#   base of the bytecode instruction stream, and one register for the index.
# - The PC base (or PB for short) should be stored in the csr. It will
#   get clobbered on calls to other JS code, but will get saved on calls
#   to C code.
# - C calls are still given the Instruction* rather than the PC index.
#   This requires an add before the call, and a sub after.
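#
# Illustratively, a slow-path call on a 64-bit build bridges the two
# representations like this (a sketch of the convention, not the verbatim
# helper; compare callSlowPath in LowLevelInterpreter64.asm):
#
#     addp PB, PC                  # PC index -> Instruction*
#     cCall2(slowPath, cfr, PC)    # C code sees the Instruction*
#     subp PB, PC                  # Instruction* -> PC index
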
const tagTypeNumber = csr1

# Constants for reasoning about value representation.
if BIG_ENDIAN
    const TagOffset = 0
    const PayloadOffset = 4
else
    const TagOffset = 4
    const PayloadOffset = 0
end

# Type flag constants.
const MasqueradesAsUndefined = 1
const ImplementsHasInstance = 2
const ImplementsDefaultHasInstance = 8

# Bytecode operand constants.
const FirstConstantRegisterIndex = 0x40000000

# Code type constants.
const GlobalCode = 0
const EvalCode = 1
const FunctionCode = 2

# The interpreter steals the tag word of the argument count.
const LLIntReturnPC = ArgumentCount + TagOffset
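
# Illustratively, this lets the interpreter stash a return PC in the callee
# frame's otherwise-unused tag word around a call (a sketch; the register
# holding the prospective callee frame is hypothetical):
#
#     storep PC, LLIntReturnPC[t3]  # t3 = the callee's prospective cfr
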
# String flags.
const HashFlags8BitBuffer = 64

# Allocation constants.
if JSVALUE64
    const JSFinalObjectSizeClassIndex = 1
else
    const JSFinalObjectSizeClassIndex = 3
end

# This must match wtf/Vector.h.
if JSVALUE64
    const VectorSizeOffset = 0
    const VectorBufferOffset = 8
else
    const VectorSizeOffset = 0
    const VectorBufferOffset = 4
end
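
# Illustrative use of these offsets (the class and field names here are
# hypothetical): load a Vector's buffer pointer, then its length:
#
#     loadp SomeClass::m_myVector + VectorBufferOffset[t0], t1
#     loadi SomeClass::m_myVector + VectorSizeOffset[t0], t2
#
# functionInitialization below does exactly this with
# CodeBlock::m_argumentValueProfiles.
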
# Some common utilities.
macro crash()
    storei 0, 0xbbadbeef[]
    move 0, t0
    call t0
end

macro assert(assertion)
    if ASSERT_ENABLED
        assertion(.ok)
        crash()
    .ok:
    end
end

macro preserveReturnAddressAfterCall(destinationRegister)
    if ARMv7
        move lr, destinationRegister
    elsif X86 or X86_64
        pop destinationRegister
    else
        error
    end
end

macro restoreReturnAddressBeforeReturn(sourceRegister)
    if ARMv7
        move sourceRegister, lr
    elsif X86 or X86_64
        push sourceRegister
    else
        error
    end
end

macro traceExecution()
    if EXECUTION_TRACING
        callSlowPath(_llint_trace)
    end
end

macro slowPathForCall(advance, slowPath)
    callCallSlowPath(
        advance,
        slowPath,
        macro (callee)
            call callee
            dispatchAfterCall()
        end)
end

macro checkSwitchToJIT(increment, action)
    if JIT_ENABLED
        loadp CodeBlock[cfr], t0
        baddis increment, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t0], .continue
        action()
    .continue:
    end
end

macro checkSwitchToJITForEpilogue()
    checkSwitchToJIT(
        10,
        macro ()
            callSlowPath(_llint_replace)
        end)
end

macro assertNotConstant(index)
    assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)
end

macro functionForCallCodeBlockGetter(targetRegister)
    loadp Callee[cfr], targetRegister
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister
end

macro functionForConstructCodeBlockGetter(targetRegister)
    loadp Callee[cfr], targetRegister
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister
end

macro notFunctionCodeBlockGetter(targetRegister)
    loadp CodeBlock[cfr], targetRegister
end

macro functionCodeBlockSetter(sourceRegister)
    storep sourceRegister, CodeBlock[cfr]
end

macro notFunctionCodeBlockSetter(sourceRegister)
end

# Do the bare minimum required to execute code. Sets up the PC, leaves the CodeBlock*
# in t1. May also trigger prologue entry OSR.
macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
    preserveReturnAddressAfterCall(t2)

    # Set up the call frame and check if we should OSR.
    storep t2, ReturnPC[cfr]
    if EXECUTION_TRACING
        callSlowPath(traceSlowPath)
    end
    codeBlockGetter(t1)
    if JIT_ENABLED
        baddis 5, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t1], .continue
        cCall2(osrSlowPath, cfr, PC)
        move t1, cfr
        btpz t0, .recover
        loadp ReturnPC[cfr], t2
        restoreReturnAddressBeforeReturn(t2)
        jmp t0
    .recover:
        codeBlockGetter(t1)
    .continue:
    end
    codeBlockSetter(t1)

    # Set up the PC.
    if JSVALUE64
        loadp CodeBlock::m_instructions[t1], PB
        move 0, PC
    else
        loadp CodeBlock::m_instructions[t1], PC
    end
end

# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Must call dispatch(0) after calling this.
macro functionInitialization(profileArgSkip)
    if VALUE_PROFILER
        # Profile the arguments. Unfortunately, we have no choice but to do this. This
        # code is pretty horrendous because of the difference in ordering between
        # arguments and value profiles, the desire to have a simple loop-down-to-zero
        # loop, and the desire to use only three registers so as to preserve the PC and
        # the code block. It is likely that this code should be rewritten in a more
        # optimal way for architectures that have more than five registers available
        # for arbitrary use in the interpreter.
        loadi CodeBlock::m_numParameters[t1], t0
        addp -profileArgSkip, t0 # Use addi because that's what has the peephole
        assert(macro (ok) bpgteq t0, 0, ok end)
        btpz t0, .argumentProfileDone
        loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
        mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
        negp t0
        lshiftp 3, t0
        addp t2, t3
    .argumentProfileLoop:
        if JSVALUE64
            loadp ThisArgumentOffset + 8 - profileArgSkip * 8[cfr, t0], t2
            subp sizeof ValueProfile, t3
            storep t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
        else
            loadi ThisArgumentOffset + TagOffset + 8 - profileArgSkip * 8[cfr, t0], t2
            subp sizeof ValueProfile, t3
            storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
            loadi ThisArgumentOffset + PayloadOffset + 8 - profileArgSkip * 8[cfr, t0], t2
            storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
        end
        baddpnz 8, t0, .argumentProfileLoop
    .argumentProfileDone:
    end

    # Check stack height.
    loadi CodeBlock::m_numCalleeRegisters[t1], t0
    loadp CodeBlock::m_globalData[t1], t2
    loadp JSGlobalData::interpreter[t2], t2 # FIXME: Can get to the RegisterFile from the JITStackFrame
    lshifti 3, t0
    addp t0, cfr, t0
    bpaeq Interpreter::m_registerFile + RegisterFile::m_end[t2], t0, .stackHeightOK

    # Stack height check failed - need to call a slow_path.
    callSlowPath(_llint_register_file_check)
.stackHeightOK:
end

macro allocateBasicJSObject(sizeClassIndex, classInfoOffset, structure, result, scratch1, scratch2, slowCase)
    if ALWAYS_ALLOCATE_SLOW
        jmp slowCase
    else
        const offsetOfMySizeClass =
            JSGlobalData::heap +
            Heap::m_objectSpace +
            MarkedSpace::m_normalSpace +
            MarkedSpace::Subspace::preciseAllocators +
            sizeClassIndex * sizeof MarkedAllocator

        const offsetOfFirstFreeCell =
            MarkedAllocator::m_freeList +
            MarkedBlock::FreeList::head

        # FIXME: we can get the global data in one load from the stack.
        loadp CodeBlock[cfr], scratch1
        loadp CodeBlock::m_globalData[scratch1], scratch1

        # Get the object from the free list.
        loadp offsetOfMySizeClass + offsetOfFirstFreeCell[scratch1], result
        btpz result, slowCase

        # Remove the object from the free list.
        loadp [result], scratch2
        storep scratch2, offsetOfMySizeClass + offsetOfFirstFreeCell[scratch1]

        # Initialize the object.
        loadp classInfoOffset[scratch1], scratch2
        storep scratch2, [result]
        storep structure, JSCell::m_structure[result]
        storep 0, JSObject::m_inheritorID[result]
        addp sizeof JSObject, result, scratch1
        storep scratch1, JSObject::m_propertyStorage[result]
    end
end
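
# Illustratively, a fast allocation site expands the macro like this (the
# registers, the slow-case label, and the exact classInfoOffset field are
# hypothetical; t1 is assumed to hold the Structure*):
#
#     allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t1, t0, t2, t3, .opNewObjectSlow)
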
macro doReturn()
    loadp ReturnPC[cfr], t2
    loadp CallerFrame[cfr], cfr
    restoreReturnAddressBeforeReturn(t2)
    ret
end

# Indicate the beginning of LLInt.
_llint_begin:
    crash()

_llint_program_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)

_llint_eval_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)

_llint_function_for_call_prologue:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
.functionForCallBegin:
    functionInitialization(0)
    dispatch(0)

_llint_function_for_construct_prologue:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
.functionForConstructBegin:
    functionInitialization(1)
    dispatch(0)

_llint_function_for_call_arity_check:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
    functionArityCheck(.functionForCallBegin, _llint_slow_path_call_arityCheck)

_llint_function_for_construct_arity_check:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
    functionArityCheck(.functionForConstructBegin, _llint_slow_path_construct_arityCheck)

# Value-representation-specific code.
if JSVALUE64
    include LowLevelInterpreter64
else
    include LowLevelInterpreter32_64
end

# Value-representation-agnostic code.
_llint_op_new_array:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array)

_llint_op_new_array_buffer:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array_buffer)

_llint_op_new_regexp:
    traceExecution()
    callSlowPath(_llint_slow_path_new_regexp)

_llint_op_less:
    traceExecution()
    callSlowPath(_llint_slow_path_less)

_llint_op_lesseq:
    traceExecution()
    callSlowPath(_llint_slow_path_lesseq)

_llint_op_greater:
    traceExecution()
    callSlowPath(_llint_slow_path_greater)

_llint_op_greatereq:
    traceExecution()
    callSlowPath(_llint_slow_path_greatereq)

_llint_op_mod:
    traceExecution()
    callSlowPath(_llint_slow_path_mod)

_llint_op_typeof:
    traceExecution()
    callSlowPath(_llint_slow_path_typeof)

_llint_op_is_object:
    traceExecution()
    callSlowPath(_llint_slow_path_is_object)

_llint_op_is_function:
    traceExecution()
    callSlowPath(_llint_slow_path_is_function)

_llint_op_in:
    traceExecution()
    callSlowPath(_llint_slow_path_in)

_llint_op_resolve:
    traceExecution()
    callSlowPath(_llint_slow_path_resolve)

_llint_op_resolve_skip:
    traceExecution()
    callSlowPath(_llint_slow_path_resolve_skip)

_llint_op_resolve_base:
    traceExecution()
    callSlowPath(_llint_slow_path_resolve_base)

_llint_op_ensure_property_exists:
    traceExecution()
    callSlowPath(_llint_slow_path_ensure_property_exists)

_llint_op_resolve_with_base:
    traceExecution()
    callSlowPath(_llint_slow_path_resolve_with_base)

_llint_op_resolve_with_this:
    traceExecution()
    callSlowPath(_llint_slow_path_resolve_with_this)

_llint_op_del_by_id:
    traceExecution()
    callSlowPath(_llint_slow_path_del_by_id)

_llint_op_del_by_val:
    traceExecution()
    callSlowPath(_llint_slow_path_del_by_val)

_llint_op_put_by_index:
    traceExecution()
    callSlowPath(_llint_slow_path_put_by_index)

_llint_op_put_getter_setter:
    traceExecution()
    callSlowPath(_llint_slow_path_put_getter_setter)

_llint_op_jmp_scopes:
    traceExecution()
    callSlowPath(_llint_slow_path_jmp_scopes)

_llint_op_loop_if_true:
    jmp _llint_op_jtrue
_llint_op_jtrue:
    traceExecution()
    jumpTrueOrFalse(
        macro (value, target) btinz value, target end,
        _llint_slow_path_jtrue)

_llint_op_loop_if_false:
    jmp _llint_op_jfalse
_llint_op_jfalse:
    traceExecution()
    jumpTrueOrFalse(
        macro (value, target) btiz value, target end,
        _llint_slow_path_jfalse)

_llint_op_loop_if_less:
    jmp _llint_op_jless
_llint_op_jless:
    traceExecution()
    compare(
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdlt left, right, target end,
        _llint_slow_path_jless)

_llint_op_jnless:
    traceExecution()
    compare(
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgtequn left, right, target end,
        _llint_slow_path_jnless)

_llint_op_loop_if_greater:
    jmp _llint_op_jgreater
_llint_op_jgreater:
    traceExecution()
    compare(
        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgt left, right, target end,
        _llint_slow_path_jgreater)

_llint_op_jngreater:
    traceExecution()
    compare(
        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdltequn left, right, target end,
        _llint_slow_path_jngreater)

_llint_op_loop_if_lesseq:
    jmp _llint_op_jlesseq
_llint_op_jlesseq:
    traceExecution()
    compare(
        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdlteq left, right, target end,
        _llint_slow_path_jlesseq)

_llint_op_jnlesseq:
    traceExecution()
    compare(
        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgtun left, right, target end,
        _llint_slow_path_jnlesseq)

_llint_op_loop_if_greatereq:
    jmp _llint_op_jgreatereq
_llint_op_jgreatereq:
    traceExecution()
    compare(
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgteq left, right, target end,
        _llint_slow_path_jgreatereq)

_llint_op_jngreatereq:
    traceExecution()
    compare(
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdltun left, right, target end,
        _llint_slow_path_jngreatereq)

_llint_op_loop_hint:
    traceExecution()
    checkSwitchToJITForLoop()
    dispatch(1)

_llint_op_switch_string:
    traceExecution()
    callSlowPath(_llint_slow_path_switch_string)

_llint_op_new_func_exp:
    traceExecution()
    callSlowPath(_llint_slow_path_new_func_exp)

_llint_op_call:
    traceExecution()
    doCall(_llint_slow_path_call)

_llint_op_construct:
    traceExecution()
    doCall(_llint_slow_path_construct)

_llint_op_call_varargs:
    traceExecution()
    slowPathForCall(6, _llint_slow_path_call_varargs)

_llint_op_call_eval:
    traceExecution()

    # Eval is executed in one of two modes:
    #
    # 1) We find that we're really invoking eval() in which case the
    #    execution is performed entirely inside the slow_path, and it
    #    returns the PC of a function that just returns the return value
    #    that the eval returned.
    #
    # 2) We find that we're invoking something called eval() that is not
    #    the real eval. Then the slow_path returns the PC of the thing to
    #    call, and we call it.
    #
    # This allows us to handle two cases, which would require a total of
    # up to four pieces of state that cannot be easily packed into two
    # registers (C functions can return up to two registers, easily):
    #
    # - The call frame register. This may or may not have been modified
    #   by the slow_path, but the convention is that it returns it. It's not
    #   totally clear if that's necessary, since the cfr is callee save.
    #   But that's our style in this here interpreter so we stick with it.
    #
    # - A bit to say if the slow_path successfully executed the eval and has
    #   the return value, or did not execute the eval but has a PC for us
    #   to call.
    #
    # - The JS return value (two registers), or
    #
    # - The PC to call.
    #
    # It turns out to be easier to just always have this return the cfr
    # and a PC to call, and that PC may be a dummy thunk that just
    # returns the JS value that the eval returned.

    slowPathForCall(4, _llint_slow_path_call_eval)
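
# An illustrative expansion of the protocol above (a sketch; the register
# assignments are hypothetical): the slow path hands back a call frame and a
# code pointer, and we call the latter unconditionally:
#
#     cCall2(_llint_slow_path_call_eval, cfr, PC)
#     move t1, cfr  # returned call frame
#     call t0       # either the real callee or a return-the-value thunk
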
_llint_generic_return_point:
    doReturn()

_llint_op_strcat:
    traceExecution()
    callSlowPath(_llint_slow_path_strcat)

_llint_op_method_check:
    traceExecution()
    # We ignore method checks and use normal get_by_id optimizations.

_llint_op_get_pnames:
    traceExecution()
    callSlowPath(_llint_slow_path_get_pnames)
    dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.

_llint_op_push_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_push_scope)

_llint_op_pop_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_pop_scope)

_llint_op_push_new_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_push_new_scope)

_llint_op_throw:
    traceExecution()
    callSlowPath(_llint_slow_path_throw)

_llint_op_throw_reference_error:
    traceExecution()
    callSlowPath(_llint_slow_path_throw_reference_error)

_llint_op_profile_will_call:
    traceExecution()
    loadp JITStackFrame::enabledProfilerReference[sp], t0
    btpz [t0], .opProfileWillCallDone
    callSlowPath(_llint_slow_path_profile_will_call)
.opProfileWillCallDone:

_llint_op_profile_did_call:
    traceExecution()
    loadp JITStackFrame::enabledProfilerReference[sp], t0
    btpz [t0], .opProfileDidCallDone
    callSlowPath(_llint_slow_path_profile_did_call)
.opProfileDidCallDone:

_llint_op_debug:
    traceExecution()
    callSlowPath(_llint_slow_path_debug)

_llint_native_call_trampoline:
    nativeCallTrampoline(NativeExecutable::m_function)

_llint_native_construct_trampoline:
    nativeCallTrampoline(NativeExecutable::m_constructor)

# Lastly, make sure that we can link even though we don't support all opcodes.
# These opcodes should never arise when using LLInt or either JIT. We assert
# as much.

macro notSupported()
    if ASSERT_ENABLED
        crash()
    else
        # We should use whatever the smallest possible instruction is, just to
        # ensure that there is a gap between instruction labels. If multiple
        # smallest instructions exist, we should pick the one that is most
        # likely to result in execution being halted. Currently that is the break
        # instruction on all architectures we're interested in. (Break is int3
        # on Intel, which is 1 byte, and bkpt on ARMv7, which is 2 bytes.)
        break
    end
end

_llint_op_get_array_length:
    notSupported()

_llint_op_get_by_id_chain:
    notSupported()

_llint_op_get_by_id_custom_chain:
    notSupported()

_llint_op_get_by_id_custom_proto:
    notSupported()

_llint_op_get_by_id_custom_self:
    notSupported()

_llint_op_get_by_id_generic:
    notSupported()

_llint_op_get_by_id_getter_chain:
    notSupported()

_llint_op_get_by_id_getter_proto:
    notSupported()

_llint_op_get_by_id_getter_self:
    notSupported()

_llint_op_get_by_id_proto:
    notSupported()

_llint_op_get_by_id_self:
    notSupported()

_llint_op_get_string_length:
    notSupported()

_llint_op_put_by_id_generic:
    notSupported()

_llint_op_put_by_id_replace:
    notSupported()

_llint_op_put_by_id_transition:
    notSupported()

# Indicate the end of LLInt.
_llint_end:
    crash()