# Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# First come the common protocols that both interpreters use. Note that each
# of these must have an ASSERT() in LLIntData.cpp

# Work-around for the fact that the toolchain's awareness of armv7s results in
# a separate slab in the fat binary, yet the offlineasm doesn't know to expect
# it.
if ARMv7s
end
# These declarations must match interpreter/JSStack.h.

if JSVALUE64
const PtrSize = 8
const CallFrameHeaderSlots = 6
else
const PtrSize = 4
const CallFrameHeaderSlots = 5
const CallFrameAlignSlots = 1
end
const SlotSize = 8

const CallerFrameAndPCSize = 2 * PtrSize
const CallerFrame = 0
const ReturnPC = CallerFrame + PtrSize
const CodeBlock = ReturnPC + PtrSize
const ScopeChain = CodeBlock + SlotSize
const Callee = ScopeChain + SlotSize
const ArgumentCount = Callee + SlotSize
const ThisArgumentOffset = ArgumentCount + SlotSize
const CallFrameHeaderSize = ThisArgumentOffset
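# As a worked example, assuming the 64-bit layout (PtrSize = SlotSize = 8):
# CallerFrame is at offset 0, ReturnPC at 8, CodeBlock at 16, ScopeChain at
# 24, Callee at 32, ArgumentCount at 40, and the this argument begins at 48,
# which is therefore also CallFrameHeaderSize.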
# Some value representation constants.
if JSVALUE64
const TagBitTypeOther = 0x2
const TagBitBool = 0x4
const TagBitUndefined = 0x8
const ValueEmpty = 0x0
const ValueFalse = TagBitTypeOther | TagBitBool
const ValueTrue = TagBitTypeOther | TagBitBool | 1
const ValueUndefined = TagBitTypeOther | TagBitUndefined
const ValueNull = TagBitTypeOther
const TagTypeNumber = 0xffff000000000000
const TagMask = TagTypeNumber | TagBitTypeOther
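# Worked out, the definitions above give: ValueNull = 0x2, ValueFalse = 0x6,
# ValueTrue = 0x7, ValueUndefined = 0xa. Integers have all TagTypeNumber
# bits set; doubles are offset by 2^48 so that some, but not all, of those
# bits are set; a non-zero value with no TagMask bits set is a cell pointer.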
else
const Int32Tag = -1
const BooleanTag = -2
const NullTag = -3
const UndefinedTag = -4
const CellTag = -5
const EmptyValueTag = -6
const DeletedValueTag = -7
const LowestTag = DeletedValueTag
end
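# A note on these tags (assuming the usual JSValue32_64 scheme): the tags
# occupy the unsigned range 0xfffffff9..0xffffffff, so a value holds a
# double exactly when its tag word, compared as unsigned, is below
# LowestTag.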
const CallOpCodeSize = 9

if X86_64 or ARM64 or C_LOOP
    const maxFrameExtentForSlowPathCall = 0
elsif ARM or ARMv7_TRADITIONAL or ARMv7 or SH4
    const maxFrameExtentForSlowPathCall = 24
elsif X86 or X86_WIN
    const maxFrameExtentForSlowPathCall = 40
elsif MIPS
    const maxFrameExtentForSlowPathCall = 40
elsif X86_64_WIN
    const maxFrameExtentForSlowPathCall = 64
end
const ClearWatchpoint = 0
const IsWatched = 1
const IsInvalidated = 2
# Some register conventions.
if JSVALUE64
# - Use a pair of registers to represent the PC: one register for the
#   base of the bytecodes, and one register for the index.
# - The PC base (or PB for short) should be stored in the csr. It will
#   get clobbered on calls to other JS code, but will get saved on calls
#   to C functions.
# - C calls are still given the Instruction* rather than the PC index.
#   This requires an add before the call, and a sub after.
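#   (A sketch of that conversion, assuming the 64-bit macros below: before
#   the call, leap [PB, PC, 8], PC turns the index into an Instruction*;
#   afterwards, subp PB, PC followed by rshiftp 3, PC recovers the index.)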
const PC = t5
const PB = t6
const tagTypeNumber = csr1
const tagMask = csr2
macro loadisFromInstruction(offset, dest)
    loadis offset * 8[PB, PC, 8], dest
end

macro loadpFromInstruction(offset, dest)
    loadp offset * 8[PB, PC, 8], dest
end

macro storepToInstruction(value, offset)
    storep value, offset * 8[PB, PC, 8]
end

else
const PC = t4
macro loadisFromInstruction(offset, dest)
    loadis offset * 4[PC], dest
end

macro loadpFromInstruction(offset, dest)
    loadp offset * 4[PC], dest
end
end
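# A usage sketch: for a bytecode laid out as [opcode, op1, op2, ...],
# loadisFromInstruction(1, t2) fetches op1 of the instruction PC currently
# refers to, whichever value representation is in use.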
# Constants for reasoning about value representation.
if BIG_ENDIAN
const TagOffset = 0
const PayloadOffset = 4
else
const TagOffset = 4
const PayloadOffset = 0
end
# Constants for reasoning about butterflies.
const IsArray = 1
const IndexingShapeMask = 30
const NoIndexingShape = 0
const Int32Shape = 20
const DoubleShape = 22
const ContiguousShape = 26
const ArrayStorageShape = 28
const SlowPutArrayStorageShape = 30
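# A sketch of how these are used (the label is a placeholder): load the
# cell's indexing type, mask, and compare:
#     andi IndexingShapeMask, t2
#     bineq t2, ContiguousShape, .opSomethingSlow
# takes the fast path only when the butterfly has contiguous storage.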
# Type constants.
const StringType = 5
const ObjectType = 18
const FinalObjectType = 19

# Type flags constants.
const MasqueradesAsUndefined = 1
const ImplementsHasInstance = 2
const ImplementsDefaultHasInstance = 8
# Bytecode operand constants.
const FirstConstantRegisterIndex = 0x40000000

# Code type constants.
const GlobalCode = 0
const EvalCode = 1
const FunctionCode = 2

# The interpreter steals the tag word of the argument count.
const LLIntReturnPC = ArgumentCount + TagOffset
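# That is: the ArgumentCount header slot keeps the 32-bit argument count in
# its payload word, while the interpreter stashes the caller's bytecode
# offset in the otherwise-unused tag word so PC can be restored when a call
# returns.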
# String flags.
const HashFlags8BitBuffer = 32

# Copied from PropertyOffset.h
const firstOutOfLineOffset = 100

# ResolveType
const GlobalProperty = 0
const GlobalVar = 1
const ClosureVar = 2
const GlobalPropertyWithVarInjectionChecks = 3
const GlobalVarWithVarInjectionChecks = 4
const ClosureVarWithVarInjectionChecks = 5
const Dynamic = 6

const ResolveModeMask = 0xffff
const MarkedBlockSize = 64 * 1024
const MarkedBlockMask = ~(MarkedBlockSize - 1)
# Constants for checking mark bits.
const AtomNumberShift = 3
const BitMapWordShift = 4
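# Worked out: MarkedBlockSize is 0x10000, so MarkedBlockMask is ~0xffff and
# cell & MarkedBlockMask recovers the MarkedBlock that a cell lives in; the
# two shifts above are then used to index into the block's mark bitmap.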
# Allocation constants
if JSVALUE64
const JSFinalObjectSizeClassIndex = 1
else
const JSFinalObjectSizeClassIndex = 3
end

# This must match wtf/Vector.h
const VectorBufferOffset = 0
if JSVALUE64
const VectorSizeOffset = 12
else
const VectorSizeOffset = 8
end
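# In other words, assuming wtf/Vector.h lays out a buffer pointer followed
# by a 32-bit capacity and a 32-bit size: m_buffer is at offset 0, and
# m_size lands at offset 12 with 8-byte pointers or 8 with 4-byte pointers.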
# Some common utilities.
macro crash()
    if C_LOOP
        cloopCrash
    else
        call _llint_crash
    end
end

macro assert(assertion)
    if ASSERT_ENABLED
        assertion(.ok)
        crash()
    .ok:
    end
end

macro checkStackPointerAlignment(tempReg, location)
    if ARM64 or C_LOOP or SH4
        # ARM64 will check for us!
        # C_LOOP does not need the alignment, and can use a little perf
        # improvement from avoiding useless work.
        # SH4 does not need specific alignment (4 bytes).
    else
        if ARM or ARMv7 or ARMv7_TRADITIONAL
            # ARM can't do logical ops with the sp as a source
            move sp, tempReg
            andp 0xf, tempReg
        else
            andp sp, 0xf, tempReg
        end
        btpz tempReg, .stackPointerOkay
        move location, tempReg
        break
    .stackPointerOkay:
    end
end
macro preserveCallerPCAndCFR()
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        push lr
        push cfr
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        push cfr
    elsif ARM64
        pushLRAndFP
    else
        error
    end
    move sp, cfr
end

macro restoreCallerPCAndCFR()
    move cfr, sp
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        pop cfr
        pop lr
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        pop cfr
    elsif ARM64
        popLRAndFP
    end
end

macro preserveReturnAddressAfterCall(destinationRegister)
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
        # In C_LOOP case, we're only preserving the bytecode vPC.
        move lr, destinationRegister
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        pop destinationRegister
    else
        error
    end
end

macro restoreReturnAddressBeforeReturn(sourceRegister)
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
        # In C_LOOP case, we're only restoring the bytecode vPC.
        move sourceRegister, lr
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        push sourceRegister
    else
        error
    end
end
macro functionPrologue()
    if X86 or X86_WIN or X86_64 or X86_64_WIN
        push cfr
    elsif ARM64
        pushLRAndFP
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        push lr
        push cfr
    end
    move sp, cfr
end

macro functionEpilogue()
    if X86 or X86_WIN or X86_64 or X86_64_WIN
        pop cfr
    elsif ARM64
        popLRAndFP
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        pop cfr
        pop lr
    end
end
macro callToJavaScriptPrologue()
    if X86_64 or X86_64_WIN

    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4

    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
macro callToJavaScriptEpilogue()
    if ARMv7
        addp CallFrameHeaderSlots * 8, cfr, t4
        move t4, sp
    else
        addp CallFrameHeaderSlots * 8, cfr, sp
    end

    loadp CallerFrame[cfr], cfr
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL

    if X86_64 or X86_64_WIN

    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
macro moveStackPointerForCodeBlock(codeBlock, scratch)
    loadi CodeBlock::m_numCalleeRegisters[codeBlock], scratch
    lshiftp 3, scratch
    addp maxFrameExtentForSlowPathCall, scratch
    if ARMv7
        subp cfr, scratch, scratch
        move scratch, sp
    else
        subp cfr, scratch, sp
    end
end

macro restoreStackPointerAfterCall()
    loadp CodeBlock[cfr], t2
    moveStackPointerForCodeBlock(t2, t4)
end
macro traceExecution()
    if EXECUTION_TRACING
        callSlowPath(_llint_trace)
    end
end

macro callTargetFunction(callLinkInfo, calleeFramePtr)
    move calleeFramePtr, sp
    if C_LOOP
        cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
    else
        call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
    end
    restoreStackPointerAfterCall()
    dispatchAfterCall()
end
macro slowPathForCall(slowPath)
    callCallSlowPath(
        slowPath,
        macro (callee)
            btpz t1, .dontUpdateSP
            if ARMv7
                addp CallerFrameAndPCSize, t1, t1
                move t1, sp
            else
                addp CallerFrameAndPCSize, t1, sp
            end
        .dontUpdateSP:
            if C_LOOP
                cloopCallJSFunction callee
            else
                call callee
            end
            restoreStackPointerAfterCall()
            dispatchAfterCall()
        end)
end
macro arrayProfile(cellAndIndexingType, profile, scratch)
    const cell = cellAndIndexingType
    const indexingType = cellAndIndexingType
    loadi JSCell::m_structureID[cell], scratch
    storei scratch, ArrayProfile::m_lastSeenStructureID[profile]
    loadb JSCell::m_indexingType[cell], indexingType
end

macro checkMarkByte(cell, scratch1, scratch2, continuation)
    loadb JSCell::m_gcData[cell], scratch1
    continuation(scratch1)
end

macro checkSwitchToJIT(increment, action)
    loadp CodeBlock[cfr], t0
    baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue
    action()
    .continue:
end
macro checkSwitchToJITForEpilogue()
    checkSwitchToJIT(
        10,
        macro ()
            callSlowPath(_llint_replace)
        end)
end

macro assertNotConstant(index)
    assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)
end
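# For instance, assertNotConstant(t1) does nothing unless ASSERT_ENABLED, in
# which case it crashes if t1 holds an index at or above
# FirstConstantRegisterIndex, i.e. a constant-pool reference rather than a
# real virtual register.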
macro functionForCallCodeBlockGetter(targetRegister)
    loadp Callee[cfr], targetRegister
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister
end

macro functionForConstructCodeBlockGetter(targetRegister)
    loadp Callee[cfr], targetRegister
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister
end

macro notFunctionCodeBlockGetter(targetRegister)
    loadp CodeBlock[cfr], targetRegister
end

macro functionCodeBlockSetter(sourceRegister)
    storep sourceRegister, CodeBlock[cfr]
end

macro notFunctionCodeBlockSetter(sourceRegister)
end
# Do the bare minimum required to execute code. Sets up the PC, leaves the CodeBlock*
# in t1. May also trigger prologue entry OSR.
macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
    # Set up the call frame and check if we should OSR.
    preserveCallerPCAndCFR()

    if EXECUTION_TRACING
        subp maxFrameExtentForSlowPathCall, sp
        callSlowPath(traceSlowPath)
        addp maxFrameExtentForSlowPathCall, sp
    end
    codeBlockGetter(t1)
    if not C_LOOP
        baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue
        if JSVALUE64
            cCall2(osrSlowPath, cfr, PC)
        else
            # We are after the function prologue, but before we have set up sp from the CodeBlock.
            # Temporarily align stack pointer for this call.
            subp 8, sp
            cCall2(osrSlowPath, cfr, PC)
            addp 8, sp
        end
        btpz t0, .recover
        move cfr, sp # restore the previous sp
        # pop the callerFrame since we will jump to a function that wants to save it
        if ARM64
            popLRAndFP
        elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
            pop cfr
            pop lr
        else
            pop cfr
        end
        jmp t0
    .recover:
        codeBlockGetter(t1)
    .continue:
    end
    codeBlockSetter(t1)

    moveStackPointerForCodeBlock(t1, t2)

    # Set up the PC.
    if JSVALUE64
        loadp CodeBlock::m_instructions[t1], PB
        move 0, PC
    else
        loadp CodeBlock::m_instructions[t1], PC
    end
end
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Must call dispatch(0) after calling this.
macro functionInitialization(profileArgSkip)
    # Profile the arguments. Unfortunately, we have no choice but to do this. This
    # code is pretty horrendous because of the difference in ordering between
    # arguments and value profiles, the desire to have a simple loop-down-to-zero
    # loop, and the desire to use only three registers so as to preserve the PC and
    # the code block. It is likely that this code should be rewritten in a more
    # optimal way for architectures that have more than five registers available
    # for arbitrary use in the interpreter.
    loadi CodeBlock::m_numParameters[t1], t0
    addp -profileArgSkip, t0 # Use addi because that's what has the peephole
    assert(macro (ok) bpgteq t0, 0, ok end)
    btpz t0, .argumentProfileDone
    loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
    mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
    lshiftp 3, t0
    addp t2, t3
.argumentProfileLoop:
    if JSVALUE64
        loadq ThisArgumentOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        subp sizeof ValueProfile, t3
        storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
    else
        loadi ThisArgumentOffset + TagOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        subp sizeof ValueProfile, t3
        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
        loadi ThisArgumentOffset + PayloadOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
    end
    baddpnz -8, t0, .argumentProfileLoop
.argumentProfileDone:
    # Check stack height.
    loadi CodeBlock::m_numCalleeRegisters[t1], t0
    loadp CodeBlock::m_vm[t1], t2
    lshiftp 3, t0
    addi maxFrameExtentForSlowPathCall, t0
    subp cfr, t0, t0
    bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK

    # Stack height check failed - need to call a slow_path.
    callSlowPath(_llint_stack_check)
    bpeq t1, 0, .stackHeightOK
    move t1, cfr
.stackHeightOK:
end
macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
    if ALWAYS_ALLOCATE_SLOW
        jmp slowCase
    else
        const offsetOfFirstFreeCell =
            MarkedAllocator::m_freeList +
            MarkedBlock::FreeList::head

        # Get the object from the free list.
        loadp offsetOfFirstFreeCell[allocator], result
        btpz result, slowCase

        # Remove the object from the free list.
        loadp [result], scratch1
        storep scratch1, offsetOfFirstFreeCell[allocator]

        # Initialize the object.
        storep 0, JSObject::m_butterfly[result]
        storeStructureWithTypeInfo(result, structure, scratch1)
    end
end

macro doReturn()
    restoreCallerPCAndCFR()
    ret
end
# stub to call into JavaScript or Native functions
# EncodedJSValue callToJavaScript(void* code, ExecState** vmTopCallFrame, ProtoCallFrame* protoFrame)
# EncodedJSValue callToNativeFunction(void* code, ExecState** vmTopCallFrame, ProtoCallFrame* protoFrame)

if C_LOOP
_llint_call_to_javascript:
else
global _callToJavaScript
_callToJavaScript:
end
doCallToJavaScript(makeJavaScriptCall)


if C_LOOP
_llint_call_to_native_function:
else
global _callToNativeFunction
_callToNativeFunction:
end
doCallToJavaScript(makeHostFunctionCall)
# void sanitizeStackForVMImpl(VM* vm)
global _sanitizeStackForVMImpl
_sanitizeStackForVMImpl:
    loadp VM::m_lastStackTop[vm], address
    bpbeq sp, address, .zeroFillDone

    move 0, zeroValue
.zeroFillLoop:
    storep zeroValue, [address]
    addp PtrSize, address
    bpa sp, address, .zeroFillLoop

.zeroFillDone:
    move sp, address
    storep address, VM::m_lastStackTop[vm]
    ret
# Dummy entry point the C Loop uses to initialize.
macro initPCRelative(pcBase)
    if X86_64 or X86_64_WIN
        call _relativePCBase
    _relativePCBase:
        pop pcBase
    elsif X86 or X86_WIN
        call _relativePCBase
    _relativePCBase:
        pop pcBase
    elsif ARM64
    elsif ARMv7
    _relativePCBase:
        move pc, pcBase
        subp 3, pcBase # Need to back up the PC and set the Thumb2 bit
    elsif ARM or ARMv7_TRADITIONAL
    _relativePCBase:
        move pc, pcBase
        subp 8, pcBase
    elsif MIPS
        crash() # Need to replace with any initialization steps needed to set up PC relative address calculation
    elsif SH4
        mova _relativePCBase, t0
        move t0, pcBase
    end
end
macro setEntryAddress(index, label)
    if X86_64
        leap (label - _relativePCBase)[t1], t0
        move index, t2
        storep t0, [t4, t2, 8]
    elsif X86_64_WIN
        leap (label - _relativePCBase)[t1], t0
        move index, t4
        storep t0, [t2, t4, 8]
    elsif X86 or X86_WIN
        leap (label - _relativePCBase)[t1], t0
        move index, t2
        storep t0, [t4, t2, 4]
    elsif ARM64
        pcrtoaddr label, t1
        move index, t2
        storep t1, [a0, t2, 8]
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
        mvlbl (label - _relativePCBase), t2
        addp t1, t2
        move index, t3
        storep t2, [a0, t3, 4]
    elsif SH4
        move (label - _relativePCBase), t2
        addp t1, t2
        move index, t3
        storep t2, [a0, t3, 4]
        flushcp # Force constant pool flush to avoid "pcrel too far" link error.
    elsif C_LOOP
        crash() # Need to replace with code to turn label into an absolute address and save at index
    end
end
# Entry point for the llint to initialize.

# Include generated bytecode initialization file.
include InitBytecodes
_llint_program_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)


_llint_eval_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)


_llint_function_for_call_prologue:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
    functionInitialization(0)
    dispatch(0)


_llint_function_for_construct_prologue:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
    functionInitialization(1)
    dispatch(0)


_llint_function_for_call_arity_check:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
    functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck)
.functionForCallBegin:
    functionInitialization(0)
    dispatch(0)


_llint_function_for_construct_arity_check:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
    functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck)
.functionForConstructBegin:
    functionInitialization(1)
    dispatch(0)
# Value-representation-specific code.
if JSVALUE64
include LowLevelInterpreter64
else
include LowLevelInterpreter32_64
end
# Value-representation-agnostic code.
_llint_op_touch_entry:
    traceExecution()
    callSlowPath(_slow_path_touch_entry)


_llint_op_new_array:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array)


_llint_op_new_array_with_size:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array_with_size)


_llint_op_new_array_buffer:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array_buffer)


_llint_op_new_regexp:
    traceExecution()
    callSlowPath(_llint_slow_path_new_regexp)


_llint_op_less:
    traceExecution()
    callSlowPath(_slow_path_less)


_llint_op_lesseq:
    traceExecution()
    callSlowPath(_slow_path_lesseq)


_llint_op_greater:
    traceExecution()
    callSlowPath(_slow_path_greater)


_llint_op_greatereq:
    traceExecution()
    callSlowPath(_slow_path_greatereq)


_llint_op_mod:
    traceExecution()
    callSlowPath(_slow_path_mod)


_llint_op_typeof:
    traceExecution()
    callSlowPath(_slow_path_typeof)


_llint_op_is_object:
    traceExecution()
    callSlowPath(_slow_path_is_object)


_llint_op_is_function:
    traceExecution()
    callSlowPath(_slow_path_is_function)


_llint_op_in:
    traceExecution()
    callSlowPath(_slow_path_in)
macro withInlineStorage(object, propertyStorage, continuation)
    # Indicate that the object is the property storage, and that the
    # property storage register is unused.
    continuation(object, propertyStorage)
end

macro withOutOfLineStorage(object, propertyStorage, continuation)
    loadp JSObject::m_butterfly[object], propertyStorage
    # Indicate that the propertyStorage register now points to the
    # property storage, and that the object register may be reused
    # if the object pointer is not needed anymore.
    continuation(propertyStorage, object)
end
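# A usage sketch (the registers and byte offset here are hypothetical): a
# fast path that already knows where a property lives can write
#     withInlineStorage(t0, t3, macro (base, storage)
#         loadp 16[storage], t1 # load the property at byte offset 16
#     end)
# and hand the identical continuation to withOutOfLineStorage when the
# property is out-of-line.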
_llint_op_del_by_id:
    traceExecution()
    callSlowPath(_llint_slow_path_del_by_id)


_llint_op_del_by_val:
    traceExecution()
    callSlowPath(_llint_slow_path_del_by_val)


_llint_op_put_by_index:
    traceExecution()
    callSlowPath(_llint_slow_path_put_by_index)


_llint_op_put_getter_setter:
    traceExecution()
    callSlowPath(_llint_slow_path_put_getter_setter)
_llint_op_jtrue:
    traceExecution()
    jumpTrueOrFalse(
        macro (value, target) btinz value, target end,
        _llint_slow_path_jtrue)


_llint_op_jfalse:
    traceExecution()
    jumpTrueOrFalse(
        macro (value, target) btiz value, target end,
        _llint_slow_path_jfalse)


_llint_op_jless:
    traceExecution()
    compare(
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdlt left, right, target end,
        _llint_slow_path_jless)


_llint_op_jnless:
    traceExecution()
    compare(
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgtequn left, right, target end,
        _llint_slow_path_jnless)


_llint_op_jgreater:
    traceExecution()
    compare(
        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgt left, right, target end,
        _llint_slow_path_jgreater)


_llint_op_jngreater:
    traceExecution()
    compare(
        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdltequn left, right, target end,
        _llint_slow_path_jngreater)


_llint_op_jlesseq:
    traceExecution()
    compare(
        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdlteq left, right, target end,
        _llint_slow_path_jlesseq)


_llint_op_jnlesseq:
    traceExecution()
    compare(
        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgtun left, right, target end,
        _llint_slow_path_jnlesseq)


_llint_op_jgreatereq:
    traceExecution()
    compare(
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgteq left, right, target end,
        _llint_slow_path_jgreatereq)


_llint_op_jngreatereq:
    traceExecution()
    compare(
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdltun left, right, target end,
        _llint_slow_path_jngreatereq)
_llint_op_loop_hint:
    traceExecution()
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_vm[t1], t1
    loadb VM::watchdog+Watchdog::m_timerDidFire[t1], t0
    btbnz t0, .handleWatchdogTimer
.afterWatchdogTimerCheck:
    checkSwitchToJITForLoop()
    dispatch(1)
.handleWatchdogTimer:
    callWatchdogTimerHandler(.throwHandler)
    jmp .afterWatchdogTimerCheck
.throwHandler:
    jmp _llint_throw_from_slow_path_trampoline
_llint_op_switch_string:
    traceExecution()
    callSlowPath(_llint_slow_path_switch_string)
    dispatch(0)


_llint_op_new_func_exp:
    traceExecution()
    callSlowPath(_llint_slow_path_new_func_exp)
_llint_op_call:
    traceExecution()
    arrayProfileForCall()
    doCall(_llint_slow_path_call)


_llint_op_construct:
    traceExecution()
    doCall(_llint_slow_path_construct)
_llint_op_call_varargs:
    traceExecution()
    callSlowPath(_llint_slow_path_size_frame_for_varargs)
    branchIfException(_llint_throw_from_slow_path_trampoline)
    # calleeFrame in t1
    if JSVALUE64
        move t1, sp
    else
        # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
        if ARMv7
            subp t1, CallerFrameAndPCSize, t2
            move t2, sp
        else
            subp t1, CallerFrameAndPCSize, sp
        end
    end
    slowPathForCall(_llint_slow_path_call_varargs)


_llint_op_construct_varargs:
    traceExecution()
    callSlowPath(_llint_slow_path_size_frame_for_varargs)
    branchIfException(_llint_throw_from_slow_path_trampoline)
    # calleeFrame in t1
    if JSVALUE64
        move t1, sp
    else
        # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
        if ARMv7
            subp t1, CallerFrameAndPCSize, t2
            move t2, sp
        else
            subp t1, CallerFrameAndPCSize, sp
        end
    end
    slowPathForCall(_llint_slow_path_construct_varargs)
_llint_op_call_eval:
    traceExecution()

    # Eval is executed in one of two modes:
    #
    # 1) We find that we're really invoking eval() in which case the
    #    execution is performed entirely inside the slow_path, and it
    #    returns the PC of a function that just returns the return value
    #    that the eval returned.
    #
    # 2) We find that we're invoking something called eval() that is not
    #    the real eval. Then the slow_path returns the PC of the thing to
    #    call, and we call it.
    #
    # This allows us to handle two cases, which would require a total of
    # up to four pieces of state that cannot be easily packed into two
    # registers (C functions can return up to two registers, easily):
    #
    # - The call frame register. This may or may not have been modified
    #   by the slow_path, but the convention is that it returns it. It's not
    #   totally clear if that's necessary, since the cfr is callee save.
    #   But that's our style in this here interpreter so we stick with it.
    #
    # - A bit to say if the slow_path successfully executed the eval and has
    #   the return value, or did not execute the eval but has a PC for us
    #   to call.
    #
    # - Either:
    #   - The JS return value (two registers), or
    #
    #   - The PC to call.
    #
    # It turns out to be easier to just always have this return the cfr
    # and a PC to call, and that PC may be a dummy thunk that just
    # returns the JS value that the eval returned.
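    # (Concretely, as the slowPathForCall macro above suggests: the slow
    # path hands back the machine code pointer to call, and t1 carries the
    # callee frame that becomes the new sp.)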
    slowPathForCall(_llint_slow_path_call_eval)


_llint_generic_return_point:
    dispatchAfterCall()
_llint_op_strcat:
    traceExecution()
    callSlowPath(_slow_path_strcat)


_llint_op_get_pnames:
    traceExecution()
    callSlowPath(_llint_slow_path_get_pnames)
    dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.
_llint_op_push_with_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_push_with_scope)


_llint_op_pop_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_pop_scope)


_llint_op_push_name_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_push_name_scope)


_llint_op_throw:
    traceExecution()
    callSlowPath(_llint_slow_path_throw)


_llint_op_throw_static_error:
    traceExecution()
    callSlowPath(_llint_slow_path_throw_static_error)
_llint_op_profile_will_call:
    traceExecution()
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_vm[t0], t0
    loadi VM::m_enabledProfiler[t0], t0
    btpz t0, .opProfilerWillCallDone
    callSlowPath(_llint_slow_path_profile_will_call)
.opProfilerWillCallDone:
    dispatch(2)


_llint_op_profile_did_call:
    traceExecution()
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_vm[t0], t0
    loadi VM::m_enabledProfiler[t0], t0
    btpz t0, .opProfilerDidCallDone
    callSlowPath(_llint_slow_path_profile_did_call)
.opProfilerDidCallDone:
    dispatch(2)
_llint_op_debug:
    traceExecution()
    loadp CodeBlock[cfr], t0
    loadi CodeBlock::m_debuggerRequests[t0], t0
    btiz t0, .opDebugDone
    callSlowPath(_llint_slow_path_debug)
.opDebugDone:
    dispatch(3)
_llint_native_call_trampoline:
    nativeCallTrampoline(NativeExecutable::m_function)


_llint_native_construct_trampoline:
    nativeCallTrampoline(NativeExecutable::m_constructor)
# Lastly, make sure that we can link even though we don't support all opcodes.
# These opcodes should never arise when using LLInt or either JIT. We assert
# below that they aren't seen.

macro notSupported()
    if ASSERT_ENABLED
        crash()
    else
        # We should use whatever the smallest possible instruction is, just to
        # ensure that there is a gap between instruction labels. If multiple
        # smallest instructions exist, we should pick the one that is most
        # likely to result in execution being halted. Currently that is the
        # break instruction on all architectures we're interested in. (Break
        # is int3 on Intel, which is 1 byte, and bkpt on ARMv7, which is 2
        # bytes.)
        break
    end
end


_llint_op_init_global_const_nop: