# Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# First come the common protocols that both interpreters use. Note that each
# of these must have an ASSERT() in LLIntData.cpp

# Work-around for the fact that the toolchain's awareness of armv7s results in
# a separate slab in the fat binary, yet the offlineasm doesn't know to expect
# it.

# These declarations must match interpreter/JSStack.h.
const CallFrameHeaderSlots = 6
const CallFrameHeaderSlots = 5
const CallFrameAlignSlots = 1

const CallerFrameAndPCSize = 2 * PtrSize

const ReturnPC = CallerFrame + PtrSize
const CodeBlock = ReturnPC + PtrSize
const ScopeChain = CodeBlock + SlotSize
const Callee = ScopeChain + SlotSize
const ArgumentCount = Callee + SlotSize
const ThisArgumentOffset = ArgumentCount + SlotSize
const CallFrameHeaderSize = ThisArgumentOffset
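# For reference, the header layout implied by the constants above is:
# CallerFrame, ReturnPC, CodeBlock, ScopeChain, Callee, ArgumentCount,
# with the 'this' argument starting immediately after the header.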
# Some value representation constants.
const TagBitTypeOther = 0x2
const TagBitBool = 0x4
const TagBitUndefined = 0x8
const ValueEmpty = 0x0
const ValueFalse = TagBitTypeOther | TagBitBool
const ValueTrue = TagBitTypeOther | TagBitBool | 1
const ValueUndefined = TagBitTypeOther | TagBitUndefined
const ValueNull = TagBitTypeOther
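# Worked out from the tag bits above: ValueFalse == 0x6, ValueTrue == 0x7,
# ValueUndefined == 0xa, ValueNull == 0x2.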
const UndefinedTag = -4
const EmptyValueTag = -6
const DeletedValueTag = -7
const LowestTag = DeletedValueTag
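# Presumably the tag words used by the split tag/payload (32-bit) value
# representation; they need to stay in sync with JSCJSValue.h.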
const CallOpCodeSize = 9

if X86_64 or ARM64 or C_LOOP
    const maxFrameExtentForSlowPathCall = 0
elsif ARM or ARMv7_TRADITIONAL or ARMv7 or SH4
    const maxFrameExtentForSlowPathCall = 24
    const maxFrameExtentForSlowPathCall = 40
    const maxFrameExtentForSlowPathCall = 40
    const maxFrameExtentForSlowPathCall = 64

const ClearWatchpoint = 0
const IsInvalidated = 2
# Some register conventions.
# - Use a pair of registers to represent the PC: one register for the
#   base of the bytecodes, and one register for the index.
# - The PC base (or PB for short) should be stored in the csr. It will
#   get clobbered on calls to other JS code, but will get saved on calls
# - C calls are still given the Instruction* rather than the PC index.
#   This requires an add before the call, and a sub after.
const tagTypeNumber = csr1
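# The macros below load/store the Nth 8-byte operand slot of the current
# instruction relative to PB (the bytecode base) plus PC (the instruction
# index); the 32-bit variants further down index the Instruction* held in PC
# directly using 4-byte slots.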
macro loadisFromInstruction(offset, dest)
    loadis offset * 8[PB, PC, 8], dest

macro loadpFromInstruction(offset, dest)
    loadp offset * 8[PB, PC, 8], dest

macro storepToInstruction(value, offset)
    storep value, offset * 8[PB, PC, 8]

macro loadisFromInstruction(offset, dest)
    loadis offset * 4[PC], dest

macro loadpFromInstruction(offset, dest)
    loadp offset * 4[PC], dest
# Constants for reasoning about value representation.
const PayloadOffset = 4
const PayloadOffset = 0
# Constants for reasoning about butterflies.
const IndexingShapeMask = 30
const NoIndexingShape = 0
const Int32Shape = 20
const DoubleShape = 22
const ContiguousShape = 26
const ArrayStorageShape = 28
const SlowPutArrayStorageShape = 30

const ObjectType = 18
const FinalObjectType = 19

# Type flags constants.
const MasqueradesAsUndefined = 1
const ImplementsHasInstance = 2
const ImplementsDefaultHasInstance = 8

# Bytecode operand constants.
const FirstConstantRegisterIndex = 0x40000000

# Code type constants.
const FunctionCode = 2

# The interpreter steals the tag word of the argument count.
const LLIntReturnPC = ArgumentCount + TagOffset
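# That is, the LLInt stashes its bytecode PC in the tag half of the
# ArgumentCount slot across calls.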
const HashFlags8BitBuffer = 32

# Copied from PropertyOffset.h
const firstOutOfLineOffset = 100

const GlobalProperty = 0
const GlobalPropertyWithVarInjectionChecks = 3
const GlobalVarWithVarInjectionChecks = 4
const ClosureVarWithVarInjectionChecks = 5

const ResolveModeMask = 0xffff

const MarkedBlockSize = 64 * 1024
const MarkedBlockMask = ~(MarkedBlockSize - 1)
# Constants for checking mark bits.
const AtomNumberShift = 3
const BitMapWordShift = 4

# Allocation constants
const JSFinalObjectSizeClassIndex = 1
const JSFinalObjectSizeClassIndex = 3

# This must match wtf/Vector.h
const VectorBufferOffset = 0
const VectorSizeOffset = 12
const VectorSizeOffset = 8
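# Presumably the buffer-pointer and size-field offsets within WTF::Vector for
# the 64-bit and 32-bit builds respectively.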
# Some common utilities.

macro assert(assertion)

macro checkStackPointerAlignment(tempReg, location)
    if ARM64 or C_LOOP or SH4
        # ARM64 will check for us!
        # C_LOOP does not need the alignment, and can use a little perf
        # improvement from avoiding useless work.
        # SH4 does not need specific alignment (4 bytes).
    if ARM or ARMv7 or ARMv7_TRADITIONAL
        # ARM can't do logical ops with the sp as a source
    andp sp, 0xf, tempReg
    btpz tempReg, .stackPointerOkay
    move location, tempReg
macro preserveCallerPCAndCFR()
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN

macro restoreCallerPCAndCFR()
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
macro preserveReturnAddressAfterCall(destinationRegister)
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
        # In the C_LOOP case, we're only preserving the bytecode vPC.
        move lr, destinationRegister
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        pop destinationRegister

macro restoreReturnAddressBeforeReturn(sourceRegister)
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
        # In the C_LOOP case, we're only restoring the bytecode vPC.
        move sourceRegister, lr
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
macro functionPrologue()
    if X86 or X86_WIN or X86_64 or X86_64_WIN
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4

macro functionEpilogue()
    if X86 or X86_WIN or X86_64 or X86_64_WIN
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4

macro callToJavaScriptPrologue()
    if X86_64 or X86_64_WIN
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL

macro callToJavaScriptEpilogue()
    addp CallFrameHeaderSlots * 8, cfr, t4
    addp CallFrameHeaderSlots * 8, cfr, sp
    loadp CallerFrame[cfr], cfr
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
    if X86_64 or X86_64_WIN
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
macro moveStackPointerForCodeBlock(codeBlock, scratch)
    loadi CodeBlock::m_numCalleeRegisters[codeBlock], scratch
    addp maxFrameExtentForSlowPathCall, scratch
    subp cfr, scratch, scratch
    subp cfr, scratch, sp

macro restoreStackPointerAfterCall()
    loadp CodeBlock[cfr], t2
    moveStackPointerForCodeBlock(t2, t4)

macro traceExecution()
    callSlowPath(_llint_trace)

macro callTargetFunction(callLinkInfo, calleeFramePtr)
    move calleeFramePtr, sp
    cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
    call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
    restoreStackPointerAfterCall()
macro slowPathForCall(slowPath)
    btpz t1, .dontUpdateSP
    addp CallerFrameAndPCSize, t1, t1
    addp CallerFrameAndPCSize, t1, sp
    cloopCallJSFunction callee
    restoreStackPointerAfterCall()
macro arrayProfile(cellAndIndexingType, profile, scratch)
    const cell = cellAndIndexingType
    const indexingType = cellAndIndexingType
    loadi JSCell::m_structureID[cell], scratch
    storei scratch, ArrayProfile::m_lastSeenStructureID[profile]
    loadb JSCell::m_indexingType[cell], indexingType
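# Loads the cell's GC mark byte (m_gcData) and hands it to the continuation,
# which decides what to do based on the cell's mark state.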
macro checkMarkByte(cell, scratch1, scratch2, continuation)
    loadb JSCell::m_gcData[cell], scratch1
    continuation(scratch1)
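# Bumps the CodeBlock's LLInt execution counter by the given increment; once
# the counter crosses its threshold we fall through to the action, which
# typically tries to tier up to the baseline JIT.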
macro checkSwitchToJIT(increment, action)
    loadp CodeBlock[cfr], t0
    baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue

macro checkSwitchToJITForEpilogue()
    callSlowPath(_llint_replace)

macro assertNotConstant(index)
    assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)

macro functionForCallCodeBlockGetter(targetRegister)
    loadp Callee[cfr], targetRegister
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister

macro functionForConstructCodeBlockGetter(targetRegister)
    loadp Callee[cfr], targetRegister
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister

macro notFunctionCodeBlockGetter(targetRegister)
    loadp CodeBlock[cfr], targetRegister

macro functionCodeBlockSetter(sourceRegister)
    storep sourceRegister, CodeBlock[cfr]

macro notFunctionCodeBlockSetter(sourceRegister)
# Do the bare minimum required to execute code. Sets up the PC, leaving the
# CodeBlock* in t1. May also trigger prologue entry OSR.
macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
    # Set up the call frame and check if we should OSR.
    preserveCallerPCAndCFR()
    subp maxFrameExtentForSlowPathCall, sp
    callSlowPath(traceSlowPath)
    addp maxFrameExtentForSlowPathCall, sp
    baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue
    cCall2(osrSlowPath, cfr, PC)
    # We are after the function prologue, but before we have set up sp from the CodeBlock.
    # Temporarily align stack pointer for this call.
    cCall2(osrSlowPath, cfr, PC)
    move cfr, sp # restore the previous sp
    # pop the callerFrame since we will jump to a function that wants to save it
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
    moveStackPointerForCodeBlock(t1, t2)
    loadp CodeBlock::m_instructions[t1], PB
    loadp CodeBlock::m_instructions[t1], PC
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Must call dispatch(0) after calling this.
macro functionInitialization(profileArgSkip)
    # Profile the arguments. Unfortunately, we have no choice but to do this. This
    # code is pretty horrendous because of the difference in ordering between
    # arguments and value profiles, the desire to have a simple loop-down-to-zero
    # loop, and the desire to use only three registers so as to preserve the PC and
    # the code block. It is likely that this code should be rewritten in a more
    # optimal way for architectures that have more than five registers available
    # for arbitrary use in the interpreter.
    loadi CodeBlock::m_numParameters[t1], t0
    addp -profileArgSkip, t0 # Use addi because that's what has the peephole
    assert(macro (ok) bpgteq t0, 0, ok end)
    btpz t0, .argumentProfileDone
    loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
    mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
.argumentProfileLoop:
    loadq ThisArgumentOffset - 8 + profileArgSkip * 8[cfr, t0], t2
    subp sizeof ValueProfile, t3
    storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
    loadi ThisArgumentOffset + TagOffset - 8 + profileArgSkip * 8[cfr, t0], t2
    subp sizeof ValueProfile, t3
    storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
    loadi ThisArgumentOffset + PayloadOffset - 8 + profileArgSkip * 8[cfr, t0], t2
    storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
    baddpnz -8, t0, .argumentProfileLoop
.argumentProfileDone:
    # Check stack height.
    loadi CodeBlock::m_numCalleeRegisters[t1], t0
    loadp CodeBlock::m_vm[t1], t2
    addi maxFrameExtentForSlowPathCall, t0
    bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK
    # Stack height check failed - need to call a slow_path.
    callSlowPath(_llint_stack_check)
    bpeq t1, 0, .stackHeightOK
macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
    if ALWAYS_ALLOCATE_SLOW
    const offsetOfFirstFreeCell =
        MarkedAllocator::m_freeList +
        MarkedBlock::FreeList::head

    # Get the object from the free list.
    loadp offsetOfFirstFreeCell[allocator], result
    btpz result, slowCase

    # Remove the object from the free list.
    loadp [result], scratch1
    storep scratch1, offsetOfFirstFreeCell[allocator]

    # Initialize the object.
    storep 0, JSObject::m_butterfly[result]
    storeStructureWithTypeInfo(result, structure, scratch1)

    restoreCallerPCAndCFR()
# Stubs to call into JavaScript or native functions.
# EncodedJSValue callToJavaScript(void* code, ExecState** vmTopCallFrame, ProtoCallFrame* protoFrame)
# EncodedJSValue callToNativeFunction(void* code, ExecState** vmTopCallFrame, ProtoCallFrame* protoFrame)
_llint_call_to_javascript:
global _callToJavaScript
    doCallToJavaScript(makeJavaScriptCall)

_llint_call_to_native_function:
global _callToNativeFunction
_callToNativeFunction:
    doCallToJavaScript(makeHostFunctionCall)
# void sanitizeStackForVMImpl(VM* vm)
global _sanitizeStackForVMImpl
_sanitizeStackForVMImpl:
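    # Scrub (zero-fill) the stack between the VM's last recorded stack top and
    # the current sp, then record the new stack top, presumably so that stale
    # values left on the stack are not mistaken for live pointers.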
    loadp VM::m_lastStackTop[vm], address
    bpbeq sp, address, .zeroFillDone
    storep zeroValue, [address]
    addp PtrSize, address
    bpa sp, address, .zeroFillLoop
    storep address, VM::m_lastStackTop[vm]
# Dummy entry point the C Loop uses to initialize.

macro initPCRelative(pcBase)
    if X86_64 or X86_64_WIN
    subp 3, pcBase # Need to back up the PC and set the Thumb2 bit
    elsif ARM or ARMv7_TRADITIONAL
    crash() # Need to replace with any initialization steps needed to set up PC relative address calculation
    mova _relativePCBase, t0

macro setEntryAddress(index, label)
    leap (label - _relativePCBase)[t1], t0
    storep t0, [t4, t2, 8]
    leap (label - _relativePCBase)[t1], t0
    storep t0, [t2, t4, 8]
    leap (label - _relativePCBase)[t1], t0
    storep t0, [t4, t2, 4]
    storep t1, [a0, t2, 8]
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
        mvlbl (label - _relativePCBase), t2
        storep t2, [a0, t3, 4]
    move (label - _relativePCBase), t2
    storep t2, [a0, t3, 4]
    flushcp # Force constant pool flush to avoid "pcrel too far" link error.
    crash() # Need to replace with code to turn label into an absolute address and save at index
# Entry point for the llint to initialize.

# Include generated bytecode initialization file.
include InitBytecodes
_llint_program_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)

_llint_eval_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)

_llint_function_for_call_prologue:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
    functionInitialization(0)

_llint_function_for_construct_prologue:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
    functionInitialization(1)

_llint_function_for_call_arity_check:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
    functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck)
.functionForCallBegin:
    functionInitialization(0)

_llint_function_for_construct_arity_check:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
    functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck)
.functionForConstructBegin:
    functionInitialization(1)
# Value-representation-specific code.
include LowLevelInterpreter64
include LowLevelInterpreter32_64
# Value-representation-agnostic code.
_llint_op_touch_entry:
    callSlowPath(_slow_path_touch_entry)

    callSlowPath(_llint_slow_path_new_array)

_llint_op_new_array_with_size:
    callSlowPath(_llint_slow_path_new_array_with_size)

_llint_op_new_array_buffer:
    callSlowPath(_llint_slow_path_new_array_buffer)

_llint_op_new_regexp:
    callSlowPath(_llint_slow_path_new_regexp)

    callSlowPath(_slow_path_less)
    callSlowPath(_slow_path_lesseq)
    callSlowPath(_slow_path_greater)
    callSlowPath(_slow_path_greatereq)
    callSlowPath(_slow_path_mod)
    callSlowPath(_slow_path_typeof)
    callSlowPath(_slow_path_is_object)

_llint_op_is_function:
    callSlowPath(_slow_path_is_function)

    callSlowPath(_slow_path_in)
macro withInlineStorage(object, propertyStorage, continuation)
    # Indicate that the object is the property storage, and that the
    # property storage register is unused.
    continuation(object, propertyStorage)

macro withOutOfLineStorage(object, propertyStorage, continuation)
    loadp JSObject::m_butterfly[object], propertyStorage
    # Indicate that the propertyStorage register now points to the
    # property storage, and that the object register may be reused
    # if the object pointer is not needed anymore.
    continuation(propertyStorage, object)

    callSlowPath(_llint_slow_path_del_by_id)

_llint_op_del_by_val:
    callSlowPath(_llint_slow_path_del_by_val)

_llint_op_put_by_index:
    callSlowPath(_llint_slow_path_put_by_index)

_llint_op_put_getter_setter:
    callSlowPath(_llint_slow_path_put_getter_setter)
        macro (value, target) btinz value, target end,
        _llint_slow_path_jtrue)

        macro (value, target) btiz value, target end,
        _llint_slow_path_jfalse)

        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdlt left, right, target end,
        _llint_slow_path_jless)

        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgtequn left, right, target end,
        _llint_slow_path_jnless)

        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgt left, right, target end,
        _llint_slow_path_jgreater)

        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdltequn left, right, target end,
        _llint_slow_path_jngreater)

        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdlteq left, right, target end,
        _llint_slow_path_jlesseq)

        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgtun left, right, target end,
        _llint_slow_path_jnlesseq)

_llint_op_jgreatereq:
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgteq left, right, target end,
        _llint_slow_path_jgreatereq)

_llint_op_jngreatereq:
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdltun left, right, target end,
        _llint_slow_path_jngreatereq)
_llint_op_loop_hint:
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_vm[t1], t1
    loadb VM::watchdog+Watchdog::m_timerDidFire[t1], t0
    btbnz t0, .handleWatchdogTimer
.afterWatchdogTimerCheck:
    checkSwitchToJITForLoop()
.handleWatchdogTimer:
    callWatchdogTimerHandler(.throwHandler)
    jmp .afterWatchdogTimerCheck
    jmp _llint_throw_from_slow_path_trampoline

_llint_op_switch_string:
    callSlowPath(_llint_slow_path_switch_string)

_llint_op_new_func_exp:
    callSlowPath(_llint_slow_path_new_func_exp)

    arrayProfileForCall()
    doCall(_llint_slow_path_call)

_llint_op_construct:
    doCall(_llint_slow_path_construct)
_llint_op_call_varargs:
    callSlowPath(_llint_slow_path_size_frame_for_varargs)
    branchIfException(_llint_throw_from_slow_path_trampoline)
    # The calleeFrame is not stack aligned; move it down by CallerFrameAndPCSize to align.
    subp t1, CallerFrameAndPCSize, t2
    subp t1, CallerFrameAndPCSize, sp
    slowPathForCall(_llint_slow_path_call_varargs)

_llint_op_construct_varargs:
    callSlowPath(_llint_slow_path_size_frame_for_varargs)
    branchIfException(_llint_throw_from_slow_path_trampoline)
    # The calleeFrame is not stack aligned; move it down by CallerFrameAndPCSize to align.
    subp t1, CallerFrameAndPCSize, t2
    subp t1, CallerFrameAndPCSize, sp
    slowPathForCall(_llint_slow_path_construct_varargs)
_llint_op_call_eval:
    # Eval is executed in one of two modes:
    #
    # 1) We find that we're really invoking eval() in which case the
    #    execution is performed entirely inside the slow_path, and it
    #    returns the PC of a function that just returns the return value
    #    that the eval returned.
    #
    # 2) We find that we're invoking something called eval() that is not
    #    the real eval. Then the slow_path returns the PC of the thing to
    #    call, and we call it.
    #
    # This allows us to handle two cases, which would require a total of
    # up to four pieces of state that cannot be easily packed into two
    # registers (C functions can return up to two registers, easily):
    #
    # - The call frame register. This may or may not have been modified
    #   by the slow_path, but the convention is that it returns it. It's not
    #   totally clear if that's necessary, since the cfr is callee save.
    #   But that's our style in this here interpreter so we stick with it.
    #
    # - A bit to say if the slow_path successfully executed the eval and has
    #   the return value, or did not execute the eval but has a PC for us
    #
    #   - The JS return value (two registers), or
    #
    # It turns out to be easier to just always have this return the cfr
    # and a PC to call, and that PC may be a dummy thunk that just
    # returns the JS value that the eval returned.
    slowPathForCall(_llint_slow_path_call_eval)
_llint_generic_return_point:

    callSlowPath(_slow_path_strcat)

_llint_op_get_pnames:
    callSlowPath(_llint_slow_path_get_pnames)
    dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.

_llint_op_push_with_scope:
    callSlowPath(_llint_slow_path_push_with_scope)

_llint_op_pop_scope:
    callSlowPath(_llint_slow_path_pop_scope)

_llint_op_push_name_scope:
    callSlowPath(_llint_slow_path_push_name_scope)

    callSlowPath(_llint_slow_path_throw)

_llint_op_throw_static_error:
    callSlowPath(_llint_slow_path_throw_static_error)

_llint_op_profile_will_call:
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_vm[t0], t0
    loadi VM::m_enabledProfiler[t0], t0
    btpz t0, .opProfilerWillCallDone
    callSlowPath(_llint_slow_path_profile_will_call)
.opProfilerWillCallDone:

_llint_op_profile_did_call:
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_vm[t0], t0
    loadi VM::m_enabledProfiler[t0], t0
    btpz t0, .opProfilerDidCallDone
    callSlowPath(_llint_slow_path_profile_did_call)
.opProfilerDidCallDone:

    loadp CodeBlock[cfr], t0
    loadi CodeBlock::m_debuggerRequests[t0], t0
    btiz t0, .opDebugDone
    callSlowPath(_llint_slow_path_debug)

_llint_native_call_trampoline:
    nativeCallTrampoline(NativeExecutable::m_function)

_llint_native_construct_trampoline:
    nativeCallTrampoline(NativeExecutable::m_constructor)

# Lastly, make sure that we can link even though we don't support all opcodes.
# These opcodes should never arise when using LLInt or either JIT. We assert
macro notSupported()
    # We should use whatever the smallest possible instruction is, just to
    # ensure that there is a gap between instruction labels. If multiple
    # smallest instructions exist, we should pick the one that is most
    # likely to result in execution being halted. Currently that is the break
    # instruction on all architectures we're interested in. (Break is int3
    # on Intel, which is 1 byte, and bkpt on ARMv7, which is 2 bytes.)
_llint_op_init_global_const_nop: