# Copyright (C) 2011-2015 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.

# First come the common protocols that both interpreters use. Note that each
# of these must have an ASSERT() in LLIntData.cpp

# Work-around for the fact that the toolchain's awareness of armv7k / armv7s
# results in a separate slab in the fat binary, yet the offlineasm doesn't know
# to expect it.

# These declarations must match interpreter/JSStack.h.

if JSVALUE64
    const PtrSize = 8
    const CallFrameHeaderSlots = 5
else
    const PtrSize = 4
    const CallFrameHeaderSlots = 4
    const CallFrameAlignSlots = 1
end
const SlotSize = 8

const JSEnvironmentRecord_variables = (sizeof JSEnvironmentRecord + SlotSize - 1) & ~(SlotSize - 1)
const DirectArguments_storage = (sizeof DirectArguments + SlotSize - 1) & ~(SlotSize - 1)

const StackAlignment = 16
const StackAlignmentMask = StackAlignment - 1

const CallerFrameAndPCSize = 2 * PtrSize
const CallerFrame = 0
const ReturnPC = CallerFrame + PtrSize
const CodeBlock = ReturnPC + PtrSize
const Callee = CodeBlock + SlotSize
const ArgumentCount = Callee + SlotSize
const ThisArgumentOffset = ArgumentCount + SlotSize
const FirstArgumentOffset = ThisArgumentOffset + SlotSize
const CallFrameHeaderSize = ThisArgumentOffset
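
# As a worked example of the layout above (assuming a 64-bit build, where
# PtrSize and SlotSize are both 8 and CallerFrame sits at offset 0): ReturnPC
# is at 8, CodeBlock at 16, Callee at 24, ArgumentCount at 32, and the 'this'
# argument at 40, so CallFrameHeaderSize is 40 bytes (5 slots) and explicit
# arguments start at offset 48.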

# Some value representation constants.
const TagBitTypeOther = 0x2
const TagBitBool = 0x4
const TagBitUndefined = 0x8
const ValueEmpty = 0x0
const ValueFalse = TagBitTypeOther | TagBitBool
const ValueTrue = TagBitTypeOther | TagBitBool | 1
const ValueUndefined = TagBitTypeOther | TagBitUndefined
const ValueNull = TagBitTypeOther
const TagTypeNumber = 0xffff000000000000
const TagMask = TagTypeNumber | TagBitTypeOther

const UndefinedTag = -4
const EmptyValueTag = -6
const DeletedValueTag = -7
const LowestTag = DeletedValueTag
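
# A quick worked example of the 64-bit encoding above (a sketch in C terms,
# not code that exists in this file): false encodes as TagBitTypeOther |
# TagBitBool = 0x6, true as 0x7, null as 0x2, and undefined as 0xa, while
# boxed int32s carry TagTypeNumber in their upper 16 bits. The corresponding
# predicates look roughly like:
#
#     bool isInt32(int64_t bits)   { return (bits & TagTypeNumber) == TagTypeNumber; }
#     bool isCell(int64_t bits)    { return !(bits & TagMask); }
#     bool isBoolean(int64_t bits) { return (bits & ~1) == ValueFalse; }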

const CallOpCodeSize = 9

if X86_64 or ARM64 or C_LOOP
    const maxFrameExtentForSlowPathCall = 0
elsif ARM or ARMv7_TRADITIONAL or ARMv7 or SH4
    const maxFrameExtentForSlowPathCall = 24
const maxFrameExtentForSlowPathCall = 40
const maxFrameExtentForSlowPathCall = 40
const maxFrameExtentForSlowPathCall = 64

const ClearWatchpoint = 0
const IsWatched = 1
const IsInvalidated = 2

# Some register conventions.
if JSVALUE64
    # - Use a pair of registers to represent the PC: one register for the
    #   base of the bytecodes, and one register for the index.
    # - The PC base (or PB for short) should be stored in the csr. It will
    #   get clobbered on calls to other JS code, but will get saved on calls
    #   to C functions.
    # - C calls are still given the Instruction* rather than the PC index.
    #   This requires an add before the call, and a sub after.
    const tagTypeNumber = csr1

    macro loadisFromInstruction(offset, dest)
        loadis offset * 8[PB, PC, 8], dest
    end

    macro loadpFromInstruction(offset, dest)
        loadp offset * 8[PB, PC, 8], dest
    end

    macro storepToInstruction(value, offset)
        storep value, offset * 8[PB, PC, 8]
    end
else
    macro loadisFromInstruction(offset, dest)
        loadis offset * 4[PC], dest
    end

    macro loadpFromInstruction(offset, dest)
        loadp offset * 4[PC], dest
    end
end
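
# To make the convention above concrete (an illustrative note derived from the
# addressing mode used by the 64-bit macros above): the Instruction* for the
# current pc is PB + PC * 8, so before a C call the index held in PC is turned
# back into that pointer, and after the call the pointer that comes back is
# converted to an index again by subtracting PB and dividing by 8.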

# Constants for reasoning about value representation.
if BIG_ENDIAN
    const TagOffset = 0
    const PayloadOffset = 4
else
    const TagOffset = 4
    const PayloadOffset = 0
end

# Constant for reasoning about butterflies.
const IndexingShapeMask = 30
const NoIndexingShape = 0
const Int32Shape = 20
const DoubleShape = 22
const ContiguousShape = 26
const ArrayStorageShape = 28
const SlowPutArrayStorageShape = 30

const ObjectType = 18
const FinalObjectType = 19

# Type flags constants.
const MasqueradesAsUndefined = 1
const ImplementsHasInstance = 2
const ImplementsDefaultHasInstance = 8

# Bytecode operand constants.
const FirstConstantRegisterIndex = 0x40000000

# Code type constants.
const GlobalCode = 0
const EvalCode = 1
const FunctionCode = 2

# The interpreter steals the tag word of the argument count.
const LLIntReturnPC = ArgumentCount + TagOffset
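
# In other words (an explanatory note): the ArgumentCount header slot only
# needs its 32-bit payload to hold the actual count, so the LLInt stashes the
# caller's bytecode offset in the slot's tag half across calls, e.g. (an
# illustrative store, not a line from this file):
#
#     storei PC, ArgumentCount + TagOffset[cfr]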

const HashFlags8BitBuffer = 8

# Copied from PropertyOffset.h
const firstOutOfLineOffset = 100

# ResolveType
const GlobalProperty = 0
const GlobalVar = 1
const ClosureVar = 2
const LocalClosureVar = 3
const GlobalPropertyWithVarInjectionChecks = 4
const GlobalVarWithVarInjectionChecks = 5
const ClosureVarWithVarInjectionChecks = 6

const ResolveModeMask = 0xffff

const MarkedBlockSize = 16 * 1024
const MarkedBlockMask = ~(MarkedBlockSize - 1)
# Constants for checking mark bits.
const AtomNumberShift = 3
const BitMapWordShift = 4
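
# As an illustration of how these are meant to be used (a sketch, not a macro
# defined here): MarkedBlocks are 16KB-sized and 16KB-aligned, so the block
# that owns a cell can be recovered by masking the cell pointer,
#
#     andp MarkedBlockMask, cellPointer   # round the cell down to its owning block
#
# and the shifts above then locate the cell's mark bit inside that block's
# bitmap.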

# Allocation constants
if JSVALUE64
    const JSFinalObjectSizeClassIndex = 1
else
    const JSFinalObjectSizeClassIndex = 3
end

# This must match wtf/Vector.h
const VectorBufferOffset = 0
if JSVALUE64
    const VectorSizeOffset = 12
else
    const VectorSizeOffset = 8
end
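
# For example (illustrative only): walking a WTF::Vector from assembly means
# loading the buffer pointer from VectorBufferOffset and the element count
# from VectorSizeOffset, which is exactly what functionInitialization() does
# below with
#
#     loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3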

# Some common utilities.
macro assert(assertion)

macro checkStackPointerAlignment(tempReg, location)
    if ARM64 or C_LOOP or SH4
        # ARM64 will check for us!
        # C_LOOP does not need the alignment, and can use a little perf
        # improvement from avoiding useless work.
        # SH4 does not need specific alignment (4 bytes).
    if ARM or ARMv7 or ARMv7_TRADITIONAL
        # ARM can't do logical ops with the sp as a source
        andp StackAlignmentMask, tempReg
    andp sp, StackAlignmentMask, tempReg
    btpz tempReg, .stackPointerOkay
    move location, tempReg

    const CalleeSaveRegisterCount = 0
elsif ARM or ARMv7_TRADITIONAL or ARMv7
    const CalleeSaveRegisterCount = 7
const CalleeSaveRegisterCount = 10
elsif SH4 or X86_64 or MIPS
    const CalleeSaveRegisterCount = 5
const CalleeSaveRegisterCount = 3
const CalleeSaveRegisterCount = 7

const CalleeRegisterSaveSize = CalleeSaveRegisterCount * PtrSize

# VMEntryTotalFrameSize includes the space for struct VMEntryRecord and the
# callee save registers, rounded up to keep the stack aligned.
const VMEntryTotalFrameSize = (CalleeRegisterSaveSize + sizeof VMEntryRecord + StackAlignment - 1) & ~StackAlignmentMask
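
# A worked example of the rounding above (illustrative numbers only): if
# CalleeRegisterSaveSize + sizeof VMEntryRecord came to 88 bytes, adding
# StackAlignment - 1 (15) gives 103, and masking with ~StackAlignmentMask
# clears the low four bits, yielding 96 -- the next multiple of 16. Totals
# that are already 16-byte aligned are left unchanged.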

macro pushCalleeSaves()
    elsif ARM or ARMv7_TRADITIONAL
        emit "push {r4-r6, r8-r11}"

        emit "stp x20, x19, [sp, #-16]!"
        emit "stp x22, x21, [sp, #-16]!"
        emit "stp x24, x23, [sp, #-16]!"
        emit "stp x26, x25, [sp, #-16]!"
        emit "stp x28, x27, [sp, #-16]!"

        emit "addiu $sp, $sp, -20"
        emit "sw $20, 16($sp)"
        emit "sw $19, 12($sp)"
        emit "sw $18, 8($sp)"
        emit "sw $17, 4($sp)"
        emit "sw $16, 0($sp)"

        emit "mov.l r13, @-r15"
        emit "mov.l r11, @-r15"
        emit "mov.l r10, @-r15"
        emit "mov.l r9, @-r15"
        emit "mov.l r8, @-r15"

macro popCalleeSaves()
    elsif ARM or ARMv7_TRADITIONAL
        emit "pop {r4-r6, r8-r11}"

        emit "ldp x28, x27, [sp], #16"
        emit "ldp x26, x25, [sp], #16"
        emit "ldp x24, x23, [sp], #16"
        emit "ldp x22, x21, [sp], #16"
        emit "ldp x20, x19, [sp], #16"

        emit "lw $16, 0($sp)"
        emit "lw $17, 4($sp)"
        emit "lw $18, 8($sp)"
        emit "lw $19, 12($sp)"
        emit "lw $20, 16($sp)"
        emit "addiu $sp, $sp, 20"

        emit "mov.l @r15+, r8"
        emit "mov.l @r15+, r9"
        emit "mov.l @r15+, r10"
        emit "mov.l @r15+, r11"
        emit "mov.l @r15+, r13"

macro preserveCallerPCAndCFR()
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN

macro restoreCallerPCAndCFR()
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN

macro preserveReturnAddressAfterCall(destinationRegister)
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
        # In C_LOOP case, we're only preserving the bytecode vPC.
        move lr, destinationRegister
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        pop destinationRegister

macro restoreReturnAddressBeforeReturn(sourceRegister)
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
        # In C_LOOP case, we're only restoring the bytecode vPC.
        move sourceRegister, lr
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN

macro functionPrologue()
    if X86 or X86_WIN or X86_64 or X86_64_WIN
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4

macro functionEpilogue()
    if X86 or X86_WIN or X86_64 or X86_64_WIN
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4

macro vmEntryRecord(entryFramePointer, resultReg)
    subp entryFramePointer, VMEntryTotalFrameSize, resultReg
end

macro getFrameRegisterSizeForCodeBlock(codeBlock, size)
    loadi CodeBlock::m_numCalleeRegisters[codeBlock], size
    addp maxFrameExtentForSlowPathCall, size
end

macro restoreStackPointerAfterCall()
    loadp CodeBlock[cfr], t2
    getFrameRegisterSizeForCodeBlock(t2, t4)

macro traceExecution()
    callSlowPath(_llint_trace)
end

macro callTargetFunction(callLinkInfo, calleeFramePtr)
    move calleeFramePtr, sp
    if C_LOOP
        cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
    else
        call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
    end
    restoreStackPointerAfterCall()

macro slowPathForCall(slowPath)
    btpz t1, .dontUpdateSP
    addp CallerFrameAndPCSize, t1, t1
    addp CallerFrameAndPCSize, t1, sp
    cloopCallJSFunction callee
    restoreStackPointerAfterCall()

macro arrayProfile(cellAndIndexingType, profile, scratch)
    const cell = cellAndIndexingType
    const indexingType = cellAndIndexingType
    loadi JSCell::m_structureID[cell], scratch
    storei scratch, ArrayProfile::m_lastSeenStructureID[profile]
    loadb JSCell::m_indexingType[cell], indexingType
end

macro skipIfIsRememberedOrInEden(cell, scratch1, scratch2, continuation)
    loadb JSCell::m_gcData[cell], scratch1
    continuation(scratch1)
end

macro notifyWrite(set, slow)
    bbneq WatchpointSet::m_state[set], IsInvalidated, slow
end

macro checkSwitchToJIT(increment, action)
    loadp CodeBlock[cfr], t0
    baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue

macro checkSwitchToJITForEpilogue()
    callSlowPath(_llint_replace)

macro assertNotConstant(index)
    assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)
end

macro functionForCallCodeBlockGetter(targetRegister)
    if JSVALUE64
        loadp Callee[cfr], targetRegister
    else
        loadp Callee + PayloadOffset[cfr], targetRegister
    end
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister
end

macro functionForConstructCodeBlockGetter(targetRegister)
    if JSVALUE64
        loadp Callee[cfr], targetRegister
    else
        loadp Callee + PayloadOffset[cfr], targetRegister
    end
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister
end

macro notFunctionCodeBlockGetter(targetRegister)
    loadp CodeBlock[cfr], targetRegister
end

macro functionCodeBlockSetter(sourceRegister)
    storep sourceRegister, CodeBlock[cfr]
end

macro notFunctionCodeBlockSetter(sourceRegister)

# Do the bare minimum required to execute code. Sets up the PC, leaves the
# CodeBlock* in t1. May also trigger prologue entry OSR.
macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
    # Set up the call frame and check if we should OSR.
    preserveCallerPCAndCFR()

    subp maxFrameExtentForSlowPathCall, sp
    callSlowPath(traceSlowPath)
    addp maxFrameExtentForSlowPathCall, sp

    baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue
    cCall2(osrSlowPath, cfr, PC)

    # We are after the function prologue, but before we have set up sp from the CodeBlock.
    # Temporarily align stack pointer for this call.
    cCall2(osrSlowPath, cfr, PC)

    move cfr, sp # restore the previous sp
    # pop the callerFrame since we will jump to a function that wants to save it
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4

    loadp CodeBlock::m_instructions[t1], PB
    loadp CodeBlock::m_instructions[t1], PC

    # Get new sp in t0 and check stack height.
    getFrameRegisterSizeForCodeBlock(t1, t0)
    loadp CodeBlock::m_vm[t1], t2
    bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK

    # Stack height check failed - need to call a slow_path.
    subp maxFrameExtentForSlowPathCall, sp # Set up temporary stack pointer for call
    callSlowPath(_llint_stack_check)
    bpeq t1, 0, .stackHeightOKGetCodeBlock

    dispatch(0) # Go to exception handler in PC

.stackHeightOKGetCodeBlock:
    # The stack check slow path returned that the stack was ok. Since the
    # registers were clobbered, we need to reload the CodeBlock and recompute
    # the new sp.
    getFrameRegisterSizeForCodeBlock(t1, t0)

# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Must call dispatch(0) after calling this.
macro functionInitialization(profileArgSkip)
    # Profile the arguments. Unfortunately, we have no choice but to do this. This
    # code is pretty horrendous because of the difference in ordering between
    # arguments and value profiles, the desire to have a simple loop-down-to-zero
    # loop, and the desire to use only three registers so as to preserve the PC and
    # the code block. It is likely that this code should be rewritten in a more
    # optimal way for architectures that have more than five registers available
    # for arbitrary use in the interpreter.
    loadi CodeBlock::m_numParameters[t1], t0
    addp -profileArgSkip, t0 # Use addi because that's what has the peephole
    assert(macro (ok) bpgteq t0, 0, ok end)
    btpz t0, .argumentProfileDone
    loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
    mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
.argumentProfileLoop:
    if JSVALUE64
        loadq ThisArgumentOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        subp sizeof ValueProfile, t3
        storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
    else
        loadi ThisArgumentOffset + TagOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        subp sizeof ValueProfile, t3
        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
        loadi ThisArgumentOffset + PayloadOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
    end
    baddpnz -8, t0, .argumentProfileLoop
.argumentProfileDone:

macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
    const offsetOfFirstFreeCell =
        MarkedAllocator::m_freeList +
        MarkedBlock::FreeList::head

    # Get the object from the free list.
    loadp offsetOfFirstFreeCell[allocator], result
    btpz result, slowCase

    # Remove the object from the free list.
    loadp [result], scratch1
    storep scratch1, offsetOfFirstFreeCell[allocator]

    # Initialize the object.
    storep 0, JSObject::m_butterfly[result]
    storeStructureWithTypeInfo(result, structure, scratch1)
end

macro doReturn()
    restoreCallerPCAndCFR()
    ret
end

# Stubs to call into JavaScript or Native functions.
# EncodedJSValue vmEntryToJavaScript(void* code, VM* vm, ProtoCallFrame* protoFrame)
# EncodedJSValue vmEntryToNativeFunction(void* code, VM* vm, ProtoCallFrame* protoFrame)

_llint_vm_entry_to_javascript:
global _vmEntryToJavaScript
_vmEntryToJavaScript:
    doVMEntry(makeJavaScriptCall)


_llint_vm_entry_to_native:
global _vmEntryToNative
_vmEntryToNative:
    doVMEntry(makeHostFunctionCall)

# void sanitizeStackForVMImpl(VM* vm)
global _sanitizeStackForVMImpl
_sanitizeStackForVMImpl:
    loadp VM::m_lastStackTop[vm], address
    bpbeq sp, address, .zeroFillDone

.zeroFillLoop:
    storep zeroValue, [address]
    addp PtrSize, address
    bpa sp, address, .zeroFillLoop

    storep address, VM::m_lastStackTop[vm]

# VMEntryRecord* vmEntryRecord(const VMEntryFrame* entryFrame)
global _vmEntryRecord
_vmEntryRecord:
    const entryFrame = t4
elsif X86 or X86_WIN or X86_64_WIN
    const entryFrame = t2
    const entryFrame = a0

    loadp 4[sp], entryFrame

    vmEntryRecord(entryFrame, result)

# Dummy entry point the C Loop uses to initialize.

macro initPCRelative(pcBase)
    if X86_64 or X86_64_WIN
    subp 3, pcBase # Need to back up the PC and set the Thumb2 bit
    elsif ARM or ARMv7_TRADITIONAL
    la _relativePCBase, pcBase
    mova _relativePCBase, t0

macro setEntryAddress(index, label)
    leap (label - _relativePCBase)[t1], t0
    storep t0, [t4, t2, 8]
    leap (label - _relativePCBase)[t1], t0
    storep t0, [t2, t4, 8]
    leap (label - _relativePCBase)[t1], t0
    storep t0, [t4, t2, 4]
    storep t1, [a0, t2, 8]
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
    mvlbl (label - _relativePCBase), t2
    storep t2, [a0, t3, 4]
    move (label - _relativePCBase), t2
    storep t2, [a0, t3, 4]
    flushcp # Force constant pool flush to avoid "pcrel too far" link error.
    la _relativePCBase, t3
    storep t2, [a0, t3, 4]

# Entry point for the llint to initialize.

# Include generated bytecode initialization file.
include InitBytecodes

_llint_program_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)


_llint_eval_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)


_llint_function_for_call_prologue:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
    functionInitialization(0)
    dispatch(0)


_llint_function_for_construct_prologue:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
    functionInitialization(1)
    dispatch(0)


_llint_function_for_call_arity_check:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
    functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck)
.functionForCallBegin:
    functionInitialization(0)
    dispatch(0)


_llint_function_for_construct_arity_check:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
    functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck)
.functionForConstructBegin:
    functionInitialization(1)
    dispatch(0)

# Value-representation-specific code.
if JSVALUE64
    include LowLevelInterpreter64
else
    include LowLevelInterpreter32_64
end

# Value-representation-agnostic code.
_llint_op_create_direct_arguments:
    callSlowPath(_slow_path_create_direct_arguments)


_llint_op_create_scoped_arguments:
    callSlowPath(_slow_path_create_scoped_arguments)


_llint_op_create_out_of_band_arguments:
    callSlowPath(_slow_path_create_out_of_band_arguments)


    callSlowPath(_llint_slow_path_new_func)


    callSlowPath(_llint_slow_path_new_array)


_llint_op_new_array_with_size:
    callSlowPath(_llint_slow_path_new_array_with_size)


_llint_op_new_array_buffer:
    callSlowPath(_llint_slow_path_new_array_buffer)


_llint_op_new_regexp:
    callSlowPath(_llint_slow_path_new_regexp)


    callSlowPath(_slow_path_less)


    callSlowPath(_slow_path_lesseq)


    callSlowPath(_slow_path_greater)


    callSlowPath(_slow_path_greatereq)


    callSlowPath(_slow_path_mod)


    callSlowPath(_slow_path_typeof)


_llint_op_is_object_or_null:
    callSlowPath(_slow_path_is_object_or_null)

_llint_op_is_function:
    callSlowPath(_slow_path_is_function)


    callSlowPath(_slow_path_in)

macro withInlineStorage(object, propertyStorage, continuation)
    # Indicate that the object is the property storage, and that the
    # property storage register is unused.
    continuation(object, propertyStorage)
end

macro withOutOfLineStorage(object, propertyStorage, continuation)
    loadp JSObject::m_butterfly[object], propertyStorage
    # Indicate that the propertyStorage register now points to the
    # property storage, and that the object register may be reused
    # if the object pointer is not needed anymore.
    continuation(propertyStorage, object)
end
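
# Usage sketch for the two helpers above (illustrative, not a definition from
# this file): a property access handler picks one of them based on whether the
# property lives inline or out-of-line and passes a continuation that does the
# actual load, e.g.
#
#     withOutOfLineStorage(t0, t3, macro (propertyStorage, object)
#         loadp someOffset[propertyStorage], t1   # someOffset is hypothetical
#     end)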

_llint_op_del_by_id:
    callSlowPath(_llint_slow_path_del_by_id)


_llint_op_del_by_val:
    callSlowPath(_llint_slow_path_del_by_val)


_llint_op_put_by_index:
    callSlowPath(_llint_slow_path_put_by_index)


_llint_op_put_getter_by_id:
    callSlowPath(_llint_slow_path_put_getter_by_id)


_llint_op_put_setter_by_id:
    callSlowPath(_llint_slow_path_put_setter_by_id)


_llint_op_put_getter_setter:
    callSlowPath(_llint_slow_path_put_getter_setter)

        macro (value, target) btinz value, target end,
        _llint_slow_path_jtrue)


        macro (value, target) btiz value, target end,
        _llint_slow_path_jfalse)


        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdlt left, right, target end,
        _llint_slow_path_jless)


        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgtequn left, right, target end,
        _llint_slow_path_jnless)


        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgt left, right, target end,
        _llint_slow_path_jgreater)


_llint_op_jngreater:
        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdltequn left, right, target end,
        _llint_slow_path_jngreater)


        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdlteq left, right, target end,
        _llint_slow_path_jlesseq)


        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgtun left, right, target end,
        _llint_slow_path_jnlesseq)


_llint_op_jgreatereq:
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgteq left, right, target end,
        _llint_slow_path_jgreatereq)


_llint_op_jngreatereq:
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdltun left, right, target end,
        _llint_slow_path_jngreatereq)

_llint_op_loop_hint:
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_vm[t1], t1
    loadp VM::watchdog[t1], t0
    btpnz t0, .handleWatchdogTimer
.afterWatchdogTimerCheck:
    checkSwitchToJITForLoop()

.handleWatchdogTimer:
    loadb Watchdog::m_timerDidFire[t0], t0
    btbz t0, .afterWatchdogTimerCheck
    callWatchdogTimerHandler(.throwHandler)
    jmp .afterWatchdogTimerCheck

.throwHandler:
    jmp _llint_throw_from_slow_path_trampoline

_llint_op_switch_string:
    callSlowPath(_llint_slow_path_switch_string)


_llint_op_new_func_exp:
    callSlowPath(_llint_slow_path_new_func_exp)


_llint_op_call:
    arrayProfileForCall()
    doCall(_llint_slow_path_call)


_llint_op_construct:
    doCall(_llint_slow_path_construct)

_llint_op_call_varargs:
    callSlowPath(_llint_slow_path_size_frame_for_varargs)
    branchIfException(_llint_throw_from_slow_path_trampoline)
    # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
    subp t1, CallerFrameAndPCSize, t2
    subp t1, CallerFrameAndPCSize, sp
    slowPathForCall(_llint_slow_path_call_varargs)


_llint_op_construct_varargs:
    callSlowPath(_llint_slow_path_size_frame_for_varargs)
    branchIfException(_llint_throw_from_slow_path_trampoline)
    # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
    subp t1, CallerFrameAndPCSize, t2
    subp t1, CallerFrameAndPCSize, sp
    slowPathForCall(_llint_slow_path_construct_varargs)

_llint_op_call_eval:
    # Eval is executed in one of two modes:
    #
    # 1) We find that we're really invoking eval() in which case the
    #    execution is performed entirely inside the slow_path, and it
    #    returns the PC of a function that just returns the return value
    #    that the eval returned.
    #
    # 2) We find that we're invoking something called eval() that is not
    #    the real eval. Then the slow_path returns the PC of the thing to
    #    call, and we call it.
    #
    # This allows us to handle two cases, which would require a total of
    # up to four pieces of state that cannot be easily packed into two
    # registers (C functions can return up to two registers, easily):
    #
    # - The call frame register. This may or may not have been modified
    #   by the slow_path, but the convention is that it returns it. It's not
    #   totally clear if that's necessary, since the cfr is callee save.
    #   But that's our style in this here interpreter so we stick with it.
    #
    # - A bit to say if the slow_path successfully executed the eval and has
    #   the return value, or did not execute the eval but has a PC for us
    #   to call.
    #
    # - The JS return value (two registers), or
    # - The PC to call.
    #
    # It turns out to be easier to just always have this return the cfr
    # and a PC to call, and that PC may be a dummy thunk that just
    # returns the JS value that the eval returned.

    slowPathForCall(_llint_slow_path_call_eval)
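
# In C++ terms the convention described above amounts to returning two machine
# words from the slow path -- roughly (a sketch, not the actual declaration
# used by the slow paths):
#
#     struct SlowPathReturnType { void* pc; void* cfr; };   // returned in two registers
#
# so the interpreter can unconditionally call the returned pc with the
# returned cfr, whether that pc is the real callee or the dummy thunk that
# just hands back the eval's result.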

_llint_generic_return_point:


    callSlowPath(_slow_path_strcat)

_llint_op_push_with_scope:
    callSlowPath(_llint_slow_path_push_with_scope)


_llint_op_pop_scope:
    callSlowPath(_llint_slow_path_pop_scope)


_llint_op_push_name_scope:
    callSlowPath(_llint_slow_path_push_name_scope)


    callSlowPath(_llint_slow_path_throw)


_llint_op_throw_static_error:
    callSlowPath(_llint_slow_path_throw_static_error)

_llint_op_profile_will_call:
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_vm[t0], t0
    loadi VM::m_enabledProfiler[t0], t0
    btpz t0, .opProfilerWillCallDone
    callSlowPath(_llint_slow_path_profile_will_call)
.opProfilerWillCallDone:


_llint_op_profile_did_call:
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_vm[t0], t0
    loadi VM::m_enabledProfiler[t0], t0
    btpz t0, .opProfilerDidCallDone
    callSlowPath(_llint_slow_path_profile_did_call)
.opProfilerDidCallDone:


_llint_op_debug:
    loadp CodeBlock[cfr], t0
    loadi CodeBlock::m_debuggerRequests[t0], t0
    btiz t0, .opDebugDone
    callSlowPath(_llint_slow_path_debug)

_llint_native_call_trampoline:
    nativeCallTrampoline(NativeExecutable::m_function)


_llint_native_construct_trampoline:
    nativeCallTrampoline(NativeExecutable::m_constructor)

_llint_op_get_enumerable_length:
    callSlowPath(_slow_path_get_enumerable_length)

_llint_op_has_indexed_property:
    callSlowPath(_slow_path_has_indexed_property)

_llint_op_has_structure_property:
    callSlowPath(_slow_path_has_structure_property)

_llint_op_has_generic_property:
    callSlowPath(_slow_path_has_generic_property)

_llint_op_get_direct_pname:
    callSlowPath(_slow_path_get_direct_pname)

_llint_op_get_property_enumerator:
    callSlowPath(_slow_path_get_property_enumerator)

_llint_op_enumerator_structure_pname:
    callSlowPath(_slow_path_next_structure_enumerator_pname)

_llint_op_enumerator_generic_pname:
    callSlowPath(_slow_path_next_generic_enumerator_pname)

_llint_op_to_index_string:
    callSlowPath(_slow_path_to_index_string)

_llint_op_profile_control_flow:
    loadpFromInstruction(1, t0)
    storeb 1, BasicBlockLocation::m_hasExecuted[t0]

# Lastly, make sure that we can link even though we don't support all opcodes.
# These opcodes should never arise when using LLInt or either JIT. We assert
# that they are never reached.

macro notSupported()
    # We should use whatever the smallest possible instruction is, just to
    # ensure that there is a gap between instruction labels. If multiple
    # smallest instructions exist, we should pick the one that is most
    # likely to result in execution being halted. Currently that is the break
    # instruction on all architectures we're interested in. (Break is int3
    # on Intel, which is 1 byte, and bkpt on ARMv7, which is 2 bytes.)
    break
end

_llint_op_init_global_const_nop: