diff --git a/llint/LowLevelInterpreter.asm b/llint/LowLevelInterpreter.asm
index 0a5f20102dc72dab4739cdf26420543fa8ad8fdd..45a604cc2a64ebbe71b033f4217e83fefa4474c6 100644
--- a/llint/LowLevelInterpreter.asm
+++ b/llint/LowLevelInterpreter.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 # THE POSSIBILITY OF SUCH DAMAGE.
 
+# First come the common protocols that both interpreters use. Note that each
+# of these must have an ASSERT() in LLIntData.cpp
+
 # Work-around for the fact that the toolchain's awareness of armv7s results in
 # a separate slab in the fat binary, yet the offlineasm doesn't know to expect
 # it.
 if ARMv7s
 end
 
-# First come the common protocols that both interpreters use. Note that each
-# of these must have an ASSERT() in LLIntData.cpp
+# These declarations must match interpreter/JSStack.h.
+
+if JSVALUE64
+const PtrSize = 8
+const CallFrameHeaderSlots = 6
+else
+const PtrSize = 4
+const CallFrameHeaderSlots = 5
+const CallFrameAlignSlots = 1
+end
+const SlotSize = 8
+
+const CallerFrameAndPCSize = 2 * PtrSize
+
+const CallerFrame = 0
+const ReturnPC = CallerFrame + PtrSize
+const CodeBlock = ReturnPC + PtrSize
+const ScopeChain = CodeBlock + SlotSize
+const Callee = ScopeChain + SlotSize
+const ArgumentCount = Callee + SlotSize
+const ThisArgumentOffset = ArgumentCount + SlotSize
+const CallFrameHeaderSize = ThisArgumentOffset
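+# Worked out with the sizes above, the 64-bit header therefore lays out as
+# CallerFrame 0, ReturnPC 8, CodeBlock 16, ScopeChain 24, Callee 32,
+# ArgumentCount 40, ThisArgumentOffset 48: six 8-byte slots, matching
+# CallFrameHeaderSlots. On 32-bit the same fields pack into five slots (40 bytes).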
+
+# Some value representation constants.
+if JSVALUE64
+const TagBitTypeOther = 0x2
+const TagBitBool      = 0x4
+const TagBitUndefined = 0x8
+const ValueEmpty      = 0x0
+const ValueFalse      = TagBitTypeOther | TagBitBool
+const ValueTrue       = TagBitTypeOther | TagBitBool | 1
+const ValueUndefined  = TagBitTypeOther | TagBitUndefined
+const ValueNull       = TagBitTypeOther
+else
+const Int32Tag = -1
+const BooleanTag = -2
+const NullTag = -3
+const UndefinedTag = -4
+const CellTag = -5
+const EmptyValueTag = -6
+const DeletedValueTag = -7
+const LowestTag = DeletedValueTag
+end
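+# In the JSVALUE64 encoding these work out to the concrete immediates
+# false = 0x6, true = 0x7, undefined = 0xa, null = 0x2 and empty = 0x0.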
 
-# These declarations must match interpreter/RegisterFile.h.
-const CallFrameHeaderSize = 48
-const ArgumentCount = -48
-const CallerFrame = -40
-const Callee = -32
-const ScopeChain = -24
-const ReturnPC = -16
-const CodeBlock = -8
+const CallOpCodeSize = 9
+
+if X86_64 or ARM64 or C_LOOP
+const maxFrameExtentForSlowPathCall = 0
+elsif ARM or ARMv7_TRADITIONAL or ARMv7 or SH4
+const maxFrameExtentForSlowPathCall = 24
+elsif X86 or X86_WIN
+const maxFrameExtentForSlowPathCall = 40
+elsif MIPS
+const maxFrameExtentForSlowPathCall = 40
+elsif X86_64_WIN
+const maxFrameExtentForSlowPathCall = 64
+end
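+# maxFrameExtentForSlowPathCall is the outgoing-argument/scratch space a slow-path
+# C call may need below the frame; it is zero on ABIs that pass all such arguments
+# in registers.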
 
-const ThisArgumentOffset = -CallFrameHeaderSize - 8
+# Watchpoint states
+const ClearWatchpoint = 0
+const IsWatched = 1
+const IsInvalidated = 2
 
 # Some register conventions.
 if JSVALUE64
     # - Use a pair of registers to represent the PC: one register for the
-    #   base of the register file, and one register for the index.
+    #   base of the bytecodes, and one register for the index.
     # - The PC base (or PB for short) should be stored in the csr. It will
     #   get clobbered on calls to other JS code, but will get saved on calls
     #   to C functions.
     # - C calls are still given the Instruction* rather than the PC index.
     #   This requires an add before the call, and a sub after.
-    const PC = t4
+    const PC = t5
     const PB = t6
     const tagTypeNumber = csr1
     const tagMask = csr2
+    
+    macro loadisFromInstruction(offset, dest)
+        loadis offset * 8[PB, PC, 8], dest
+    end
+    
+    macro loadpFromInstruction(offset, dest)
+        loadp offset * 8[PB, PC, 8], dest
+    end
+    
+    macro storepToInstruction(value, offset)
+        storep value, offset * 8[PB, PC, 8]
+    end
+
 else
-    const PC = t4
+    const PC = t5
+    macro loadisFromInstruction(offset, dest)
+        loadis offset * 4[PC], dest
+    end
+    
+    macro loadpFromInstruction(offset, dest)
+        loadp offset * 4[PC], dest
+    end
 end
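+# These helpers hide the two instruction-stream layouts: for example (illustrative
+# operands), loadisFromInstruction(2, t2) loads the 32-bit operand in slot 2 of the
+# current bytecode into t2 on either representation.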
 
 # Constants for reasoning about value representation.
@@ -67,9 +139,20 @@ else
     const PayloadOffset = 0
 end
 
+# Constants for reasoning about butterflies.
+const IsArray                  = 1
+const IndexingShapeMask        = 30
+const NoIndexingShape          = 0
+const Int32Shape               = 20
+const DoubleShape              = 22
+const ContiguousShape          = 26
+const ArrayStorageShape        = 28
+const SlowPutArrayStorageShape = 30
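+# indexingType & IndexingShapeMask isolates the storage shape above; the low
+# IsArray bit records whether the cell is a JSArray.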
+
 # Type constants.
 const StringType = 5
-const ObjectType = 13
+const ObjectType = 18
+const FinalObjectType = 19
 
 # Type flags constants.
 const MasqueradesAsUndefined = 1
@@ -88,7 +171,27 @@ const FunctionCode = 2
 const LLIntReturnPC = ArgumentCount + TagOffset
 
 # String flags.
-const HashFlags8BitBuffer = 64
+const HashFlags8BitBuffer = 32
+
+# Copied from PropertyOffset.h
+const firstOutOfLineOffset = 100
+
+# ResolveType
+const GlobalProperty = 0
+const GlobalVar = 1
+const ClosureVar = 2
+const GlobalPropertyWithVarInjectionChecks = 3
+const GlobalVarWithVarInjectionChecks = 4
+const ClosureVarWithVarInjectionChecks = 5
+const Dynamic = 6
+
+const ResolveModeMask = 0xffff
+
+const MarkedBlockSize = 64 * 1024
+const MarkedBlockMask = ~(MarkedBlockSize - 1)
+# Constants for checking mark bits.
+const AtomNumberShift = 3
+const BitMapWordShift = 4
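+# A cell pointer anded with MarkedBlockMask yields the 64KB-aligned MarkedBlock
+# that owns the cell, which is where its mark bits are kept.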
 
 # Allocation constants
 if JSVALUE64
@@ -98,20 +201,20 @@ else
 end
 
 # This must match wtf/Vector.h
+const VectorBufferOffset = 0
 if JSVALUE64
-    const VectorSizeOffset = 0
-    const VectorBufferOffset = 8
+    const VectorSizeOffset = 12
 else
-    const VectorSizeOffset = 0
-    const VectorBufferOffset = 4
+    const VectorSizeOffset = 8
 end
 
-
 # Some common utilities.
 macro crash()
-    storei 0, 0xbbadbeef[]
-    move 0, t0
-    call t0
+    if C_LOOP
+        cloopCrash
+    else
+        call _llint_crash
+    end
 end
 
 macro assert(assertion)
@@ -122,10 +225,58 @@ macro assert(assertion)
     end
 end
 
+macro checkStackPointerAlignment(tempReg, location)
+    if ARM64 or C_LOOP or SH4
+        # ARM64 will check for us!
+        # C_LOOP does not need the alignment, and can use a little perf
+        # improvement from avoiding useless work.
+        # SH4 does not need specific alignment (4 bytes).
+    else
+        if ARM or ARMv7 or ARMv7_TRADITIONAL
+            # ARM can't do logical ops with the sp as a source
+            move sp, tempReg
+            andp 0xf, tempReg
+        else
+            andp sp, 0xf, tempReg
+        end
+        btpz tempReg, .stackPointerOkay
+        move location, tempReg
+        break
+    .stackPointerOkay:
+    end
+end
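+# Illustrative use (the tag value is arbitrary): checkStackPointerAlignment(t2, 0xbaad0001)
+# leaves 0xbaad0001 in t2 and breaks if sp is not 16-byte aligned.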
+
+macro preserveCallerPCAndCFR()
+    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+        push lr
+        push cfr
+    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+        push cfr
+    elsif ARM64
+        pushLRAndFP
+    else
+        error
+    end
+    move sp, cfr
+end
+
+macro restoreCallerPCAndCFR()
+    move cfr, sp
+    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+        pop cfr
+        pop lr
+    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+        pop cfr
+    elsif ARM64
+        popLRAndFP
+    end
+end
+
 macro preserveReturnAddressAfterCall(destinationRegister)
-    if ARMv7
+    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
+        # In the C_LOOP case, we're only preserving the bytecode vPC.
         move lr, destinationRegister
-    elsif X86 or X86_64
+    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
         pop destinationRegister
     else
         error
@@ -133,38 +284,182 @@ macro preserveReturnAddressAfterCall(destinationRegister)
 end
 
 macro restoreReturnAddressBeforeReturn(sourceRegister)
-    if ARMv7
+    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
+        # In the C_LOOP case, we're only restoring the bytecode vPC.
         move sourceRegister, lr
-    elsif X86 or X86_64
+    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
         push sourceRegister
     else
         error
     end
 end
 
+macro functionPrologue()
+    if X86 or X86_WIN or X86_64 or X86_64_WIN
+        push cfr
+    elsif ARM64
+        pushLRAndFP
+    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+        push lr
+        push cfr
+    end
+    move sp, cfr
+end
+
+macro functionEpilogue()
+    if X86 or X86_WIN or X86_64 or X86_64_WIN
+        pop cfr
+    elsif ARM64
+        popLRAndFP
+    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+        pop cfr
+        pop lr
+    end
+end
+
+macro callToJavaScriptPrologue()
+    if X86_64 or X86_64_WIN
+        push cfr
+        push t0
+    elsif X86 or X86_WIN
+        push cfr
+    elsif ARM64
+        pushLRAndFP
+    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+        push lr
+        push cfr
+    end
+    pushCalleeSaves
+    if X86
+        subp 12, sp
+    elsif X86_WIN
+        subp 16, sp
+        move sp, t4
+        move t4, t0
+        move t4, t2
+        andp 0xf, t2
+        andp 0xfffffff0, t0
+        move t0, sp
+        storep t4, [sp]
+    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+        subp 4, sp
+        move sp, t4
+        clrbp t4, 0xf, t5
+        move t5, sp
+        storep t4, [sp]
+    end
+end
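+# The X86_WIN and ARM paths above force sp down to a 16-byte boundary and stash the
+# original sp at [sp] so that callToJavaScriptEpilogue() can pop it back.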
+
+macro callToJavaScriptEpilogue()
+    if ARMv7
+        addp CallFrameHeaderSlots * 8, cfr, t4
+        move t4, sp
+    else
+        addp CallFrameHeaderSlots * 8, cfr, sp
+    end
+
+    loadp CallerFrame[cfr], cfr
+
+    if X86
+        addp 12, sp
+    elsif X86_WIN
+        pop t4
+        move t4, sp
+        addp 16, sp
+    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+        pop t4
+        move t4, sp
+        addp 4, sp
+    end
+
+    popCalleeSaves
+    if X86_64 or X86_64_WIN
+        pop t2
+        pop cfr
+    elsif X86 or X86_WIN
+        pop cfr
+    elsif ARM64
+        popLRAndFP
+    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+        pop cfr
+        pop lr
+    end
+end
+
+macro moveStackPointerForCodeBlock(codeBlock, scratch)
+    loadi CodeBlock::m_numCalleeRegisters[codeBlock], scratch
+    lshiftp 3, scratch
+    addp maxFrameExtentForSlowPathCall, scratch
+    if ARMv7
+        subp cfr, scratch, scratch
+        move scratch, sp
+    else
+        subp cfr, scratch, sp
+    end
+end
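+# That is, sp = cfr - (m_numCalleeRegisters * 8 + maxFrameExtentForSlowPathCall),
+# leaving room below the frame for the callee registers plus any slow-path call.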
+
+macro restoreStackPointerAfterCall()
+    loadp CodeBlock[cfr], t2
+    moveStackPointerForCodeBlock(t2, t4)
+end
+
 macro traceExecution()
     if EXECUTION_TRACING
         callSlowPath(_llint_trace)
     end
 end
 
-macro slowPathForCall(advance, slowPath)
+macro callTargetFunction(callLinkInfo, calleeFramePtr)
+    move calleeFramePtr, sp
+    if C_LOOP
+        cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
+    else
+        call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
+    end
+    restoreStackPointerAfterCall()
+    dispatchAfterCall()
+end
+
+macro slowPathForCall(slowPath)
     callCallSlowPath(
-        advance,
         slowPath,
         macro (callee)
-            call callee
+            btpz t1, .dontUpdateSP
+            if ARMv7
+                addp CallerFrameAndPCSize, t1, t1
+                move t1, sp
+            else
+                addp CallerFrameAndPCSize, t1, sp
+            end
+        .dontUpdateSP:
+            if C_LOOP
+                cloopCallJSFunction callee
+            else
+                call callee
+            end
+            restoreStackPointerAfterCall()
             dispatchAfterCall()
         end)
 end
 
+macro arrayProfile(cellAndIndexingType, profile, scratch)
+    const cell = cellAndIndexingType
+    const indexingType = cellAndIndexingType 
+    loadi JSCell::m_structureID[cell], scratch
+    storei scratch, ArrayProfile::m_lastSeenStructureID[profile]
+    loadb JSCell::m_indexingType[cell], indexingType
+end
+
+macro checkMarkByte(cell, scratch1, scratch2, continuation)
+    loadb JSCell::m_gcData[cell], scratch1
+    continuation(scratch1)
+end
+
 macro checkSwitchToJIT(increment, action)
-    if JIT_ENABLED
-        loadp CodeBlock[cfr], t0
-        baddis increment, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t0], .continue
-        action()
+    loadp CodeBlock[cfr], t0
+    baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue
+    action()
     .continue:
-    end
 end
 
 macro checkSwitchToJITForEpilogue()
@@ -206,28 +501,48 @@ end
 # Do the bare minimum required to execute code. Sets up the PC, leave the CodeBlock*
 # in t1. May also trigger prologue entry OSR.
 macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
-    preserveReturnAddressAfterCall(t2)
-    
     # Set up the call frame and check if we should OSR.
-    storep t2, ReturnPC[cfr]
+    preserveCallerPCAndCFR()
+
     if EXECUTION_TRACING
+        subp maxFrameExtentForSlowPathCall, sp
         callSlowPath(traceSlowPath)
+        addp maxFrameExtentForSlowPathCall, sp
     end
     codeBlockGetter(t1)
-    if JIT_ENABLED
-        baddis 5, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t1], .continue
+if C_LOOP
+else
+    baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue
+    if JSVALUE64
         cCall2(osrSlowPath, cfr, PC)
-        move t1, cfr
-        btpz t0, .recover
-        loadp ReturnPC[cfr], t2
-        restoreReturnAddressBeforeReturn(t2)
-        jmp t0
-    .recover:
-        codeBlockGetter(t1)
-    .continue:
+    else
+        # We are after the function prologue, but before we have set up sp from the CodeBlock.
+        # Temporarily align stack pointer for this call.
+        subp 8, sp
+        cCall2(osrSlowPath, cfr, PC)
+        addp 8, sp
+    end
+    btpz t0, .recover
+    move cfr, sp # restore the previous sp
+    # pop the callerFrame since we will jump to a function that wants to save it
+    if ARM64
+        popLRAndFP
+    elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+        pop cfr
+        pop lr
+    else
+        pop cfr
     end
+    jmp t0
+.recover:
+    codeBlockGetter(t1)
+.continue:
+end
+
     codeBlockSetter(t1)
     
+    moveStackPointerForCodeBlock(t1, t2)
+
     # Set up the PC.
     if JSVALUE64
         loadp CodeBlock::m_instructions[t1], PB
@@ -240,101 +555,224 @@ end
 # Expects that CodeBlock is in t1, which is what prologue() leaves behind.
 # Must call dispatch(0) after calling this.
 macro functionInitialization(profileArgSkip)
-    if VALUE_PROFILER
-        # Profile the arguments. Unfortunately, we have no choice but to do this. This
-        # code is pretty horrendous because of the difference in ordering between
-        # arguments and value profiles, the desire to have a simple loop-down-to-zero
-        # loop, and the desire to use only three registers so as to preserve the PC and
-        # the code block. It is likely that this code should be rewritten in a more
-        # optimal way for architectures that have more than five registers available
-        # for arbitrary use in the interpreter.
-        loadi CodeBlock::m_numParameters[t1], t0
-        addp -profileArgSkip, t0 # Use addi because that's what has the peephole
-        assert(macro (ok) bpgteq t0, 0, ok end)
-        btpz t0, .argumentProfileDone
-        loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
-        mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
-        negp t0
-        lshiftp 3, t0
-        addp t2, t3
-    .argumentProfileLoop:
-        if JSVALUE64
-            loadp ThisArgumentOffset + 8 - profileArgSkip * 8[cfr, t0], t2
-            subp sizeof ValueProfile, t3
-            storep t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
-        else
-            loadi ThisArgumentOffset + TagOffset + 8 - profileArgSkip * 8[cfr, t0], t2
-            subp sizeof ValueProfile, t3
-            storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
-            loadi ThisArgumentOffset + PayloadOffset + 8 - profileArgSkip * 8[cfr, t0], t2
-            storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
-        end
-        baddpnz 8, t0, .argumentProfileLoop
-    .argumentProfileDone:
+    # Profile the arguments. Unfortunately, we have no choice but to do this. This
+    # code is pretty horrendous because of the difference in ordering between
+    # arguments and value profiles, the desire to have a simple loop-down-to-zero
+    # loop, and the desire to use only three registers so as to preserve the PC and
+    # the code block. It is likely that this code should be rewritten in a more
+    # optimal way for architectures that have more than five registers available
+    # for arbitrary use in the interpreter.
+    loadi CodeBlock::m_numParameters[t1], t0
+    addp -profileArgSkip, t0 # Use addi because that's what has the peephole
+    assert(macro (ok) bpgteq t0, 0, ok end)
+    btpz t0, .argumentProfileDone
+    loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
+    mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
+    lshiftp 3, t0
+    addp t2, t3
+.argumentProfileLoop:
+    if JSVALUE64
+        loadq ThisArgumentOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+        subp sizeof ValueProfile, t3
+        storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
+    else
+        loadi ThisArgumentOffset + TagOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+        subp sizeof ValueProfile, t3
+        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
+        loadi ThisArgumentOffset + PayloadOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
     end
+    baddpnz -8, t0, .argumentProfileLoop
+.argumentProfileDone:
         
     # Check stack height.
     loadi CodeBlock::m_numCalleeRegisters[t1], t0
-    loadp CodeBlock::m_globalData[t1], t2
-    loadp JSGlobalData::interpreter[t2], t2   # FIXME: Can get to the RegisterFile from the JITStackFrame
-    lshifti 3, t0
-    addp t0, cfr, t0
-    bpaeq Interpreter::m_registerFile + RegisterFile::m_end[t2], t0, .stackHeightOK
+    loadp CodeBlock::m_vm[t1], t2
+    lshiftp 3, t0
+    addi maxFrameExtentForSlowPathCall, t0
+    subp cfr, t0, t0
+    bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK
 
     # Stack height check failed - need to call a slow_path.
-    callSlowPath(_llint_register_file_check)
+    callSlowPath(_llint_stack_check)
+    bpeq t1, 0, .stackHeightOK
+    move t1, cfr
 .stackHeightOK:
 end
 
-macro allocateBasicJSObject(sizeClassIndex, classInfoOffset, structure, result, scratch1, scratch2, slowCase)
+macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
     if ALWAYS_ALLOCATE_SLOW
         jmp slowCase
     else
-        const offsetOfMySizeClass =
-            JSGlobalData::heap +
-            Heap::m_objectSpace +
-            MarkedSpace::m_normalSpace +
-            MarkedSpace::Subspace::preciseAllocators +
-            sizeClassIndex * sizeof MarkedAllocator
-        
         const offsetOfFirstFreeCell = 
             MarkedAllocator::m_freeList + 
             MarkedBlock::FreeList::head
 
-        # FIXME: we can get the global data in one load from the stack.
-        loadp CodeBlock[cfr], scratch1
-        loadp CodeBlock::m_globalData[scratch1], scratch1
-        
         # Get the object from the free list.   
-        loadp offsetOfMySizeClass + offsetOfFirstFreeCell[scratch1], result
+        loadp offsetOfFirstFreeCell[allocator], result
         btpz result, slowCase
         
         # Remove the object from the free list.
-        loadp [result], scratch2
-        storep scratch2, offsetOfMySizeClass + offsetOfFirstFreeCell[scratch1]
+        loadp [result], scratch1
+        storep scratch1, offsetOfFirstFreeCell[allocator]
     
         # Initialize the object.
-        loadp classInfoOffset[scratch1], scratch2
-        storep scratch2, [result]
-        storep structure, JSCell::m_structure[result]
-        storep 0, JSObject::m_inheritorID[result]
-        addp sizeof JSObject, result, scratch1
-        storep scratch1, JSObject::m_propertyStorage[result]
+        storep 0, JSObject::m_butterfly[result]
+        storeStructureWithTypeInfo(result, structure, scratch1)
     end
 end
 
 macro doReturn()
-    loadp ReturnPC[cfr], t2
-    loadp CallerFrame[cfr], cfr
-    restoreReturnAddressBeforeReturn(t2)
+    restoreCallerPCAndCFR()
+    ret
+end
+
+# Stubs to call into JavaScript or native functions
+# EncodedJSValue callToJavaScript(void* code, ExecState** vmTopCallFrame, ProtoCallFrame* protoFrame)
+# EncodedJSValue callToNativeFunction(void* code, ExecState** vmTopCallFrame, ProtoCallFrame* protoFrame)
+
+if C_LOOP
+_llint_call_to_javascript:
+else
+global _callToJavaScript
+_callToJavaScript:
+end
+    doCallToJavaScript(makeJavaScriptCall)
+
+
+if C_LOOP
+_llint_call_to_native_function:
+else
+global _callToNativeFunction
+_callToNativeFunction:
+end
+    doCallToJavaScript(makeHostFunctionCall)
+
+
+if C_LOOP
+else
+# void sanitizeStackForVMImpl(VM* vm)
+global _sanitizeStackForVMImpl
+_sanitizeStackForVMImpl:
+    if X86_64
+        const vm = t4
+        const address = t1
+        const zeroValue = t0
+    elsif X86_64_WIN
+        const vm = t2
+        const address = t1
+        const zeroValue = t0
+    elsif X86 or X86_WIN
+        const vm = t2
+        const address = t1
+        const zeroValue = t0
+    else
+        const vm = a0
+        const address = t1
+        const zeroValue = t2
+    end
+
+    if X86 or X86_WIN
+        loadp 4[sp], vm
+    end
+
+    loadp VM::m_lastStackTop[vm], address
+    bpbeq sp, address, .zeroFillDone
+
+    move 0, zeroValue
+.zeroFillLoop:
+    storep zeroValue, [address]
+    addp PtrSize, address
+    bpa sp, address, .zeroFillLoop
+
+.zeroFillDone:
+    move sp, address
+    storep address, VM::m_lastStackTop[vm]
     ret
 end
 
 
-# Indicate the beginning of LLInt.
-_llint_begin:
+if C_LOOP
+# Dummy entry point the C Loop uses to initialize.
+_llint_entry:
     crash()
+else
+macro initPCRelative(pcBase)
+    if X86_64 or X86_64_WIN
+        call _relativePCBase
+    _relativePCBase:
+        pop pcBase
+    elsif X86 or X86_WIN
+        call _relativePCBase
+    _relativePCBase:
+        pop pcBase
+        loadp 20[sp], t4
+    elsif ARM64
+    elsif ARMv7
+    _relativePCBase:
+        move pc, pcBase
+        subp 3, pcBase   # Need to back up the PC and set the Thumb2 bit
+    elsif ARM or ARMv7_TRADITIONAL
+    _relativePCBase:
+        move pc, pcBase
+        subp 8, pcBase
+    elsif MIPS
+        crash()  # Need to replace with any initialization steps needed to set up PC-relative address calculation
+    elsif SH4
+        mova _relativePCBase, t0
+        move t0, pcBase
+        alignformova
+    _relativePCBase:
+    end
+end
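+# setEntryAddress(index, label) turns a PC-relative opcode label into an absolute
+# address and stores it at the given index of the opcode table handed to
+# _llint_entry; the generated InitBytecodes file expands to one such call per opcode.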
+
+macro setEntryAddress(index, label)
+    if X86_64
+        leap (label - _relativePCBase)[t1], t0
+        move index, t2
+        storep t0, [t4, t2, 8]
+    elsif X86_64_WIN
+        leap (label - _relativePCBase)[t1], t0
+        move index, t4
+        storep t0, [t2, t4, 8]
+    elsif X86 or X86_WIN
+        leap (label - _relativePCBase)[t1], t0
+        move index, t2
+        storep t0, [t4, t2, 4]
+    elsif ARM64
+        pcrtoaddr label, t1
+        move index, t2
+        storep t1, [a0, t2, 8]
+    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+        mvlbl (label - _relativePCBase), t2
+        addp t2, t1, t2
+        move index, t3
+        storep t2, [a0, t3, 4]
+    elsif SH4
+        move (label - _relativePCBase), t2
+        addp t2, t1, t2
+        move index, t3
+        storep t2, [a0, t3, 4]
+        flushcp # Force constant pool flush to avoid "pcrel too far" link error.
+    elsif MIPS
+        crash()  # Need to replace with code to turn the label into an absolute address and save it at the index
+    end
+end
 
+global _llint_entry
+# Entry point for the llint to initialize.
+_llint_entry:
+    functionPrologue()
+    pushCalleeSaves
+    initPCRelative(t1)
+
+    # Include generated bytecode initialization file.
+    include InitBytecodes
+
+    popCalleeSaves
+    functionEpilogue()
+    ret
+end
 
 _llint_program_prologue:
     prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
@@ -348,26 +786,30 @@ _llint_eval_prologue:
 
 _llint_function_for_call_prologue:
     prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
-.functionForCallBegin:
     functionInitialization(0)
     dispatch(0)
     
 
 _llint_function_for_construct_prologue:
     prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
-.functionForConstructBegin:
     functionInitialization(1)
     dispatch(0)
     
 
 _llint_function_for_call_arity_check:
     prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
-    functionArityCheck(.functionForCallBegin, _llint_slow_path_call_arityCheck)
+    functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck)
+.functionForCallBegin:
+    functionInitialization(0)
+    dispatch(0)
 
 
 _llint_function_for_construct_arity_check:
     prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
-    functionArityCheck(.functionForConstructBegin, _llint_slow_path_construct_arityCheck)
+    functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck)
+.functionForConstructBegin:
+    functionInitialization(1)
+    dispatch(0)
 
 
 # Value-representation-specific code.
@@ -379,16 +821,28 @@ end
 
 
 # Value-representation-agnostic code.
+_llint_op_touch_entry:
+    traceExecution()
+    callSlowPath(_slow_path_touch_entry)
+    dispatch(1)
+
+
 _llint_op_new_array:
     traceExecution()
     callSlowPath(_llint_slow_path_new_array)
+    dispatch(5)
+
+
+_llint_op_new_array_with_size:
+    traceExecution()
+    callSlowPath(_llint_slow_path_new_array_with_size)
     dispatch(4)
 
 
 _llint_op_new_array_buffer:
     traceExecution()
     callSlowPath(_llint_slow_path_new_array_buffer)
-    dispatch(4)
+    dispatch(5)
 
 
 _llint_op_new_regexp:
@@ -399,92 +853,70 @@ _llint_op_new_regexp:
 
 _llint_op_less:
     traceExecution()
-    callSlowPath(_llint_slow_path_less)
+    callSlowPath(_slow_path_less)
     dispatch(4)
 
 
 _llint_op_lesseq:
     traceExecution()
-    callSlowPath(_llint_slow_path_lesseq)
+    callSlowPath(_slow_path_lesseq)
     dispatch(4)
 
 
 _llint_op_greater:
     traceExecution()
-    callSlowPath(_llint_slow_path_greater)
+    callSlowPath(_slow_path_greater)
     dispatch(4)
 
 
 _llint_op_greatereq:
     traceExecution()
-    callSlowPath(_llint_slow_path_greatereq)
+    callSlowPath(_slow_path_greatereq)
     dispatch(4)
 
 
 _llint_op_mod:
     traceExecution()
-    callSlowPath(_llint_slow_path_mod)
+    callSlowPath(_slow_path_mod)
     dispatch(4)
 
 
 _llint_op_typeof:
     traceExecution()
-    callSlowPath(_llint_slow_path_typeof)
+    callSlowPath(_slow_path_typeof)
     dispatch(3)
 
 
 _llint_op_is_object:
     traceExecution()
-    callSlowPath(_llint_slow_path_is_object)
+    callSlowPath(_slow_path_is_object)
     dispatch(3)
 
 
 _llint_op_is_function:
     traceExecution()
-    callSlowPath(_llint_slow_path_is_function)
+    callSlowPath(_slow_path_is_function)
     dispatch(3)
 
 
 _llint_op_in:
     traceExecution()
-    callSlowPath(_llint_slow_path_in)
-    dispatch(4)
-
-
-_llint_op_resolve:
-    traceExecution()
-    callSlowPath(_llint_slow_path_resolve)
+    callSlowPath(_slow_path_in)
     dispatch(4)
 
+macro withInlineStorage(object, propertyStorage, continuation)
+    # Indicate that the object is the property storage, and that the
+    # property storage register is unused.
+    continuation(object, propertyStorage)
+end
 
-_llint_op_resolve_skip:
-    traceExecution()
-    callSlowPath(_llint_slow_path_resolve_skip)
-    dispatch(5)
-
-
-_llint_op_resolve_base:
-    traceExecution()
-    callSlowPath(_llint_slow_path_resolve_base)
-    dispatch(5)
-
-
-_llint_op_ensure_property_exists:
-    traceExecution()
-    callSlowPath(_llint_slow_path_ensure_property_exists)
-    dispatch(3)
-
-
-_llint_op_resolve_with_base:
-    traceExecution()
-    callSlowPath(_llint_slow_path_resolve_with_base)
-    dispatch(5)
-
-
-_llint_op_resolve_with_this:
-    traceExecution()
-    callSlowPath(_llint_slow_path_resolve_with_this)
-    dispatch(5)
+macro withOutOfLineStorage(object, propertyStorage, continuation)
+    loadp JSObject::m_butterfly[object], propertyStorage
+    # Indicate that the propertyStorage register now points to the
+    # property storage, and that the object register may be reused
+    # if the object pointer is not needed anymore.
+    continuation(propertyStorage, object)
+end
 
 
 _llint_op_del_by_id:
@@ -511,14 +943,6 @@ _llint_op_put_getter_setter:
     dispatch(5)
 
 
-_llint_op_jmp_scopes:
-    traceExecution()
-    callSlowPath(_llint_slow_path_jmp_scopes)
-    dispatch(0)
-
-
-_llint_op_loop_if_true:
-    jmp _llint_op_jtrue
 _llint_op_jtrue:
     traceExecution()
     jumpTrueOrFalse(
@@ -526,8 +950,6 @@ _llint_op_jtrue:
         _llint_slow_path_jtrue)
 
 
-_llint_op_loop_if_false:
-   jmp _llint_op_jfalse
 _llint_op_jfalse:
     traceExecution()
     jumpTrueOrFalse(
@@ -535,8 +957,6 @@ _llint_op_jfalse:
         _llint_slow_path_jfalse)
 
 
-_llint_op_loop_if_less:
-    jmp _llint_op_jless
 _llint_op_jless:
     traceExecution()
     compare(
@@ -553,8 +973,6 @@ _llint_op_jnless:
         _llint_slow_path_jnless)
 
 
-_llint_op_loop_if_greater:
-    jmp _llint_op_jgreater
 _llint_op_jgreater:
     traceExecution()
     compare(
@@ -571,8 +989,6 @@ _llint_op_jngreater:
         _llint_slow_path_jngreater)
 
 
-_llint_op_loop_if_lesseq:
-    jmp _llint_op_jlesseq
 _llint_op_jlesseq:
     traceExecution()
     compare(
@@ -589,8 +1005,6 @@ _llint_op_jnlesseq:
         _llint_slow_path_jnlesseq)
 
 
-_llint_op_loop_if_greatereq:
-    jmp _llint_op_jgreatereq
 _llint_op_jgreatereq:
     traceExecution()
     compare(
@@ -609,9 +1023,18 @@ _llint_op_jngreatereq:
 
 _llint_op_loop_hint:
     traceExecution()
+    loadp CodeBlock[cfr], t1
+    loadp CodeBlock::m_vm[t1], t1
+    loadb VM::watchdog+Watchdog::m_timerDidFire[t1], t0
+    btbnz t0, .handleWatchdogTimer
+.afterWatchdogTimerCheck:
     checkSwitchToJITForLoop()
     dispatch(1)
-
+.handleWatchdogTimer:
+    callWatchdogTimerHandler(.throwHandler)
+    jmp .afterWatchdogTimerCheck
+.throwHandler:
+    jmp _llint_throw_from_slow_path_trampoline
 
 _llint_op_switch_string:
     traceExecution()
@@ -627,6 +1050,7 @@ _llint_op_new_func_exp:
 
 _llint_op_call:
     traceExecution()
+    arrayProfileForCall()
     doCall(_llint_slow_path_call)
 
 
@@ -637,7 +1061,39 @@ _llint_op_construct:
 
 _llint_op_call_varargs:
     traceExecution()
-    slowPathForCall(6, _llint_slow_path_call_varargs)
+    callSlowPath(_llint_slow_path_size_frame_for_varargs)
+    branchIfException(_llint_throw_from_slow_path_trampoline)
+    # calleeFrame in t1
+    if JSVALUE64
+        move t1, sp
+    else
+        # The calleeFrame is not stack aligned; move down by CallerFrameAndPCSize to align
+        if ARMv7
+            subp t1, CallerFrameAndPCSize, t2
+            move t2, sp
+        else
+            subp t1, CallerFrameAndPCSize, sp
+        end
+    end
+    slowPathForCall(_llint_slow_path_call_varargs)
+
+_llint_op_construct_varargs:
+    traceExecution()
+    callSlowPath(_llint_slow_path_size_frame_for_varargs)
+    branchIfException(_llint_throw_from_slow_path_trampoline)
+    # calleeFrame in t1
+    if JSVALUE64
+        move t1, sp
+    else
+        # The calleeFrame is not stack aligned; move down by CallerFrameAndPCSize to align
+        if ARMv7
+            subp t1, CallerFrameAndPCSize, t2
+            move t2, sp
+        else
+            subp t1, CallerFrameAndPCSize, sp
+        end
+    end
+    slowPathForCall(_llint_slow_path_construct_varargs)
 
 
 _llint_op_call_eval:
@@ -676,7 +1132,7 @@ _llint_op_call_eval:
     # and a PC to call, and that PC may be a dummy thunk that just
     # returns the JS value that the eval returned.
     
-    slowPathForCall(4, _llint_slow_path_call_eval)
+    slowPathForCall(_llint_slow_path_call_eval)
 
 
 _llint_generic_return_point:
@@ -685,25 +1141,19 @@ _llint_generic_return_point:
 
 _llint_op_strcat:
     traceExecution()
-    callSlowPath(_llint_slow_path_strcat)
+    callSlowPath(_slow_path_strcat)
     dispatch(4)
 
 
-_llint_op_method_check:
-    traceExecution()
-    # We ignore method checks and use normal get_by_id optimizations.
-    dispatch(1)
-
-
 _llint_op_get_pnames:
     traceExecution()
     callSlowPath(_llint_slow_path_get_pnames)
     dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.
 
 
-_llint_op_push_scope:
+_llint_op_push_with_scope:
     traceExecution()
-    callSlowPath(_llint_slow_path_push_scope)
+    callSlowPath(_llint_slow_path_push_with_scope)
     dispatch(2)
 
 
@@ -713,9 +1163,9 @@ _llint_op_pop_scope:
     dispatch(1)
 
 
-_llint_op_push_new_scope:
+_llint_op_push_name_scope:
     traceExecution()
-    callSlowPath(_llint_slow_path_push_new_scope)
+    callSlowPath(_llint_slow_path_push_name_scope)
     dispatch(4)
 
 
@@ -725,34 +1175,42 @@ _llint_op_throw:
     dispatch(2)
 
 
-_llint_op_throw_reference_error:
+_llint_op_throw_static_error:
     traceExecution()
-    callSlowPath(_llint_slow_path_throw_reference_error)
-    dispatch(2)
+    callSlowPath(_llint_slow_path_throw_static_error)
+    dispatch(3)
 
 
 _llint_op_profile_will_call:
     traceExecution()
-    loadp JITStackFrame::enabledProfilerReference[sp], t0
-    btpz [t0], .opProfileWillCallDone
+    loadp CodeBlock[cfr], t0
+    loadp CodeBlock::m_vm[t0], t0
+    loadi VM::m_enabledProfiler[t0], t0
+    btpz t0, .opProfilerWillCallDone
     callSlowPath(_llint_slow_path_profile_will_call)
-.opProfileWillCallDone:
+.opProfilerWillCallDone:
     dispatch(2)
 
 
 _llint_op_profile_did_call:
     traceExecution()
-    loadp JITStackFrame::enabledProfilerReference[sp], t0
-    btpz [t0], .opProfileWillCallDone
+    loadp CodeBlock[cfr], t0
+    loadp CodeBlock::m_vm[t0], t0
+    loadi VM::m_enabledProfiler[t0], t0
+    btpz t0, .opProfilerDidCallDone
     callSlowPath(_llint_slow_path_profile_did_call)
-.opProfileDidCallDone:
+.opProfilerDidCallDone:
     dispatch(2)
 
 
 _llint_op_debug:
     traceExecution()
+    loadp CodeBlock[cfr], t0
+    loadi CodeBlock::m_debuggerRequests[t0], t0
+    btiz t0, .opDebugDone
     callSlowPath(_llint_slow_path_debug)
-    dispatch(4)
+.opDebugDone:                    
+    dispatch(3)
 
 
 _llint_native_call_trampoline:
@@ -781,53 +1239,6 @@ macro notSupported()
     end
 end
 
-_llint_op_get_array_length:
-    notSupported()
-
-_llint_op_get_by_id_chain:
-    notSupported()
-
-_llint_op_get_by_id_custom_chain:
-    notSupported()
-
-_llint_op_get_by_id_custom_proto:
-    notSupported()
-
-_llint_op_get_by_id_custom_self:
-    notSupported()
-
-_llint_op_get_by_id_generic:
-    notSupported()
-
-_llint_op_get_by_id_getter_chain:
-    notSupported()
-
-_llint_op_get_by_id_getter_proto:
-    notSupported()
-
-_llint_op_get_by_id_getter_self:
-    notSupported()
-
-_llint_op_get_by_id_proto:
-    notSupported()
-
-_llint_op_get_by_id_self:
-    notSupported()
-
-_llint_op_get_string_length:
-    notSupported()
-
-_llint_op_put_by_id_generic:
-    notSupported()
-
-_llint_op_put_by_id_replace:
-    notSupported()
-
-_llint_op_put_by_id_transition:
-    notSupported()
-
-
-# Indicate the end of LLInt.
-_llint_end:
-    crash()
+_llint_op_init_global_const_nop:
+    dispatch(5)