diff --git a/llint/LowLevelInterpreter.asm b/llint/LowLevelInterpreter.asm
index 5be8979c6691d06f13853b3a5cc1ef8564549a3f..d9cd01b50c87fadc30cd38caec516781da6b4bf2 100644
--- a/llint/LowLevelInterpreter.asm
+++ b/llint/LowLevelInterpreter.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011-2015 Apple Inc. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
 # First come the common protocols that both interpreters use. Note that each
 # of these must have an ASSERT() in LLIntData.cpp
 
-# Work-around for the fact that the toolchain's awareness of armv7s results in
-# a separate slab in the fat binary, yet the offlineasm doesn't know to expect
-# it.
+# Work-around for the fact that the toolchain's awareness of armv7k / armv7s
+# results in a separate slab in the fat binary, yet the offlineasm doesn't know
+# to expect it.
+if ARMv7k
+end
 if ARMv7s
 end
 
 # These declarations must match interpreter/JSStack.h.
-const CallFrameHeaderSize = 48
-const ArgumentCount = -48
-const CallerFrame = -40
-const Callee = -32
-const ScopeChain = -24
-const ReturnPC = -16
-const CodeBlock = -8
 
-const ThisArgumentOffset = -CallFrameHeaderSize - 8
+if JSVALUE64
+    const PtrSize = 8
+    const CallFrameHeaderSlots = 5
+else
+    const PtrSize = 4
+    const CallFrameHeaderSlots = 4
+    const CallFrameAlignSlots = 1
+end
+const SlotSize = 8
+
+const JSEnvironmentRecord_variables = (sizeof JSEnvironmentRecord + SlotSize - 1) & ~(SlotSize - 1)
+const DirectArguments_storage = (sizeof DirectArguments + SlotSize - 1) & ~(SlotSize - 1)
+
+const StackAlignment = 16
+const StackAlignmentMask = StackAlignment - 1
+
+const CallerFrameAndPCSize = 2 * PtrSize
+
+const CallerFrame = 0
+const ReturnPC = CallerFrame + PtrSize
+const CodeBlock = ReturnPC + PtrSize
+const Callee = CodeBlock + SlotSize
+const ArgumentCount = Callee + SlotSize
+const ThisArgumentOffset = ArgumentCount + SlotSize
+const FirstArgumentOffset = ThisArgumentOffset + SlotSize
+const CallFrameHeaderSize = ThisArgumentOffset
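
For reference, the 64-bit (JSVALUE64) offsets implied by the constants above work out as shown in this minimal standalone C sketch; the names mirror the offlineasm constants and are illustrative only, not taken from JSC headers:

    #include <assert.h>

    /* Offsets implied by the JSVALUE64 branch above (PtrSize == SlotSize == 8). */
    enum {
        PtrSize             = 8,
        SlotSize            = 8,
        CallerFrame         = 0,
        ReturnPC            = CallerFrame + PtrSize,          /*  8 */
        CodeBlockSlot       = ReturnPC + PtrSize,              /* 16 */
        Callee              = CodeBlockSlot + SlotSize,        /* 24 */
        ArgumentCount       = Callee + SlotSize,               /* 32 */
        ThisArgumentOffset  = ArgumentCount + SlotSize,        /* 40 */
        FirstArgumentOffset = ThisArgumentOffset + SlotSize,   /* 48 */
        CallFrameHeaderSize = ThisArgumentOffset               /* 40 */
    };

    int main(void)
    {
        assert(CallFrameHeaderSize == 5 * SlotSize); /* CallFrameHeaderSlots == 5 */
        return 0;
    }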
+
+# Some value representation constants.
+if JSVALUE64
+    const TagBitTypeOther = 0x2
+    const TagBitBool      = 0x4
+    const TagBitUndefined = 0x8
+    const ValueEmpty      = 0x0
+    const ValueFalse      = TagBitTypeOther | TagBitBool
+    const ValueTrue       = TagBitTypeOther | TagBitBool | 1
+    const ValueUndefined  = TagBitTypeOther | TagBitUndefined
+    const ValueNull       = TagBitTypeOther
+    const TagTypeNumber   = 0xffff000000000000
+    const TagMask         = TagTypeNumber | TagBitTypeOther
+else
+    const Int32Tag = -1
+    const BooleanTag = -2
+    const NullTag = -3
+    const UndefinedTag = -4
+    const CellTag = -5
+    const EmptyValueTag = -6
+    const DeletedValueTag = -7
+    const LowestTag = DeletedValueTag
+end
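
The JSVALUE64 constants above describe the usual NaN-boxing layout: all-ones in the top 16 bits marks an int32, any bits in that range mark a number, and a value with none of the tag bits set is a cell pointer. As a rough illustration (a sketch of the tag tests, not the engine's actual JSValue code):

    #include <assert.h>
    #include <stdint.h>

    #define TagBitTypeOther 0x2ull
    #define TagBitBool      0x4ull
    #define TagBitUndefined 0x8ull
    #define TagTypeNumber   0xffff000000000000ull
    #define TagMask         (TagTypeNumber | TagBitTypeOther)

    /* Int32s carry all TagTypeNumber bits; any number carries at least one of
       them; cells (pointers) have none of the tag bits set. */
    static int isInt32(uint64_t v)  { return (v & TagTypeNumber) == TagTypeNumber; }
    static int isNumber(uint64_t v) { return (v & TagTypeNumber) != 0; }
    static int isCell(uint64_t v)   { return (v & TagMask) == 0 && v != 0; }

    int main(void)
    {
        uint64_t valueTrue      = TagBitTypeOther | TagBitBool | 1;  /* 0x7 */
        uint64_t valueUndefined = TagBitTypeOther | TagBitUndefined; /* 0xa */
        uint64_t boxedInt       = TagTypeNumber | 42;                /* int32 42 */
        assert(isInt32(boxedInt) && !isCell(valueTrue) && !isNumber(valueUndefined));
        return 0;
    }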
+
+const CallOpCodeSize = 9
+
+if X86_64 or ARM64 or C_LOOP
+    const maxFrameExtentForSlowPathCall = 0
+elsif ARM or ARMv7_TRADITIONAL or ARMv7 or SH4
+    const maxFrameExtentForSlowPathCall = 24
+elsif X86 or X86_WIN
+    const maxFrameExtentForSlowPathCall = 40
+elsif MIPS
+    const maxFrameExtentForSlowPathCall = 40
+elsif X86_64_WIN
+    const maxFrameExtentForSlowPathCall = 64
+end
+
+# Watchpoint states
+const ClearWatchpoint = 0
+const IsWatched = 1
+const IsInvalidated = 2
 
 # Some register conventions.
 if JSVALUE64
     # - Use a pair of registers to represent the PC: one register for the
-    #   base of the stack, and one register for the index.
+    #   base of the bytecodes, and one register for the index.
     # - The PC base (or PB for short) should be stored in the csr. It will
     #   get clobbered on calls to other JS code, but will get saved on calls
     #   to C functions.
     # - C calls are still given the Instruction* rather than the PC index.
     #   This requires an add before the call, and a sub after.
-    const PC = t4
+    const PC = t5
     const PB = t6
     const tagTypeNumber = csr1
     const tagMask = csr2
@@ -68,7 +130,7 @@ if JSVALUE64
     end
 
 else
-    const PC = t4
+    const PC = t5
     macro loadisFromInstruction(offset, dest)
         loadis offset * 4[PC], dest
     end
@@ -98,8 +160,9 @@ const ArrayStorageShape        = 28
 const SlowPutArrayStorageShape = 30
 
 # Type constants.
-const StringType = 5
-const ObjectType = 17
+const StringType = 6
+const ObjectType = 18
+const FinalObjectType = 19
 
 # Type flags constants.
 const MasqueradesAsUndefined = 1
@@ -118,33 +181,28 @@ const FunctionCode = 2
 const LLIntReturnPC = ArgumentCount + TagOffset
 
 # String flags.
-const HashFlags8BitBuffer = 64
+const HashFlags8BitBuffer = 8
 
 # Copied from PropertyOffset.h
 const firstOutOfLineOffset = 100
 
-# From ResolveOperations.h
-const ResolveOperationFail = 0
-const ResolveOperationSetBaseToUndefined = 1
-const ResolveOperationReturnScopeAsBase = 2
-const ResolveOperationSetBaseToScope = 3
-const ResolveOperationSetBaseToGlobal = 4
-const ResolveOperationGetAndReturnScopedVar = 5
-const ResolveOperationGetAndReturnGlobalVar = 6
-const ResolveOperationGetAndReturnGlobalVarWatchable = 7
-const ResolveOperationSkipTopScopeNode = 8
-const ResolveOperationSkipScopes = 9
-const ResolveOperationReturnGlobalObjectAsBase = 10
-const ResolveOperationGetAndReturnGlobalProperty = 11
-const ResolveOperationCheckForDynamicEntriesBeforeGlobalScope = 12
-
-const PutToBaseOperationKindUninitialised = 0
-const PutToBaseOperationKindGeneric = 1
-const PutToBaseOperationKindReadonly = 2
-const PutToBaseOperationKindGlobalVariablePut = 3
-const PutToBaseOperationKindGlobalVariablePutChecked = 4
-const PutToBaseOperationKindGlobalPropertyPut = 5
-const PutToBaseOperationKindVariablePut = 6
+# ResolveType
+const GlobalProperty = 0
+const GlobalVar = 1
+const ClosureVar = 2
+const LocalClosureVar = 3
+const GlobalPropertyWithVarInjectionChecks = 4
+const GlobalVarWithVarInjectionChecks = 5
+const ClosureVarWithVarInjectionChecks = 6
+const Dynamic = 7
+
+const ResolveModeMask = 0xffff
+
+const MarkedBlockSize = 16 * 1024
+const MarkedBlockMask = ~(MarkedBlockSize - 1)
+# Constants for checking mark bits.
+const AtomNumberShift = 3
+const BitMapWordShift = 4
 
 # Allocation constants
 if JSVALUE64
@@ -161,15 +219,12 @@ else
     const VectorSizeOffset = 8
 end
 
-
 # Some common utilities.
 macro crash()
     if C_LOOP
         cloopCrash
     else
-        storei t0, 0xbbadbeef[]
-        move 0, t0
-        call t0
+        call _llint_crash
     end
 end
 
@@ -181,13 +236,178 @@ macro assert(assertion)
     end
 end
 
+macro checkStackPointerAlignment(tempReg, location)
+    if ARM64 or C_LOOP or SH4
+        # ARM64 will check for us!
+        # C_LOOP does not need the alignment, and can use a little perf
+        # improvement from avoiding useless work.
+        # SH4 does not need specific alignment (4 bytes).
+    else
+        if ARM or ARMv7 or ARMv7_TRADITIONAL
+            # ARM can't do logical ops with the sp as a source
+            move sp, tempReg
+            andp StackAlignmentMask, tempReg
+        else
+            andp sp, StackAlignmentMask, tempReg
+        end
+        btpz tempReg, .stackPointerOkay
+        move location, tempReg
+        break
+    .stackPointerOkay:
+    end
+end
+
+if C_LOOP
+    const CalleeSaveRegisterCount = 0
+elsif ARM or ARMv7_TRADITIONAL or ARMv7
+    const CalleeSaveRegisterCount = 7
+elsif ARM64
+    const CalleeSaveRegisterCount = 10
+elsif SH4 or X86_64 or MIPS
+    const CalleeSaveRegisterCount = 5
+elsif X86 or X86_WIN
+    const CalleeSaveRegisterCount = 3
+elsif X86_64_WIN
+    const CalleeSaveRegisterCount = 7
+end
+
+const CalleeRegisterSaveSize = CalleeSaveRegisterCount * PtrSize
+
+# VMEntryTotalFrameSize includes the space for struct VMEntryRecord and the
+# callee save registers rounded up to keep the stack aligned
+const VMEntryTotalFrameSize = (CalleeRegisterSaveSize + sizeof VMEntryRecord + StackAlignment - 1) & ~StackAlignmentMask
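
The `& ~StackAlignmentMask` expression above is the standard round-up-to-a-power-of-two idiom, the same one used earlier for JSEnvironmentRecord_variables and DirectArguments_storage. A tiny C sketch of the arithmetic:

    #include <assert.h>

    /* Round size up to the next multiple of StackAlignment (16). */
    static unsigned long roundUpToStackAlignment(unsigned long size)
    {
        const unsigned long StackAlignment = 16;
        return (size + StackAlignment - 1) & ~(StackAlignment - 1);
    }

    int main(void)
    {
        assert(roundUpToStackAlignment(41) == 48);
        assert(roundUpToStackAlignment(48) == 48);
        return 0;
    }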
+
+macro pushCalleeSaves()
+    if C_LOOP
+    elsif ARM or ARMv7_TRADITIONAL
+        emit "push {r4-r10}"
+    elsif ARMv7
+        emit "push {r4-r6, r8-r11}"
+    elsif ARM64
+        emit "stp x20, x19, [sp, #-16]!"
+        emit "stp x22, x21, [sp, #-16]!"
+        emit "stp x24, x23, [sp, #-16]!"
+        emit "stp x26, x25, [sp, #-16]!"
+        emit "stp x28, x27, [sp, #-16]!"
+    elsif MIPS
+        emit "addiu $sp, $sp, -20"
+        emit "sw $20, 16($sp)"
+        emit "sw $19, 12($sp)"
+        emit "sw $18, 8($sp)"
+        emit "sw $17, 4($sp)"
+        emit "sw $16, 0($sp)"
+    elsif SH4
+        emit "mov.l r13, @-r15"
+        emit "mov.l r11, @-r15"
+        emit "mov.l r10, @-r15"
+        emit "mov.l r9, @-r15"
+        emit "mov.l r8, @-r15"
+    elsif X86
+        emit "push %esi"
+        emit "push %edi"
+        emit "push %ebx"
+    elsif X86_WIN
+        emit "push esi"
+        emit "push edi"
+        emit "push ebx"
+    elsif X86_64
+        emit "push %r12"
+        emit "push %r13"
+        emit "push %r14"
+        emit "push %r15"
+        emit "push %rbx"
+    elsif X86_64_WIN
+        emit "push r12"
+        emit "push r13"
+        emit "push r14"
+        emit "push r15"
+        emit "push rbx"
+        emit "push rdi"
+        emit "push rsi"
+    end
+end
+
+macro popCalleeSaves()
+    if C_LOOP
+    elsif ARM or ARMv7_TRADITIONAL
+        emit "pop {r4-r10}"
+    elsif ARMv7
+        emit "pop {r4-r6, r8-r11}"
+    elsif ARM64
+        emit "ldp x28, x27, [sp], #16"
+        emit "ldp x26, x25, [sp], #16"
+        emit "ldp x24, x23, [sp], #16"
+        emit "ldp x22, x21, [sp], #16"
+        emit "ldp x20, x19, [sp], #16"
+    elsif MIPS
+        emit "lw $16, 0($sp)"
+        emit "lw $17, 4($sp)"
+        emit "lw $18, 8($sp)"
+        emit "lw $19, 12($sp)"
+        emit "lw $20, 16($sp)"
+        emit "addiu $sp, $sp, 20"
+    elsif SH4
+        emit "mov.l @r15+, r8"
+        emit "mov.l @r15+, r9"
+        emit "mov.l @r15+, r10"
+        emit "mov.l @r15+, r11"
+        emit "mov.l @r15+, r13"
+    elsif X86
+        emit "pop %ebx"
+        emit "pop %edi"
+        emit "pop %esi"
+    elsif X86_WIN
+        emit "pop ebx"
+        emit "pop edi"
+        emit "pop esi"
+    elsif X86_64
+        emit "pop %rbx"
+        emit "pop %r15"
+        emit "pop %r14"
+        emit "pop %r13"
+        emit "pop %r12"
+    elsif X86_64_WIN
+        emit "pop rsi"
+        emit "pop rdi"
+        emit "pop rbx"
+        emit "pop r15"
+        emit "pop r14"
+        emit "pop r13"
+        emit "pop r12"
+    end
+end
+
+macro preserveCallerPCAndCFR()
+    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+        push lr
+        push cfr
+    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+        push cfr
+    elsif ARM64
+        push cfr, lr
+    else
+        error
+    end
+    move sp, cfr
+end
+
+macro restoreCallerPCAndCFR()
+    move cfr, sp
+    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+        pop cfr
+        pop lr
+    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+        pop cfr
+    elsif ARM64
+        pop lr, cfr
+    end
+end
+
 macro preserveReturnAddressAfterCall(destinationRegister)
-    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS
+    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
         # In C_LOOP case, we're only preserving the bytecode vPC.
         move lr, destinationRegister
-    elsif SH4
-        stspr destinationRegister
-    elsif X86 or X86_64
+    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
         pop destinationRegister
     else
         error
@@ -195,63 +415,121 @@ macro preserveReturnAddressAfterCall(destinationRegister)
 end
 
 macro restoreReturnAddressBeforeReturn(sourceRegister)
-    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS
+    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
         # In C_LOOP case, we're only restoring the bytecode vPC.
         move sourceRegister, lr
-    elsif SH4
-        ldspr sourceRegister
-    elsif X86 or X86_64
+    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
         push sourceRegister
     else
         error
     end
 end
 
+macro functionPrologue()
+    if X86 or X86_WIN or X86_64 or X86_64_WIN
+        push cfr
+    elsif ARM64
+        push cfr, lr
+    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+        push lr
+        push cfr
+    end
+    move sp, cfr
+end
+
+macro functionEpilogue()
+    if X86 or X86_WIN or X86_64 or X86_64_WIN
+        pop cfr
+    elsif ARM64
+        pop lr, cfr
+    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+        pop cfr
+        pop lr
+    end
+end
+
+macro vmEntryRecord(entryFramePointer, resultReg)
+    subp entryFramePointer, VMEntryTotalFrameSize, resultReg
+end
+
+macro getFrameRegisterSizeForCodeBlock(codeBlock, size)
+    loadi CodeBlock::m_numCalleeRegisters[codeBlock], size
+    lshiftp 3, size
+    addp maxFrameExtentForSlowPathCall, size
+end
+
+macro restoreStackPointerAfterCall()
+    loadp CodeBlock[cfr], t2
+    getFrameRegisterSizeForCodeBlock(t2, t4)
+    if ARMv7
+        subp cfr, t4, t4
+        move t4, sp
+    else
+        subp cfr, t4, sp
+    end
+end
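
restoreStackPointerAfterCall recomputes sp from the frame pointer rather than trusting whatever the callee left behind. The pointer arithmetic it performs, written as a small C sketch (the names are illustrative, not JSC API):

    #include <stdint.h>

    /* sp = cfr - (numCalleeRegisters * 8 + maxFrameExtentForSlowPathCall),
       mirroring getFrameRegisterSizeForCodeBlock plus the subp above. */
    static char* frameStackPointer(char* cfr,
                                   uint32_t numCalleeRegisters,
                                   uint32_t maxFrameExtentForSlowPathCall)
    {
        uintptr_t frameSize = (uintptr_t)numCalleeRegisters * 8  /* lshiftp 3 */
                            + maxFrameExtentForSlowPathCall;
        return cfr - frameSize;
    }

    int main(void)
    {
        char frame[1024];
        char* cfr = frame + sizeof frame;
        return frameStackPointer(cfr, 10, 0) == cfr - 80 ? 0 : 1;
    }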
+
 macro traceExecution()
     if EXECUTION_TRACING
         callSlowPath(_llint_trace)
     end
 end
 
-macro callTargetFunction(callLinkInfo)
+macro callTargetFunction(callLinkInfo, calleeFramePtr)
+    move calleeFramePtr, sp
     if C_LOOP
         cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
     else
         call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
-        dispatchAfterCall()
     end
+    restoreStackPointerAfterCall()
+    dispatchAfterCall()
 end
 
-macro slowPathForCall(advance, slowPath)
+macro slowPathForCall(slowPath)
     callCallSlowPath(
-        advance,
         slowPath,
         macro (callee)
+            btpz t1, .dontUpdateSP
+            if ARMv7
+                addp CallerFrameAndPCSize, t1, t1
+                move t1, sp
+            else
+                addp CallerFrameAndPCSize, t1, sp
+            end
+        .dontUpdateSP:
             if C_LOOP
                 cloopCallJSFunction callee
             else
                 call callee
-                dispatchAfterCall()
             end
+            restoreStackPointerAfterCall()
+            dispatchAfterCall()
         end)
 end
 
-macro arrayProfile(structureAndIndexingType, profile, scratch)
-    const structure = structureAndIndexingType
-    const indexingType = structureAndIndexingType
-    if VALUE_PROFILER
-        storep structure, ArrayProfile::m_lastSeenStructure[profile]
-    end
-    loadb Structure::m_indexingType[structure], indexingType
+macro arrayProfile(cellAndIndexingType, profile, scratch)
+    const cell = cellAndIndexingType
+    const indexingType = cellAndIndexingType 
+    loadi JSCell::m_structureID[cell], scratch
+    storei scratch, ArrayProfile::m_lastSeenStructureID[profile]
+    loadb JSCell::m_indexingType[cell], indexingType
+end
+
+macro skipIfIsRememberedOrInEden(cell, scratch1, scratch2, continuation)
+    loadb JSCell::m_gcData[cell], scratch1
+    continuation(scratch1)
+end
+
+macro notifyWrite(set, slow)
+    bbneq WatchpointSet::m_state[set], IsInvalidated, slow
 end
 
 macro checkSwitchToJIT(increment, action)
-    if JIT_ENABLED
-        loadp CodeBlock[cfr], t0
-        baddis increment, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t0], .continue
-        action()
+    loadp CodeBlock[cfr], t0
+    baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue
+    action()
     .continue:
-    end
 end
 
 macro checkSwitchToJITForEpilogue()
@@ -267,13 +545,21 @@ macro assertNotConstant(index)
 end
 
 macro functionForCallCodeBlockGetter(targetRegister)
-    loadp Callee[cfr], targetRegister
+    if JSVALUE64
+        loadp Callee[cfr], targetRegister
+    else
+        loadp Callee + PayloadOffset[cfr], targetRegister
+    end
     loadp JSFunction::m_executable[targetRegister], targetRegister
     loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister
 end
 
 macro functionForConstructCodeBlockGetter(targetRegister)
-    loadp Callee[cfr], targetRegister
+    if JSVALUE64
+        loadp Callee[cfr], targetRegister
+    else
+        loadp Callee + PayloadOffset[cfr], targetRegister
+    end
     loadp JSFunction::m_executable[targetRegister], targetRegister
     loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister
 end
@@ -293,28 +579,45 @@ end
 # Do the bare minimum required to execute code. Sets up the PC, leave the CodeBlock*
 # in t1. May also trigger prologue entry OSR.
 macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
-    preserveReturnAddressAfterCall(t2)
-    
     # Set up the call frame and check if we should OSR.
-    storep t2, ReturnPC[cfr]
+    preserveCallerPCAndCFR()
+
     if EXECUTION_TRACING
+        subp maxFrameExtentForSlowPathCall, sp
         callSlowPath(traceSlowPath)
+        addp maxFrameExtentForSlowPathCall, sp
     end
     codeBlockGetter(t1)
-    if JIT_ENABLED
-        baddis 5, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t1], .continue
-        cCall2(osrSlowPath, cfr, PC)
-        move t1, cfr
+    if not C_LOOP
+        baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue
+        if JSVALUE64
+            cCall2(osrSlowPath, cfr, PC)
+        else
+            # We are after the function prologue, but before we have set up sp from the CodeBlock.
+            # Temporarily align stack pointer for this call.
+            subp 8, sp
+            cCall2(osrSlowPath, cfr, PC)
+            addp 8, sp
+        end
         btpz t0, .recover
-        loadp ReturnPC[cfr], t2
-        restoreReturnAddressBeforeReturn(t2)
+        move cfr, sp # restore the previous sp
+        # pop the callerFrame since we will jump to a function that wants to save it
+        if ARM64
+            pop lr, cfr
+        elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+            pop cfr
+            pop lr
+        else
+            pop cfr
+        end
         jmp t0
     .recover:
         codeBlockGetter(t1)
     .continue:
     end
+
     codeBlockSetter(t1)
-    
+
     # Set up the PC.
     if JSVALUE64
         loadp CodeBlock::m_instructions[t1], PB
@@ -322,91 +625,259 @@ macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
     else
         loadp CodeBlock::m_instructions[t1], PC
     end
-end
 
-# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
-# Must call dispatch(0) after calling this.
-macro functionInitialization(profileArgSkip)
-    if VALUE_PROFILER
-        # Profile the arguments. Unfortunately, we have no choice but to do this. This
-        # code is pretty horrendous because of the difference in ordering between
-        # arguments and value profiles, the desire to have a simple loop-down-to-zero
-        # loop, and the desire to use only three registers so as to preserve the PC and
-        # the code block. It is likely that this code should be rewritten in a more
-        # optimal way for architectures that have more than five registers available
-        # for arbitrary use in the interpreter.
-        loadi CodeBlock::m_numParameters[t1], t0
-        addp -profileArgSkip, t0 # Use addi because that's what has the peephole
-        assert(macro (ok) bpgteq t0, 0, ok end)
-        btpz t0, .argumentProfileDone
-        loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
-        mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
-        negp t0
-        lshiftp 3, t0
-        addp t2, t3
-    .argumentProfileLoop:
-        if JSVALUE64
-            loadq ThisArgumentOffset + 8 - profileArgSkip * 8[cfr, t0], t2
-            subp sizeof ValueProfile, t3
-            storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
-        else
-            loadi ThisArgumentOffset + TagOffset + 8 - profileArgSkip * 8[cfr, t0], t2
-            subp sizeof ValueProfile, t3
-            storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
-            loadi ThisArgumentOffset + PayloadOffset + 8 - profileArgSkip * 8[cfr, t0], t2
-            storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
-        end
-        baddpnz 8, t0, .argumentProfileLoop
-    .argumentProfileDone:
-    end
-        
-    # Check stack height.
-    loadi CodeBlock::m_numCalleeRegisters[t1], t0
+    # Get new sp in t0 and check stack height.
+    getFrameRegisterSizeForCodeBlock(t1, t0)
+    subp cfr, t0, t0
     loadp CodeBlock::m_vm[t1], t2
-    loadp VM::interpreter[t2], t2   # FIXME: Can get to the JSStack from the JITStackFrame
-    lshifti 3, t0
-    addp t0, cfr, t0
-    bpaeq Interpreter::m_stack + JSStack::m_end[t2], t0, .stackHeightOK
+    bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK
 
     # Stack height check failed - need to call a slow_path.
+    subp maxFrameExtentForSlowPathCall, sp # Set up temporary stack pointer for call
     callSlowPath(_llint_stack_check)
+    bpeq t1, 0, .stackHeightOKGetCodeBlock
+    move t1, cfr
+    dispatch(0) # Go to exception handler in PC
+
+.stackHeightOKGetCodeBlock:
+    # Stack check slow path returned that the stack was ok.
+    # Since they were clobbered, need to get CodeBlock and new sp
+    codeBlockGetter(t1)
+    getFrameRegisterSizeForCodeBlock(t1, t0)
+    subp cfr, t0, t0
+
 .stackHeightOK:
+    move t0, sp
 end
 
-macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
-    if ALWAYS_ALLOCATE_SLOW
-        jmp slowCase
+# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
+# Must call dispatch(0) after calling this.
+macro functionInitialization(profileArgSkip)
+    # Profile the arguments. Unfortunately, we have no choice but to do this. This
+    # code is pretty horrendous because of the difference in ordering between
+    # arguments and value profiles, the desire to have a simple loop-down-to-zero
+    # loop, and the desire to use only three registers so as to preserve the PC and
+    # the code block. It is likely that this code should be rewritten in a more
+    # optimal way for architectures that have more than five registers available
+    # for arbitrary use in the interpreter.
+    loadi CodeBlock::m_numParameters[t1], t0
+    addp -profileArgSkip, t0 # Use addi because that's what has the peephole
+    assert(macro (ok) bpgteq t0, 0, ok end)
+    btpz t0, .argumentProfileDone
+    loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
+    mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
+    lshiftp 3, t0
+    addp t2, t3
+.argumentProfileLoop:
+    if JSVALUE64
+        loadq ThisArgumentOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+        subp sizeof ValueProfile, t3
+        storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
     else
-        const offsetOfFirstFreeCell = 
-            MarkedAllocator::m_freeList + 
-            MarkedBlock::FreeList::head
-
-        # Get the object from the free list.   
-        loadp offsetOfFirstFreeCell[allocator], result
-        btpz result, slowCase
-        
-        # Remove the object from the free list.
-        loadp [result], scratch1
-        storep scratch1, offsetOfFirstFreeCell[allocator]
-    
-        # Initialize the object.
-        storep structure, JSCell::m_structure[result]
-        storep 0, JSObject::m_butterfly[result]
+        loadi ThisArgumentOffset + TagOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+        subp sizeof ValueProfile, t3
+        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
+        loadi ThisArgumentOffset + PayloadOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
     end
+    baddpnz -8, t0, .argumentProfileLoop
+.argumentProfileDone:
+end
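
The loop above walks the incoming arguments from last to first and stores each one into the matching argument value profile. Ignoring the register-pressure tricks, the effect is roughly the following (a sketch with hypothetical types, not JSC's actual ValueProfile layout):

    #include <stdint.h>

    typedef uint64_t EncodedJSValue;

    /* Hypothetical stand-in for one ValueProfile bucket. */
    struct ValueProfileSketch { EncodedJSValue bucket; };

    /* argumentsIncludingThis[0] is |this|; profileArgSkip is 0 for call, 1 for construct. */
    static void profileArguments(const EncodedJSValue* argumentsIncludingThis,
                                 struct ValueProfileSketch* profiles,
                                 unsigned numParameters, unsigned profileArgSkip)
    {
        for (unsigned i = profileArgSkip; i < numParameters; ++i)
            profiles[i].bucket = argumentsIncludingThis[i];
    }

    int main(void)
    {
        EncodedJSValue args[3] = { 1, 2, 3 };        /* this, arg0, arg1 */
        struct ValueProfileSketch profiles[3] = { { 0 }, { 0 }, { 0 } };
        profileArguments(args, profiles, 3, 1);      /* construct: skip |this| */
        return profiles[1].bucket == 2 ? 0 : 1;
    }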
+
+macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
+    const offsetOfFirstFreeCell = 
+        MarkedAllocator::m_freeList + 
+        MarkedBlock::FreeList::head
+
+    # Get the object from the free list.   
+    loadp offsetOfFirstFreeCell[allocator], result
+    btpz result, slowCase
+    
+    # Remove the object from the free list.
+    loadp [result], scratch1
+    storep scratch1, offsetOfFirstFreeCell[allocator]
+
+    # Initialize the object.
+    storep 0, JSObject::m_butterfly[result]
+    storeStructureWithTypeInfo(result, structure, scratch1)
 end
 
 macro doReturn()
-    loadp ReturnPC[cfr], t2
-    loadp CallerFrame[cfr], cfr
-    restoreReturnAddressBeforeReturn(t2)
+    restoreCallerPCAndCFR()
     ret
 end
 
+# stub to call into JavaScript or Native functions
+# EncodedJSValue vmEntryToJavaScript(void* code, VM* vm, ProtoCallFrame* protoFrame)
+# EncodedJSValue vmEntryToNativeFunction(void* code, VM* vm, ProtoCallFrame* protoFrame)
+
+if C_LOOP
+    _llint_vm_entry_to_javascript:
+else
+    global _vmEntryToJavaScript
+    _vmEntryToJavaScript:
+end
+    doVMEntry(makeJavaScriptCall)
+
+
+if C_LOOP
+    _llint_vm_entry_to_native:
+else
+    global _vmEntryToNative
+    _vmEntryToNative:
+end
+    doVMEntry(makeHostFunctionCall)
+
+
+if not C_LOOP
+    # void sanitizeStackForVMImpl(VM* vm)
+    global _sanitizeStackForVMImpl
+    _sanitizeStackForVMImpl:
+        if X86_64
+            const vm = t4
+            const address = t1
+            const zeroValue = t0
+        elsif X86_64_WIN
+            const vm = t2
+            const address = t1
+            const zeroValue = t0
+        elsif X86 or X86_WIN
+            const vm = t2
+            const address = t1
+            const zeroValue = t0
+        else
+            const vm = a0
+            const address = t1
+            const zeroValue = t2
+        end
+    
+        if X86 or X86_WIN
+            loadp 4[sp], vm
+        end
+    
+        loadp VM::m_lastStackTop[vm], address
+        bpbeq sp, address, .zeroFillDone
+    
+        move 0, zeroValue
+    .zeroFillLoop:
+        storep zeroValue, [address]
+        addp PtrSize, address
+        bpa sp, address, .zeroFillLoop
+    
+    .zeroFillDone:
+        move sp, address
+        storep address, VM::m_lastStackTop[vm]
+        ret
+    
+    # VMEntryRecord* vmEntryRecord(const VMEntryFrame* entryFrame)
+    global _vmEntryRecord
+    _vmEntryRecord:
+        if X86_64
+            const entryFrame = t4
+            const result = t0
+        elsif X86 or X86_WIN or X86_64_WIN
+            const entryFrame = t2
+            const result = t0
+        else
+            const entryFrame = a0
+            const result = t0
+        end
+    
+        if X86 or X86_WIN
+            loadp 4[sp], entryFrame
+        end
+    
+        vmEntryRecord(entryFrame, result)
+        ret
+end
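
sanitizeStackForVMImpl zeroes the region between the VM's recorded m_lastStackTop and the current stack pointer, so stale values left on the retreated part of the stack do not linger. In rough C terms (hypothetical types, assuming a downward-growing stack):

    #include <stddef.h>
    #include <string.h>

    struct VMSketch { char* lastStackTop; };   /* stand-in for VM::m_lastStackTop */

    static void sanitizeStackSketch(struct VMSketch* vm, char* sp)
    {
        /* Only zero when the stack has retreated past the last recorded top
           (bpbeq sp, address, .zeroFillDone skips the loop otherwise). */
        if (vm->lastStackTop < sp)
            memset(vm->lastStackTop, 0, (size_t)(sp - vm->lastStackTop));
        vm->lastStackTop = sp;                 /* storep address, VM::m_lastStackTop[vm] */
    }

    int main(void)
    {
        char stack[64] = { 1 };
        struct VMSketch vm = { stack };
        sanitizeStackSketch(&vm, stack + 32);
        return stack[0] == 0 ? 0 : 1;
    }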
+
+if C_LOOP
+    # Dummy entry point the C Loop uses to initialize.
+    _llint_entry:
+        crash()
+    else
+    macro initPCRelative(pcBase)
+        if X86_64 or X86_64_WIN
+            call _relativePCBase
+        _relativePCBase:
+            pop pcBase
+        elsif X86 or X86_WIN
+            call _relativePCBase
+        _relativePCBase:
+            pop pcBase
+            loadp 20[sp], t4
+        elsif ARM64
+        elsif ARMv7
+        _relativePCBase:
+            move pc, pcBase
+            subp 3, pcBase   # Need to back up the PC and set the Thumb2 bit
+        elsif ARM or ARMv7_TRADITIONAL
+        _relativePCBase:
+            move pc, pcBase
+            subp 8, pcBase
+        elsif MIPS
+            la _relativePCBase, pcBase
+        _relativePCBase:
+        elsif SH4
+            mova _relativePCBase, t0
+            move t0, pcBase
+            alignformova
+        _relativePCBase:
+        end
+end
+
+macro setEntryAddress(index, label)
+    if X86_64
+        leap (label - _relativePCBase)[t1], t0
+        move index, t2
+        storep t0, [t4, t2, 8]
+    elsif X86_64_WIN
+        leap (label - _relativePCBase)[t1], t0
+        move index, t4
+        storep t0, [t2, t4, 8]
+    elsif X86 or X86_WIN
+        leap (label - _relativePCBase)[t1], t0
+        move index, t2
+        storep t0, [t4, t2, 4]
+    elsif ARM64
+        pcrtoaddr label, t1
+        move index, t2
+        storep t1, [a0, t2, 8]
+    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+        mvlbl (label - _relativePCBase), t2
+        addp t2, t1, t2
+        move index, t3
+        storep t2, [a0, t3, 4]
+    elsif SH4
+        move (label - _relativePCBase), t2
+        addp t2, t1, t2
+        move index, t3
+        storep t2, [a0, t3, 4]
+        flushcp # Force constant pool flush to avoid "pcrel too far" link error.
+    elsif MIPS
+        la label, t2
+        la _relativePCBase, t3
+        subp t3, t2
+        addp t2, t1, t2
+        move index, t3
+        storep t2, [a0, t3, 4]
+    end
+end
+
+global _llint_entry
+# Entry point for the llint to initialize.
+_llint_entry:
+    functionPrologue()
+    pushCalleeSaves()
+    initPCRelative(t1)
 
-# Indicate the beginning of LLInt.
-_llint_begin:
-    crash()
+    # Include generated bytecode initialization file.
+    include InitBytecodes
 
+    popCalleeSaves()
+    functionEpilogue()
+    ret
+end
 
 _llint_program_prologue:
     prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
@@ -420,26 +891,30 @@ _llint_eval_prologue:
 
 _llint_function_for_call_prologue:
     prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
-.functionForCallBegin:
     functionInitialization(0)
     dispatch(0)
     
 
 _llint_function_for_construct_prologue:
     prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
-.functionForConstructBegin:
     functionInitialization(1)
     dispatch(0)
     
 
 _llint_function_for_call_arity_check:
     prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
-    functionArityCheck(.functionForCallBegin, _llint_slow_path_call_arityCheck)
+    functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck)
+.functionForCallBegin:
+    functionInitialization(0)
+    dispatch(0)
 
 
 _llint_function_for_construct_arity_check:
     prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
-    functionArityCheck(.functionForConstructBegin, _llint_slow_path_construct_arityCheck)
+    functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck)
+.functionForConstructBegin:
+    functionInitialization(1)
+    dispatch(0)
 
 
 # Value-representation-specific code.
@@ -451,6 +926,30 @@ end
 
 
 # Value-representation-agnostic code.
+_llint_op_create_direct_arguments:
+    traceExecution()
+    callSlowPath(_slow_path_create_direct_arguments)
+    dispatch(2)
+
+
+_llint_op_create_scoped_arguments:
+    traceExecution()
+    callSlowPath(_slow_path_create_scoped_arguments)
+    dispatch(3)
+
+
+_llint_op_create_out_of_band_arguments:
+    traceExecution()
+    callSlowPath(_slow_path_create_out_of_band_arguments)
+    dispatch(2)
+
+
+_llint_op_new_func:
+    traceExecution()
+    callSlowPath(_llint_slow_path_new_func)
+    dispatch(4)
+
+
 _llint_op_new_array:
     traceExecution()
     callSlowPath(_llint_slow_path_new_array)
@@ -477,462 +976,56 @@ _llint_op_new_regexp:
 
 _llint_op_less:
     traceExecution()
-    callSlowPath(_llint_slow_path_less)
+    callSlowPath(_slow_path_less)
     dispatch(4)
 
 
 _llint_op_lesseq:
     traceExecution()
-    callSlowPath(_llint_slow_path_lesseq)
+    callSlowPath(_slow_path_lesseq)
     dispatch(4)
 
 
 _llint_op_greater:
     traceExecution()
-    callSlowPath(_llint_slow_path_greater)
+    callSlowPath(_slow_path_greater)
     dispatch(4)
 
 
 _llint_op_greatereq:
     traceExecution()
-    callSlowPath(_llint_slow_path_greatereq)
+    callSlowPath(_slow_path_greatereq)
     dispatch(4)
 
 
 _llint_op_mod:
     traceExecution()
-    callSlowPath(_llint_slow_path_mod)
+    callSlowPath(_slow_path_mod)
     dispatch(4)
 
 
 _llint_op_typeof:
     traceExecution()
-    callSlowPath(_llint_slow_path_typeof)
+    callSlowPath(_slow_path_typeof)
     dispatch(3)
 
 
-_llint_op_is_object:
+_llint_op_is_object_or_null:
     traceExecution()
-    callSlowPath(_llint_slow_path_is_object)
+    callSlowPath(_slow_path_is_object_or_null)
     dispatch(3)
 
-
 _llint_op_is_function:
     traceExecution()
-    callSlowPath(_llint_slow_path_is_function)
+    callSlowPath(_slow_path_is_function)
     dispatch(3)
 
 
 _llint_op_in:
     traceExecution()
-    callSlowPath(_llint_slow_path_in)
+    callSlowPath(_slow_path_in)
     dispatch(4)
 
-macro getPutToBaseOperationField(scratch, scratch1, fieldOffset, fieldGetter)
-    loadpFromInstruction(4, scratch)
-    fieldGetter(fieldOffset[scratch])
-end
-
-macro moveJSValueFromRegisterWithoutProfiling(value, destBuffer, destOffsetReg)
-    storeq value, [destBuffer, destOffsetReg, 8]
-end
-
-
-macro moveJSValueFromRegistersWithoutProfiling(tag, payload, destBuffer, destOffsetReg)
-    storei tag, TagOffset[destBuffer, destOffsetReg, 8]
-    storei payload, PayloadOffset[destBuffer, destOffsetReg, 8]
-end
-
-macro putToBaseVariableBody(variableOffset, scratch1, scratch2, scratch3)
-    loadisFromInstruction(1, scratch1)
-    loadp PayloadOffset[cfr, scratch1, 8], scratch1
-    loadp JSVariableObject::m_registers[scratch1], scratch1
-    loadisFromInstruction(3, scratch2)
-    if JSVALUE64
-        loadConstantOrVariable(scratch2, scratch3)
-        moveJSValueFromRegisterWithoutProfiling(scratch3, scratch1, variableOffset)
-    else
-        loadConstantOrVariable2Reg(scratch2, scratch3, scratch2) # scratch3=tag, scratch2=payload
-        moveJSValueFromRegistersWithoutProfiling(scratch3, scratch2, scratch1, variableOffset)
-    end
-end
-
-_llint_op_put_to_base_variable:
-    traceExecution()
-    getPutToBaseOperationField(t0, t1, PutToBaseOperation::m_offset, macro(addr)
-                                              loadis  addr, t0
-                                          end)
-    putToBaseVariableBody(t0, t1, t2, t3)
-    dispatch(5)
-
-_llint_op_put_to_base:
-    traceExecution()
-    getPutToBaseOperationField(t0, t1, 0, macro(addr)
-                                              leap addr, t0
-                                              bbneq PutToBaseOperation::m_kindAsUint8[t0], PutToBaseOperationKindVariablePut, .notPutToBaseVariable
-                                              loadis PutToBaseOperation::m_offset[t0], t0
-                                              putToBaseVariableBody(t0, t1, t2, t3)
-                                              dispatch(5)
-                                              .notPutToBaseVariable:
-                                          end)
-    callSlowPath(_llint_slow_path_put_to_base)
-    dispatch(5)
-
-macro getResolveOperation(resolveOperationIndex, dest)
-    loadpFromInstruction(resolveOperationIndex, dest)
-    loadp VectorBufferOffset[dest], dest
-end
-
-macro getScope(loadInitialScope, scopeCount, dest, scratch)
-    loadInitialScope(dest)
-    loadi scopeCount, scratch
-
-    btiz scratch, .done
-.loop:
-    loadp JSScope::m_next[dest], dest
-    subi 1, scratch
-    btinz scratch, .loop
-
-.done:
-end
-
-macro moveJSValue(sourceBuffer, sourceOffsetReg, destBuffer, destOffsetReg, profileOffset, scratchRegister)
-    if JSVALUE64
-        loadq [sourceBuffer, sourceOffsetReg, 8], scratchRegister
-        storeq scratchRegister, [destBuffer, destOffsetReg, 8]
-        loadpFromInstruction(profileOffset, destOffsetReg)
-        valueProfile(scratchRegister, destOffsetReg)
-    else
-        loadi PayloadOffset[sourceBuffer, sourceOffsetReg, 8], scratchRegister
-        storei scratchRegister, PayloadOffset[destBuffer, destOffsetReg, 8]
-        loadi TagOffset[sourceBuffer, sourceOffsetReg, 8], sourceOffsetReg
-        storei sourceOffsetReg, TagOffset[destBuffer, destOffsetReg, 8]
-        loadpFromInstruction(profileOffset, destOffsetReg)
-        valueProfile(sourceOffsetReg, scratchRegister, destOffsetReg)
-    end
-end
-
-macro moveJSValueFromSlot(slot, destBuffer, destOffsetReg, profileOffset, scratchRegister)
-    if JSVALUE64
-        loadq [slot], scratchRegister
-        storeq scratchRegister, [destBuffer, destOffsetReg, 8]
-        loadpFromInstruction(profileOffset, destOffsetReg)
-        valueProfile(scratchRegister, destOffsetReg)
-    else
-        loadi PayloadOffset[slot], scratchRegister
-        storei scratchRegister, PayloadOffset[destBuffer, destOffsetReg, 8]
-        loadi TagOffset[slot], slot
-        storei slot, TagOffset[destBuffer, destOffsetReg, 8]
-        loadpFromInstruction(profileOffset, destOffsetReg)
-        valueProfile(slot, scratchRegister, destOffsetReg)
-    end
-end
-
-macro moveJSValueFromRegister(value, destBuffer, destOffsetReg, profileOffset)
-    storeq value, [destBuffer, destOffsetReg, 8]
-    loadpFromInstruction(profileOffset, destOffsetReg)
-    valueProfile(value, destOffsetReg)
-end
-
-macro moveJSValueFromRegisters(tag, payload, destBuffer, destOffsetReg, profileOffset)
-    storei tag, TagOffset[destBuffer, destOffsetReg, 8]
-    storei payload, PayloadOffset[destBuffer, destOffsetReg, 8]
-    loadpFromInstruction(profileOffset, destOffsetReg)
-    valueProfile(tag, payload, destOffsetReg)
-end
-
-_llint_op_resolve_global_property:
-    traceExecution()
-    getResolveOperation(3, t0)
-    loadp CodeBlock[cfr], t1
-    loadp CodeBlock::m_globalObject[t1], t1
-    loadp ResolveOperation::m_structure[t0], t2
-    bpneq JSCell::m_structure[t1], t2, .llint_op_resolve_local
-    loadis ResolveOperation::m_offset[t0], t0
-    if JSVALUE64
-        loadPropertyAtVariableOffsetKnownNotInline(t0, t1, t2)
-        loadisFromInstruction(1, t0)
-        moveJSValueFromRegister(t2, cfr, t0, 4)
-    else
-        loadPropertyAtVariableOffsetKnownNotInline(t0, t1, t2, t3)
-        loadisFromInstruction(1, t0)
-        moveJSValueFromRegisters(t2, t3, cfr, t0, 4)
-    end
-    dispatch(5)
-
-_llint_op_resolve_global_var:
-    traceExecution()
-    getResolveOperation(3, t0)
-    loadp ResolveOperation::m_registerAddress[t0], t0
-    loadisFromInstruction(1, t1)
-    moveJSValueFromSlot(t0, cfr, t1, 4, t3)
-    dispatch(5)
-
-macro resolveScopedVarBody(resolveOperations)
-    # First ResolveOperation is to skip scope chain nodes
-    getScope(macro(dest)
-                 loadp ScopeChain + PayloadOffset[cfr], dest
-             end,
-             ResolveOperation::m_scopesToSkip[resolveOperations], t1, t2)
-    loadp JSVariableObject::m_registers[t1], t1 # t1 now contains the activation registers
-    
-    # Second ResolveOperation tells us what offset to use
-    loadis ResolveOperation::m_offset + sizeof ResolveOperation[resolveOperations], t2
-    loadisFromInstruction(1, t3)
-    moveJSValue(t1, t2, cfr, t3, 4, t0)
-end
-
-_llint_op_resolve_scoped_var:
-    traceExecution()
-    getResolveOperation(3, t0)
-    resolveScopedVarBody(t0)
-    dispatch(5)
-    
-_llint_op_resolve_scoped_var_on_top_scope:
-    traceExecution()
-    getResolveOperation(3, t0)
-
-    # Load destination index
-    loadisFromInstruction(1, t3)
-
-    # We know we want the top scope chain entry
-    loadp ScopeChain + PayloadOffset[cfr], t1
-    loadp JSVariableObject::m_registers[t1], t1 # t1 now contains the activation registers
-    
-    # Second ResolveOperation tells us what offset to use
-    loadis ResolveOperation::m_offset + sizeof ResolveOperation[t0], t2
-
-    moveJSValue(t1, t2, cfr, t3, 4, t0)
-    dispatch(5)
-
-_llint_op_resolve_scoped_var_with_top_scope_check:
-    traceExecution()
-    getResolveOperation(3, t0)
-    # First ResolveOperation tells us what register to check
-    loadis ResolveOperation::m_activationRegister[t0], t1
-
-    loadp PayloadOffset[cfr, t1, 8], t1
-
-    getScope(macro(dest)
-                 btpz t1, .scopeChainNotCreated
-                     loadp JSScope::m_next[t1], dest
-                 jmp .done
-                 .scopeChainNotCreated:
-                     loadp ScopeChain + PayloadOffset[cfr], dest
-                 .done:
-             end, 
-             # Second ResolveOperation tells us how many more nodes to skip
-             ResolveOperation::m_scopesToSkip + sizeof ResolveOperation[t0], t1, t2)
-    loadp JSVariableObject::m_registers[t1], t1 # t1 now contains the activation registers
-    
-    # Third operation tells us what offset to use
-    loadis ResolveOperation::m_offset + 2 * sizeof ResolveOperation[t0], t2
-    loadisFromInstruction(1, t3)
-    moveJSValue(t1, t2, cfr, t3, 4, t0)
-    dispatch(5)
-
-_llint_op_resolve:
-.llint_op_resolve_local:
-    traceExecution()
-    getResolveOperation(3, t0)
-    btpz t0, .noInstructions
-    loadis ResolveOperation::m_operation[t0], t1
-    bineq t1, ResolveOperationSkipScopes, .notSkipScopes
-        resolveScopedVarBody(t0)
-        dispatch(5)
-.notSkipScopes:
-    bineq t1, ResolveOperationGetAndReturnGlobalVar, .notGetAndReturnGlobalVar
-        loadp ResolveOperation::m_registerAddress[t0], t0
-        loadisFromInstruction(1, t1)
-        moveJSValueFromSlot(t0, cfr, t1, 4, t3)
-        dispatch(5)
-.notGetAndReturnGlobalVar:
-
-.noInstructions:
-    callSlowPath(_llint_slow_path_resolve)
-    dispatch(5)
-
-_llint_op_resolve_base_to_global:
-    traceExecution()
-    loadp CodeBlock[cfr], t1
-    loadp CodeBlock::m_globalObject[t1], t1
-    loadisFromInstruction(1, t3)
-    if JSVALUE64
-        moveJSValueFromRegister(t1, cfr, t3, 6)
-    else
-        move CellTag, t2
-        moveJSValueFromRegisters(t2, t1, cfr, t3, 6)
-    end
-    dispatch(7)
-
-_llint_op_resolve_base_to_global_dynamic:
-    jmp _llint_op_resolve_base
-
-_llint_op_resolve_base_to_scope:
-    traceExecution()
-    getResolveOperation(4, t0)
-    # First ResolveOperation is to skip scope chain nodes
-    getScope(macro(dest)
-                 loadp ScopeChain + PayloadOffset[cfr], dest
-             end,
-             ResolveOperation::m_scopesToSkip[t0], t1, t2)
-    loadisFromInstruction(1, t3)
-    if JSVALUE64
-        moveJSValueFromRegister(t1, cfr, t3, 6)
-    else
-        move CellTag, t2
-        moveJSValueFromRegisters(t2, t1, cfr, t3, 6)
-    end
-    dispatch(7)
-
-_llint_op_resolve_base_to_scope_with_top_scope_check:
-    traceExecution()
-    getResolveOperation(4, t0)
-    # First ResolveOperation tells us what register to check
-    loadis ResolveOperation::m_activationRegister[t0], t1
-
-    loadp PayloadOffset[cfr, t1, 8], t1
-
-    getScope(macro(dest)
-                 btpz t1, .scopeChainNotCreated
-                     loadp JSScope::m_next[t1], dest
-                 jmp .done
-                 .scopeChainNotCreated:
-                     loadp ScopeChain + PayloadOffset[cfr], dest
-                 .done:
-             end, 
-             # Second ResolveOperation tells us how many more nodes to skip
-             ResolveOperation::m_scopesToSkip + sizeof ResolveOperation[t0], t1, t2)
-
-    loadisFromInstruction(1, t3)
-    if JSVALUE64
-        moveJSValueFromRegister(t1, cfr, t3, 6)
-    else
-        move CellTag, t2
-        moveJSValueFromRegisters(t2, t1, cfr, t3, 6)
-    end
-    dispatch(7)
-
-_llint_op_resolve_base:
-    traceExecution()
-    callSlowPath(_llint_slow_path_resolve_base)
-    dispatch(7)
-
-macro interpretResolveWithBase(opcodeLength, slowPath)
-    traceExecution()
-    getResolveOperation(4, t0)
-    btpz t0, .slowPath
-
-    loadp ScopeChain[cfr], t3
-    # Get the base
-    loadis ResolveOperation::m_operation[t0], t2
-
-    bineq t2, ResolveOperationSkipScopes, .notSkipScopes
-        getScope(macro(dest) move t3, dest end,
-                 ResolveOperation::m_scopesToSkip[t0], t1, t2)
-        move t1, t3
-        addp sizeof ResolveOperation, t0, t0
-        jmp .haveCorrectScope
-
-    .notSkipScopes:
-
-    bineq t2, ResolveOperationSkipTopScopeNode, .notSkipTopScopeNode
-        loadis ResolveOperation::m_activationRegister[t0], t1
-        loadp PayloadOffset[cfr, t1, 8], t1
-
-        getScope(macro(dest)
-                     btpz t1, .scopeChainNotCreated
-                         loadp JSScope::m_next[t1], dest
-                     jmp .done
-                     .scopeChainNotCreated:
-                         loadp ScopeChain + PayloadOffset[cfr], dest
-                     .done:
-                 end,
-                 sizeof ResolveOperation + ResolveOperation::m_scopesToSkip[t0], t1, t2)
-        move t1, t3
-        # We've handled two opcodes here
-        addp 2 * sizeof ResolveOperation, t0, t0
-
-    .notSkipTopScopeNode:
-
-    .haveCorrectScope:
-
-    # t3 now contains the correct Scope
-    # t0 contains a pointer to the current ResolveOperation
-
-    loadis ResolveOperation::m_operation[t0], t2
-    # t2 contains the next instruction
-
-    loadisFromInstruction(1, t1)
-    # t1 now contains the index for the base register
-
-    bineq t2, ResolveOperationSetBaseToScope, .notSetBaseToScope
-        if JSVALUE64
-            storeq t3, [cfr, t1, 8]
-        else
-            storei t3, PayloadOffset[cfr, t1, 8]
-            storei CellTag, TagOffset[cfr, t1, 8]
-        end
-        jmp .haveSetBase
-
-    .notSetBaseToScope:
-
-    bineq t2, ResolveOperationSetBaseToUndefined, .notSetBaseToUndefined
-        if JSVALUE64
-            storeq ValueUndefined, [cfr, t1, 8]
-        else
-            storei 0, PayloadOffset[cfr, t1, 8]
-            storei UndefinedTag, TagOffset[cfr, t1, 8]
-        end
-        jmp .haveSetBase
-
-    .notSetBaseToUndefined:
-    bineq t2, ResolveOperationSetBaseToGlobal, .slowPath
-        loadp JSCell::m_structure[t3], t2
-        loadp Structure::m_globalObject[t2], t2
-        if JSVALUE64
-            storeq t2, [cfr, t1, 8]
-        else
-            storei t2, PayloadOffset[cfr, t1, 8]
-            storei CellTag, TagOffset[cfr, t1, 8]
-        end
-
-    .haveSetBase:
-
-    # Get the value
-
-    # Load the operation into t2
-    loadis ResolveOperation::m_operation + sizeof ResolveOperation[t0], t2
-
-    # Load the index for the value register into t1
-    loadisFromInstruction(2, t1)
-
-    bineq t2, ResolveOperationGetAndReturnScopedVar, .notGetAndReturnScopedVar
-        loadp JSVariableObject::m_registers[t3], t3 # t3 now contains the activation registers
-
-        # Second ResolveOperation tells us what offset to use
-        loadis ResolveOperation::m_offset + sizeof ResolveOperation[t0], t2
-        moveJSValue(t3, t2, cfr, t1, opcodeLength - 1, t0)
-        dispatch(opcodeLength)
-
-    .notGetAndReturnScopedVar:
-    bineq t2, ResolveOperationGetAndReturnGlobalProperty, .slowPath
-        callSlowPath(slowPath)
-        dispatch(opcodeLength)
-
-.slowPath:
-    callSlowPath(slowPath)
-    dispatch(opcodeLength)
-end
-
-_llint_op_resolve_with_base:
-    interpretResolveWithBase(7, _llint_slow_path_resolve_with_base)
-
-
-_llint_op_resolve_with_this:
-    interpretResolveWithBase(6, _llint_slow_path_resolve_with_this)
-
-
 macro withInlineStorage(object, propertyStorage, continuation)
     # Indicate that the object is the property storage, and that the
     # property storage register is unused.
@@ -966,6 +1059,18 @@ _llint_op_put_by_index:
     dispatch(4)
 
 
+_llint_op_put_getter_by_id:
+    traceExecution()
+    callSlowPath(_llint_slow_path_put_getter_by_id)
+    dispatch(4)
+
+
+_llint_op_put_setter_by_id:
+    traceExecution()
+    callSlowPath(_llint_slow_path_put_setter_by_id)
+    dispatch(4)
+
+
 _llint_op_put_getter_setter:
     traceExecution()
     callSlowPath(_llint_slow_path_put_getter_setter)
@@ -1052,13 +1157,16 @@ _llint_op_jngreatereq:
 
 _llint_op_loop_hint:
     traceExecution()
-    loadp JITStackFrame::vm[sp], t1
-    loadb VM::watchdog+Watchdog::m_timerDidFire[t1], t0
-    btbnz t0, .handleWatchdogTimer
+    loadp CodeBlock[cfr], t1
+    loadp CodeBlock::m_vm[t1], t1
+    loadp VM::watchdog[t1], t0
+    btpnz t0, .handleWatchdogTimer
 .afterWatchdogTimerCheck:
     checkSwitchToJITForLoop()
     dispatch(1)
 .handleWatchdogTimer:
+    loadb Watchdog::m_timerDidFire[t0], t0
+    btbz t0, .afterWatchdogTimerCheck
     callWatchdogTimerHandler(.throwHandler)
     jmp .afterWatchdogTimerCheck
 .throwHandler:
@@ -1073,7 +1181,7 @@ _llint_op_switch_string:
 _llint_op_new_func_exp:
     traceExecution()
     callSlowPath(_llint_slow_path_new_func_exp)
-    dispatch(3)
+    dispatch(4)
 
 
 _llint_op_call:
@@ -1089,7 +1197,39 @@ _llint_op_construct:
 
 _llint_op_call_varargs:
     traceExecution()
-    slowPathForCall(6, _llint_slow_path_call_varargs)
+    callSlowPath(_llint_slow_path_size_frame_for_varargs)
+    branchIfException(_llint_throw_from_slow_path_trampoline)
+    # calleeFrame in t1
+    if JSVALUE64
+        move t1, sp
+    else
+        # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
+        if ARMv7
+            subp t1, CallerFrameAndPCSize, t2
+            move t2, sp
+        else
+            subp t1, CallerFrameAndPCSize, sp
+        end
+    end
+    slowPathForCall(_llint_slow_path_call_varargs)
+
+_llint_op_construct_varargs:
+    traceExecution()
+    callSlowPath(_llint_slow_path_size_frame_for_varargs)
+    branchIfException(_llint_throw_from_slow_path_trampoline)
+    # calleeFrame in t1
+    if JSVALUE64
+        move t1, sp
+    else
+        # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
+        if ARMv7
+            subp t1, CallerFrameAndPCSize, t2
+            move t2, sp
+        else
+            subp t1, CallerFrameAndPCSize, sp
+        end
+    end
+    slowPathForCall(_llint_slow_path_construct_varargs)
 
 
 _llint_op_call_eval:
@@ -1128,7 +1268,7 @@ _llint_op_call_eval:
     # and a PC to call, and that PC may be a dummy thunk that just
     # returns the JS value that the eval returned.
     
-    slowPathForCall(4, _llint_slow_path_call_eval)
+    slowPathForCall(_llint_slow_path_call_eval)
 
 
 _llint_generic_return_point:
@@ -1137,32 +1277,26 @@ _llint_generic_return_point:
 
 _llint_op_strcat:
     traceExecution()
-    callSlowPath(_llint_slow_path_strcat)
+    callSlowPath(_slow_path_strcat)
     dispatch(4)
 
 
-_llint_op_get_pnames:
-    traceExecution()
-    callSlowPath(_llint_slow_path_get_pnames)
-    dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.
-
-
 _llint_op_push_with_scope:
     traceExecution()
     callSlowPath(_llint_slow_path_push_with_scope)
-    dispatch(2)
+    dispatch(3)
 
 
 _llint_op_pop_scope:
     traceExecution()
     callSlowPath(_llint_slow_path_pop_scope)
-    dispatch(1)
+    dispatch(2)
 
 
 _llint_op_push_name_scope:
     traceExecution()
     callSlowPath(_llint_slow_path_push_name_scope)
-    dispatch(4)
+    dispatch(5)
 
 
 _llint_op_throw:
@@ -1179,20 +1313,34 @@ _llint_op_throw_static_error:
 
 _llint_op_profile_will_call:
     traceExecution()
+    loadp CodeBlock[cfr], t0
+    loadp CodeBlock::m_vm[t0], t0
+    loadi VM::m_enabledProfiler[t0], t0
+    btpz t0, .opProfilerWillCallDone
     callSlowPath(_llint_slow_path_profile_will_call)
+.opProfilerWillCallDone:
     dispatch(2)
 
 
 _llint_op_profile_did_call:
     traceExecution()
+    loadp CodeBlock[cfr], t0
+    loadp CodeBlock::m_vm[t0], t0
+    loadi VM::m_enabledProfiler[t0], t0
+    btpz t0, .opProfilerDidCallDone
     callSlowPath(_llint_slow_path_profile_did_call)
+.opProfilerDidCallDone:
     dispatch(2)
 
 
 _llint_op_debug:
     traceExecution()
+    loadp CodeBlock[cfr], t0
+    loadi CodeBlock::m_debuggerRequests[t0], t0
+    btiz t0, .opDebugDone
     callSlowPath(_llint_slow_path_debug)
-    dispatch(5)
+.opDebugDone:                    
+    dispatch(3)
 
 
 _llint_native_call_trampoline:
@@ -1202,6 +1350,56 @@ _llint_native_call_trampoline:
 _llint_native_construct_trampoline:
     nativeCallTrampoline(NativeExecutable::m_constructor)
 
+_llint_op_get_enumerable_length:
+    traceExecution()
+    callSlowPath(_slow_path_get_enumerable_length)
+    dispatch(3)
+
+_llint_op_has_indexed_property:
+    traceExecution()
+    callSlowPath(_slow_path_has_indexed_property)
+    dispatch(5)
+
+_llint_op_has_structure_property:
+    traceExecution()
+    callSlowPath(_slow_path_has_structure_property)
+    dispatch(5)
+
+_llint_op_has_generic_property:
+    traceExecution()
+    callSlowPath(_slow_path_has_generic_property)
+    dispatch(4)
+
+_llint_op_get_direct_pname:
+    traceExecution()
+    callSlowPath(_slow_path_get_direct_pname)
+    dispatch(7)
+
+_llint_op_get_property_enumerator:
+    traceExecution()
+    callSlowPath(_slow_path_get_property_enumerator)
+    dispatch(3)
+
+_llint_op_enumerator_structure_pname:
+    traceExecution()
+    callSlowPath(_slow_path_next_structure_enumerator_pname)
+    dispatch(4)
+
+_llint_op_enumerator_generic_pname:
+    traceExecution()
+    callSlowPath(_slow_path_next_generic_enumerator_pname)
+    dispatch(4)
+
+_llint_op_to_index_string:
+    traceExecution()
+    callSlowPath(_slow_path_to_index_string)
+    dispatch(3)
+
+_llint_op_profile_control_flow:
+    traceExecution()
+    loadpFromInstruction(1, t0)
+    storeb 1, BasicBlockLocation::m_hasExecuted[t0]
+    dispatch(2)
 
 # Lastly, make sure that we can link even though we don't support all opcodes.
 # These opcodes should never arise when using LLInt or either JIT. We assert
@@ -1221,52 +1419,5 @@ macro notSupported()
     end
 end
 
-_llint_op_get_by_id_chain:
-    notSupported()
-
-_llint_op_get_by_id_custom_chain:
-    notSupported()
-
-_llint_op_get_by_id_custom_proto:
-    notSupported()
-
-_llint_op_get_by_id_custom_self:
-    notSupported()
-
-_llint_op_get_by_id_generic:
-    notSupported()
-
-_llint_op_get_by_id_getter_chain:
-    notSupported()
-
-_llint_op_get_by_id_getter_proto:
-    notSupported()
-
-_llint_op_get_by_id_getter_self:
-    notSupported()
-
-_llint_op_get_by_id_proto:
-    notSupported()
-
-_llint_op_get_by_id_self:
-    notSupported()
-
-_llint_op_get_string_length:
-    notSupported()
-
-_llint_op_put_by_id_generic:
-    notSupported()
-
-_llint_op_put_by_id_replace:
-    notSupported()
-
-_llint_op_put_by_id_transition:
-    notSupported()
-
 _llint_op_init_global_const_nop:
     dispatch(5)
-
-# Indicate the end of LLInt.
-_llint_end:
-    crash()
-