diff --git a/jit/JITPropertyAccess.cpp b/jit/JITPropertyAccess.cpp
index a6a28316e498c5a578c256be2467fa63a7720de6..2d1b2929d799bfcf41a50a0e5207b2d9dd8d0efd 100644
--- a/jit/JITPropertyAccess.cpp
+++ b/jit/JITPropertyAccess.cpp
  */
 
 #include "config.h"
-#include "JIT.h"
 
 #if ENABLE(JIT)
+#include "JIT.h"
 
 #include "CodeBlock.h"
-#include "JITInlineMethods.h"
+#include "GCAwareJITStubRoutine.h"
+#include "GetterSetter.h"
+#include "Interpreter.h"
+#include "JITInlines.h"
 #include "JITStubCall.h"
 #include "JSArray.h"
 #include "JSFunction.h"
-#include "Interpreter.h"
+#include "JSPropertyNameIterator.h"
+#include "JSVariableObject.h"
 #include "LinkBuffer.h"
 #include "RepatchBuffer.h"
 #include "ResultType.h"
 #include "SamplingTool.h"
+#include <wtf/StringPrintStream.h>
 
 #ifndef NDEBUG
 #include <stdio.h>
 #endif
 
 using namespace std;
 
 namespace JSC {
+#if USE(JSVALUE64)
 
-#if USE(JSVALUE32_64)
-
-void JIT::emit_op_put_by_index(Instruction* currentInstruction)
-{
-    unsigned base = currentInstruction[1].u.operand;
-    unsigned property = currentInstruction[2].u.operand;
-    unsigned value = currentInstruction[3].u.operand;
-
-    JITStubCall stubCall(this, cti_op_put_by_index);
-    stubCall.addArgument(base);
-    stubCall.addArgument(Imm32(property));
-    stubCall.addArgument(value);
-    stubCall.call();
-}
-
-void JIT::emit_op_put_getter(Instruction* currentInstruction)
-{
-    unsigned base = currentInstruction[1].u.operand;
-    unsigned property = currentInstruction[2].u.operand;
-    unsigned function = currentInstruction[3].u.operand;
-
-    JITStubCall stubCall(this, cti_op_put_getter);
-    stubCall.addArgument(base);
-    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
-    stubCall.addArgument(function);
-    stubCall.call();
-}
-
-void JIT::emit_op_put_setter(Instruction* currentInstruction)
+JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
 {
-    unsigned base = currentInstruction[1].u.operand;
-    unsigned property = currentInstruction[2].u.operand;
-    unsigned function = currentInstruction[3].u.operand;
-
-    JITStubCall stubCall(this, cti_op_put_setter);
-    stubCall.addArgument(base);
-    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
-    stubCall.addArgument(function);
-    stubCall.call();
-}
+    JSInterfaceJIT jit;
+    JumpList failures;
+    failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));
 
-void JIT::emit_op_del_by_id(Instruction* currentInstruction)
-{
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned base = currentInstruction[2].u.operand;
-    unsigned property = currentInstruction[3].u.operand;
+    // Load string length to regT2, and start the process of loading the data pointer into regT0
+    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
+    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
+    failures.append(jit.branchTest32(Zero, regT0));
 
-    JITStubCall stubCall(this, cti_op_del_by_id);
-    stubCall.addArgument(base);
-    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
-    stubCall.call(dst);
+    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
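+    // In C++ terms the check above is roughly (a sketch, not JSC API):
+    //     if (static_cast<uint32_t>(index) >= length) goto failures;
+    // since any negative int32, reinterpreted as uint32, exceeds the length.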
+    
+    // Load the character
+    JumpList is16Bit;
+    JumpList cont8Bit;
+    // Load the string flags
+    jit.loadPtr(Address(regT0, StringImpl::flagsOffset()), regT2);
+    jit.loadPtr(Address(regT0, StringImpl::dataOffset()), regT0);
+    is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(StringImpl::flagIs8Bit())));
+    jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
+    cont8Bit.append(jit.jump());
+    is16Bit.link(&jit);
+    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
+    cont8Bit.link(&jit);
+
+    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
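+    // Characters in [0, 0x100) are interned in VM::smallStrings, so the result
+    // is a single table load - roughly (a sketch):
+    //     JSString* result = vm->smallStrings.singleCharacterStrings()[character];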
+    jit.move(TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), regT1);
+    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
+    jit.ret();
+    
+    failures.link(&jit);
+    jit.move(TrustedImm32(0), regT0);
+    jit.ret();
+    
+    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+    return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
 }
 
-
-#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
-#endif
-
 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
 {
     unsigned dst = currentInstruction[1].u.operand;
     unsigned base = currentInstruction[2].u.operand;
     unsigned property = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+    
+    emitGetVirtualRegisters(base, regT0, property, regT1);
+    emitJumpSlowCaseIfNotImmediateInteger(regT1);
 
-    JITStubCall stubCall(this, cti_op_get_by_val);
-    stubCall.addArgument(base);
-    stubCall.addArgument(property);
-    stubCall.call(dst);
-}
-
-void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
-    ASSERT_NOT_REACHED();
-}
+    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+    // We check the value as if it were a uint32 against the m_vectorLength - which will always fail if
+    // the number was signed, since m_vectorLength is always less than intmax (as the total allocation
+    // size is always less than 4Gb). As such, zero extending will have been correct (and extending the value
+    // to 64 bits is necessary, since it's used in the address calculation). We zero extend rather than sign
+    // extend since it makes it easier to re-tag the value in the slow case.
+    zeroExtend32ToPtr(regT1, regT1);
 
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
-{
-    unsigned base = currentInstruction[1].u.operand;
-    unsigned property = currentInstruction[2].u.operand;
-    unsigned value = currentInstruction[3].u.operand;
+    emitJumpSlowCaseIfNotJSCell(regT0, base);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    emitArrayProfilingSite(regT2, regT3, profile);
+    and32(TrustedImm32(IndexingShapeMask), regT2);
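+    // emitArrayProfilingSite records the structure in the profile and leaves the
+    // cell's indexing type in regT2; masking with IndexingShapeMask isolates the
+    // shape bits (Int32Shape, DoubleShape, ...) that the switch below keys on.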
+
+    PatchableJump badType;
+    JumpList slowCases;
+
+    JITArrayMode mode = chooseArrayMode(profile);
+    switch (mode) {
+    case JITInt32:
+        slowCases = emitInt32GetByVal(currentInstruction, badType);
+        break;
+    case JITDouble:
+        slowCases = emitDoubleGetByVal(currentInstruction, badType);
+        break;
+    case JITContiguous:
+        slowCases = emitContiguousGetByVal(currentInstruction, badType);
+        break;
+    case JITArrayStorage:
+        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
+        break;
+    default:
+        CRASH();
+        break;
+    }
+    
+    addSlowCase(badType);
+    addSlowCase(slowCases);
+    
+    Label done = label();
+    
+#if !ASSERT_DISABLED
+    Jump resultOK = branchTest64(NonZero, regT0);
+    breakpoint();
+    resultOK.link(this);
+#endif
 
-    JITStubCall stubCall(this, cti_op_put_by_val);
-    stubCall.addArgument(base);
-    stubCall.addArgument(property);
-    stubCall.addArgument(value);
-    stubCall.call();
+    emitValueProfilingSite();
+    emitPutVirtualRegister(dst);
+    
+    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
 }
 
-void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
+JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
 {
-    ASSERT_NOT_REACHED();
+    JumpList slowCases;
+    
+    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
+    loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
+    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
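+    // A hole in a double array is stored as a NaN, and NaN is the one value for
+    // which d == d is false, so the self-compare above detects empty slots.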
+    moveDoubleTo64(fpRegT0, regT0);
+    sub64(tagTypeNumberRegister, regT0);
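+    // Boxing, under the JSVALUE64 encoding: subtracting TagTypeNumber is (mod 2^64)
+    // the same as adding the 2^48 double-encode offset, turning the raw IEEE bits
+    // into an EncodedJSValue number.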
+    
+    return slowCases;
 }
 
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
 {
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int ident = currentInstruction[3].u.operand;
-
-    JITStubCall stubCall(this, cti_op_get_by_id_generic);
-    stubCall.addArgument(base);
-    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
-    stubCall.call(dst);
-
-    m_propertyAccessInstructionIndex++;
+    JumpList slowCases;
+    
+    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
+    load64(BaseIndex(regT2, regT1, TimesEight), regT0);
+    slowCases.append(branchTest64(Zero, regT0));
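+    // An empty slot in contiguous storage holds the all-zero EncodedJSValue
+    // (JSValue()), so a zero load means a hole and takes the slow path.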
+    
+    return slowCases;
 }
 
-void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
 {
-    m_propertyAccessInstructionIndex++;
-    ASSERT_NOT_REACHED();
-}
+    JumpList slowCases;
 
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
-    int base = currentInstruction[1].u.operand;
-    int ident = currentInstruction[2].u.operand;
-    int value = currentInstruction[3].u.operand;
-
-    JITStubCall stubCall(this, cti_op_put_by_id_generic);
-    stubCall.addArgument(base);
-    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
-    stubCall.addArgument(value);
-    stubCall.call();
+    add32(TrustedImm32(-ArrayStorageShape), regT2, regT3);
+    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
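+    // Unsigned range check: the shape constants are consecutive, so after the
+    // subtraction a single Above test accepts exactly ArrayStorageShape and
+    // SlowPutArrayStorageShape - roughly: unsigned(shape - first) <= (last - first).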
 
-    m_propertyAccessInstructionIndex++;
-}
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
 
-void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
-    m_propertyAccessInstructionIndex++;
-    ASSERT_NOT_REACHED();
+    load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
+    slowCases.append(branchTest64(Zero, regT0));
+    
+    return slowCases;
 }
 
-#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-void JIT::emit_op_method_check(Instruction* currentInstruction)
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    // Assert that the following instruction is a get_by_id.
-    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
-
-    currentInstruction += OPCODE_LENGTH(op_method_check);
-
-    // Do the method check - check the object & its prototype's structure inline (this is the common case).
-    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
-    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
-
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-
-    emitLoad(base, regT1, regT0);
-    emitJumpSlowCaseIfNotJSCell(base, regT1);
-
-    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
-    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-
-    // This will be relinked to load the function without doing a load.
-    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
-    move(Imm32(JSValue::CellTag), regT1);
-    Jump match = jump();
-
-    ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
-    ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
-    ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
-
-    // Link the failure cases here.
-    structureCheck.link(this);
-    protoStructureCheck.link(this);
-
-    // Do a regular(ish) get_by_id (the slow case will be linked to
-    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
-    compileGetByIdHotPath();
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned base = currentInstruction[2].u.operand;
+    unsigned property = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+    
+    linkSlowCase(iter); // property int32 check
+    linkSlowCaseIfNotJSCell(iter, base); // base cell check
+    Jump nonCell = jump();
+    linkSlowCase(iter); // base array check
+    Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
+    emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
+    Jump failed = branchTest64(Zero, regT0);
+    emitPutVirtualRegister(dst, regT0);
+    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+    failed.link(this);
+    notString.link(this);
+    nonCell.link(this);
+    
+    Jump skipProfiling = jump();
+    
+    linkSlowCase(iter); // vector length check
+    linkSlowCase(iter); // empty value
+    
+    emitArrayProfileOutOfBoundsSpecialCase(profile);
+    
+    skipProfiling.link(this);
+    
+    Label slowPath = label();
+    
+    JITStubCall stubCall(this, cti_op_get_by_val);
+    stubCall.addArgument(base, regT2);
+    stubCall.addArgument(property, regT2);
+    Call call = stubCall.call(dst);
 
-    match.link(this);
-    emitStore(dst, regT1, regT0);
-    map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
+    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+    m_byValInstructionIndex++;
 
-    // We've already generated the following get_by_id, so make sure it's skipped over.
-    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+    emitValueProfilingSite();
 }
 
-void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
 {
-    currentInstruction += OPCODE_LENGTH(op_method_check);
-
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int ident = currentInstruction[3].u.operand;
-
-    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
-
-    // We've already generated the following get_by_id, so make sure it's skipped over.
-    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
-}
-
-#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-
+    ASSERT(sizeof(JSValue) == 8);
+    
+    if (finalObjectMode == MayBeFinal) {
+        Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
+        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+        neg32(offset);
+        Jump done = jump();
+        isInline.link(this);
+        addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
+        done.link(this);
+    } else {
+#if !ASSERT_DISABLED
+        Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
+        breakpoint();
+        isOutOfLine.link(this);
 #endif
+        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+        neg32(offset);
+    }
+    signExtend32ToPtr(offset, offset);
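+    // Both paths converge on the single indexed load below: inline properties use
+    // a positive offset from the object base, out-of-line properties a negated
+    // offset from the butterfly, with the constant displacement compensating.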
+    load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
+}
 
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
 {
     unsigned dst = currentInstruction[1].u.operand;
     unsigned base = currentInstruction[2].u.operand;
     unsigned property = currentInstruction[3].u.operand;
-    
-    emitLoad2(base, regT1, regT0, property, regT3, regT2);
+    unsigned expected = currentInstruction[4].u.operand;
+    unsigned iter = currentInstruction[5].u.operand;
+    unsigned i = currentInstruction[6].u.operand;
 
-    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-    emitJumpSlowCaseIfNotJSCell(base, regT1);
-    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
+    emitGetVirtualRegister(property, regT0);
+    addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
+    emitGetVirtualRegisters(base, regT0, iter, regT1);
+    emitJumpSlowCaseIfNotJSCell(regT0, base);
 
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
-    emitStore(dst, regT1, regT0);
-    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+    // Test base's structure
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+    load32(addressFor(i), regT3);
+    sub32(TrustedImm32(1), regT3);
+    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+    Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
+    add32(TrustedImm32(firstOutOfLineOffset), regT3);
+    sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
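+    // Rebase indices past the inline capacity into the out-of-line range,
+    // i.e. offset = i + firstOutOfLineOffset - inlineCapacity.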
+    inlineProperty.link(this);
+    compileGetDirectOffset(regT0, regT0, regT3, regT1);
+
+    emitPutVirtualRegister(dst, regT0);
 }
 
-void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned dst = currentInstruction[1].u.operand;
     unsigned base = currentInstruction[2].u.operand;
     unsigned property = currentInstruction[3].u.operand;
 
-    // The slow case that handles accesses to arrays (below) may jump back up to here.
-    Label callGetByValJITStub(this);
-
-    linkSlowCase(iter); // property int32 check
-    linkSlowCaseIfNotJSCell(iter, base); // base cell check
-    linkSlowCase(iter); // base array check
+    linkSlowCase(iter);
+    linkSlowCaseIfNotJSCell(iter, base);
+    linkSlowCase(iter);
+    linkSlowCase(iter);
 
-    JITStubCall stubCall(this, cti_op_get_by_val);
-    stubCall.addArgument(base);
-    stubCall.addArgument(property);
+    JITStubCall stubCall(this, cti_op_get_by_val_generic);
+    stubCall.addArgument(base, regT2);
+    stubCall.addArgument(property, regT2);
     stubCall.call(dst);
-
-    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
-    linkSlowCase(iter); // array fast cut-off check
-
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
-    branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), callGetByValJITStub);
-
-    // Missed the fast region, but it is still in the vector.
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
-
-    // FIXME: Maybe we can optimize this comparison to JSValue().
-    Jump skip = branch32(NotEqual, regT0, Imm32(0));
-    branch32(Equal, regT1, Imm32(JSValue::CellTag), callGetByValJITStub);
-
-    skip.link(this);
-    emitStore(dst, regT1, regT0);
 }
 
 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
 {
     unsigned base = currentInstruction[1].u.operand;
     unsigned property = currentInstruction[2].u.operand;
-    unsigned value = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+
+    emitGetVirtualRegisters(base, regT0, property, regT1);
+    emitJumpSlowCaseIfNotImmediateInteger(regT1);
+    // See comment in op_get_by_val.
+    zeroExtend32ToPtr(regT1, regT1);
+    emitJumpSlowCaseIfNotJSCell(regT0, base);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    emitArrayProfilingSite(regT2, regT3, profile);
+    and32(TrustedImm32(IndexingShapeMask), regT2);
+    
+    PatchableJump badType;
+    JumpList slowCases;
+    
+    JITArrayMode mode = chooseArrayMode(profile);
+    switch (mode) {
+    case JITInt32:
+        slowCases = emitInt32PutByVal(currentInstruction, badType);
+        break;
+    case JITDouble:
+        slowCases = emitDoublePutByVal(currentInstruction, badType);
+        break;
+    case JITContiguous:
+        slowCases = emitContiguousPutByVal(currentInstruction, badType);
+        break;
+    case JITArrayStorage:
+        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
+        break;
+    default:
+        CRASH();
+        break;
+    }
+    
+    addSlowCase(badType);
+    addSlowCase(slowCases);
+    
+    Label done = label();
+    
+    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+
+    emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+}
 
-    emitLoad2(base, regT1, regT0, property, regT3, regT2);
+JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
+{
+    unsigned value = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+    
+    JumpList slowCases;
 
-    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-    emitJumpSlowCaseIfNotJSCell(base, regT1);
-    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape));
+    
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+    Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));
+
+    Label storeResult = label();
+    emitGetVirtualRegister(value, regT3);
+    switch (indexingShape) {
+    case Int32Shape:
+        slowCases.append(emitJumpIfNotImmediateInteger(regT3));
+        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
+        break;
+    case DoubleShape: {
+        Jump notInt = emitJumpIfNotImmediateInteger(regT3);
+        convertInt32ToDouble(regT3, fpRegT0);
+        Jump ready = jump();
+        notInt.link(this);
+        add64(tagTypeNumberRegister, regT3);
+        move64ToDouble(regT3, fpRegT0);
+        slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
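+        // Adding TagTypeNumber unboxes a boxed double; any non-number JSValue
+        // decodes to a NaN bit pattern, so the self-compare above rejects it
+        // (and also rejects genuine NaN, preserving the hole representation).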
+        ready.link(this);
+        storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight));
+        break;
+    }
+    case ContiguousShape:
+        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
+        break;
+    default:
+        CRASH();
+        break;
+    }
+    
+    Jump done = jump();
+    outOfBounds.link(this);
+    
+    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength())));
+    
+    emitArrayProfileStoreToHoleSpecialCase(profile);
+    
+    add32(TrustedImm32(1), regT1, regT3);
+    store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength()));
+    jump().linkTo(storeResult, this);
+    
+    done.link(this);
+    
+    return slowCases;
+}
 
-    Jump inFastVector = branch32(Below, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
+JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
+{
+    unsigned value = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+    
+    JumpList slowCases;
+    
+    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
 
-    // Check if the access is within the vector.
-    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
+    Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
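+    // A zero entry in the vector is a hole; the first store to one must bump
+    // numValuesInVector (and possibly the array length) before storing the value.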
 
-    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
-    // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
-    Jump skip = branch32(NotEqual, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::CellTag));
-    addSlowCase(branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), Imm32(0)));
-    skip.link(this);
+    Label storeResult(this);
+    emitGetVirtualRegister(value, regT3);
+    store64(regT3, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+    Jump end = jump();
+    
+    empty.link(this);
+    emitArrayProfileStoreToHoleSpecialCase(profile);
+    add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset()));
+    branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this);
 
-    inFastVector.link(this);
+    add32(TrustedImm32(1), regT1);
+    store32(regT1, Address(regT2, ArrayStorage::lengthOffset()));
+    sub32(TrustedImm32(1), regT1);
+    jump().linkTo(storeResult, this);
 
-    emitLoad(value, regT1, regT0);
-    store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
-    store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
+    end.link(this);
+    
+    return slowCases;
 }
 
 void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -355,820 +453,66 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
     unsigned base = currentInstruction[1].u.operand;
     unsigned property = currentInstruction[2].u.operand;
     unsigned value = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
 
     linkSlowCase(iter); // property int32 check
     linkSlowCaseIfNotJSCell(iter, base); // base cell check
     linkSlowCase(iter); // base not array check
+    
+    JITArrayMode mode = chooseArrayMode(profile);
+    switch (mode) {
+    case JITInt32:
+    case JITDouble:
+        linkSlowCase(iter); // value type check
+        break;
+    default:
+        break;
+    }
+    
+    Jump skipProfiling = jump();
+    linkSlowCase(iter); // out of bounds
+    emitArrayProfileOutOfBoundsSpecialCase(profile);
+    skipProfiling.link(this);
+    
+    Label slowPath = label();
 
     JITStubCall stubPutByValCall(this, cti_op_put_by_val);
-    stubPutByValCall.addArgument(base);
-    stubPutByValCall.addArgument(property);
-    stubPutByValCall.addArgument(value);
-    stubPutByValCall.call();
-
-    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
-    // Slow cases for immediate int accesses to arrays.
-    linkSlowCase(iter); // in vector check
-    linkSlowCase(iter); // written to slot check
-
-    JITStubCall stubCall(this, cti_op_put_by_val_array);
-    stubCall.addArgument(regT1, regT0);
-    stubCall.addArgument(regT2);
-    stubCall.addArgument(value);
-    stubCall.call();
-}
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    
-    emitLoad(base, regT1, regT0);
-    emitJumpSlowCaseIfNotJSCell(base, regT1);
-    compileGetByIdHotPath();
-    emitStore(dst, regT1, regT0);
-    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
-}
-
-void JIT::compileGetByIdHotPath()
-{
-    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
-    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
-    // to array-length / prototype access trampolines), and finally we also use the property-map access offset as a label
-    // to jump back to if one of these trampolines finds a match.
-    Label hotPathBegin(this);
-    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
-    m_propertyAccessInstructionIndex++;
-
-    DataLabelPtr structureToCompare;
-    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-    addSlowCase(structureCheck);
-    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
-    ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
-
-    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
-    Label externalLoadComplete(this);
-    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
-    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
-
-    DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
-    ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
-    DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
-    ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
-
-    Label putResult(this);
-    ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int ident = currentInstruction[3].u.operand;
-
-    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
-}
+    stubPutByValCall.addArgument(regT0);
+    stubPutByValCall.addArgument(property, regT2);
+    stubPutByValCall.addArgument(value, regT2);
+    Call call = stubPutByValCall.call();
 
-void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
-{
-    // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
-    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
-    // of the call (which we can use to look up the patch information), but should an array-length or
-    // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
-    // the distance from the call to the head of the slow case.
-    linkSlowCaseIfNotJSCell(iter, base);
-    linkSlowCase(iter);
-
-    Label coldPathBegin(this);
-
-    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
-    stubCall.addArgument(regT1, regT0);
-    stubCall.addArgument(ImmPtr(ident));
-    Call call = stubCall.call(dst);
-
-    ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
-
-    // Track the location of the call; this will be used to recover patch information.
-    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
-    m_propertyAccessInstructionIndex++;
+    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+    m_byValInstructionIndex++;
 }
 
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
 {
-    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
-    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
-    // such that the Structure & offset are always at the same distance from this.
-
-    int base = currentInstruction[1].u.operand;
-    int value = currentInstruction[3].u.operand;
-
-    emitLoad2(base, regT1, regT0, value, regT3, regT2);
-
-    emitJumpSlowCaseIfNotJSCell(base, regT1);
-
-    Label hotPathBegin(this);
-    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
-    m_propertyAccessInstructionIndex++;
-
-    // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
-    DataLabelPtr structureToCompare;
-    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
-    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
-
-    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
-    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
-    Label externalLoadComplete(this);
-    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
-    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
-
-    DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
-    DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
-    ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
-    ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
+    JITStubCall stubCall(this, cti_op_put_by_index);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+    stubCall.call();
 }
 
-void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
 {
-    int base = currentInstruction[1].u.operand;
-    int ident = currentInstruction[2].u.operand;
-
-    linkSlowCaseIfNotJSCell(iter, base);
-    linkSlowCase(iter);
-
-    JITStubCall stubCall(this, cti_op_put_by_id);
-    stubCall.addArgument(regT1, regT0);
-    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
-    stubCall.addArgument(regT3, regT2); 
-    Call call = stubCall.call();
-
-    // Track the location of the call; this will be used to recover patch information.
-    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
-    m_propertyAccessInstructionIndex++;
+    JITStubCall stubCall(this, cti_op_put_getter_setter);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+    stubCall.addArgument(currentInstruction[4].u.operand, regT2);
+    stubCall.call();
 }
 
-// Compile a store into an object's property storage.  May overwrite base.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
-{
-    int offset = cachedOffset;
-    if (structure->isUsingInlineStorage())
-        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) /  sizeof(Register);
-    else
-        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
-    emitStore(offset, valueTag, valuePayload, base);
-}
-
-// Compile a load from an object's property storage.  May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
-{
-    int offset = cachedOffset;
-    if (structure->isUsingInlineStorage())
-        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
-    else
-        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
-    emitLoad(offset, resultTag, resultPayload, base);
-}
-
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
-{
-    if (base->isUsingInlineStorage()) {
-        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
-        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
-        return;
-    }
-
-    size_t offset = cachedOffset * sizeof(JSValue);
-
-    PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
-    loadPtr(static_cast<void*>(protoPropertyStorage), temp);
-    load32(Address(temp, offset), resultPayload);
-    load32(Address(temp, offset + 4), resultTag);
-}
-
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
-{
-    // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag.  The value can be found on the stack.
-
-    JumpList failureCases;
-    failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
-
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
-    failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(oldStructure)));
-
-    // Verify that nothing in the prototype chain has a setter for this property. 
-    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
-        loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
-        loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
-        failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(it->get())));
-    }
-
-    // Reallocate property storage if needed.
-    Call callTarget;
-    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
-    if (willNeedStorageRealloc) {
-        // This trampoline was called like a JIT stub; before we can call again we need to
-        // remove the return address from the stack, to prevent the stack from becoming misaligned.
-        preserveReturnAddressAfterCall(regT3);
-        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
-        stubCall.skipArgument(); // base
-        stubCall.skipArgument(); // ident
-        stubCall.skipArgument(); // value
-        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
-        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
-        stubCall.call(regT0);
-
-        restoreReturnAddressBeforeReturn(regT3);
-    }
-
-    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
-    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
-    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
-    load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
-    load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);
-
-    // Write the value
-    compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
-
-    ret();
-    
-    ASSERT(!failureCases.empty());
-    failureCases.link(this);
-    restoreArgumentReferenceForTrampoline();
-    Call failureCall = tailRecursiveCall();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
-    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
-
-    if (willNeedStorageRealloc) {
-        ASSERT(m_calls.size() == 1);
-        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
-    }
-    
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
-}
-
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
-{
-    RepatchBuffer repatchBuffer(codeBlock);
-
-    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
-    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
-
-    int offset = sizeof(JSValue) * cachedOffset;
-
-    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
-    // and makes the subsequent load's offset automatically correct
-    if (structure->isUsingInlineStorage())
-        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
-
-    // Patch the offset into the property map to load from, then patch the Structure to look for.
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
-}
-
-void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto)
-{
-    RepatchBuffer repatchBuffer(codeBlock);
-
-    ASSERT(!methodCallLinkInfo.cachedStructure);
-    methodCallLinkInfo.cachedStructure = structure;
-    structure->ref();
-
-    Structure* prototypeStructure = proto->structure();
-    ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
-    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
-    prototypeStructure->ref();
-
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
-}
-
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
-{
-    RepatchBuffer repatchBuffer(codeBlock);
-
-    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
-    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
-
-    int offset = sizeof(JSValue) * cachedOffset;
-
-    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
-    // and makes the subsequent load's offset automatically correct
-    if (structure->isUsingInlineStorage())
-        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
-
-    // Patch the offset into the property map to load from, then patch the Structure to look for.
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
-}
-
-void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
-{
-    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
-    
-    // regT0 holds a JSCell*
-
-    // Check for array
-    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
-
-    // Checks out okay! - get the length from the storage
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
-
-    Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
-    move(regT2, regT0);
-    move(Imm32(JSValue::Int32Tag), regT1);
-    Jump success = jump();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
-    patchBuffer.link(failureCases1, slowCaseBegin);
-    patchBuffer.link(failureCases2, slowCaseBegin);
-
-    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
-
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
-
-    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
-}
-
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
-{
-    // regT0 holds a JSCell*
-
-    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
-    // referencing the prototype object - let's speculatively load its table nice and early!)
-    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
-    Jump failureCases1 = checkStructure(regT0, structure);
-
-    // Check that the prototype object's Structure has not changed.
-    Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
-    move(ImmPtr(prototypeStructure), regT3);
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
-    // Checks out okay! - getDirectOffset
-    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
-
-    Jump success = jump();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
-    patchBuffer.link(failureCases1, slowCaseBegin);
-    patchBuffer.link(failureCases2, slowCaseBegin);
-
-    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
-
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
-
-    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
-}
-
-
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
-{
-    // regT0 holds a JSCell*
-    
-    Jump failureCase = checkStructure(regT0, structure);
-    compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
-    Jump success = jump();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
-    if (!lastProtoBegin)
-        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
-
-    patchBuffer.link(failureCase, lastProtoBegin);
-
-    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
-    structure->ref();
-    polymorphicStructures->list[currentIndex].set(entryLabel, structure);
-
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
-{
-    // regT0 holds a JSCell*
-    
-    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
-    // referencing the prototype object - let's speculatively load its table nice and early!)
-    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
-    // Check that regT0 is an object of the right Structure.
-    Jump failureCases1 = checkStructure(regT0, structure);
-
-    // Check that the prototype object's Structure has not changed.
-    Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
-    move(ImmPtr(prototypeStructure), regT3);
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
-    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
-
-    Jump success = jump();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
-    patchBuffer.link(failureCases1, lastProtoBegin);
-    patchBuffer.link(failureCases2, lastProtoBegin);
-
-    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
-    structure->ref();
-    prototypeStructure->ref();
-    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
-
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
-{
-    // regT0 holds a JSCell*
-    
-    ASSERT(count);
-    
-    JumpList bucketsOfFail;
-
-    // Check that regT0 is an object of the right Structure.
-    bucketsOfFail.append(checkStructure(regT0, structure));
-
-    Structure* currStructure = structure;
-    RefPtr<Structure>* chainEntries = chain->head();
-    JSObject* protoObject = 0;
-    for (unsigned i = 0; i < count; ++i) {
-        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
-        currStructure = chainEntries[i].get();
-
-        // Check that the prototype object's Structure has not changed.
-        Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
-        move(ImmPtr(currStructure), regT3);
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
-    }
-    ASSERT(protoObject);
-
-    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
-    Jump success = jump();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
-
-    patchBuffer.link(bucketsOfFail, lastProtoBegin);
-
-    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
-    // Track the stub we have created so that it will be deleted later.
-    structure->ref();
-    chain->ref();
-    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
-
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
-{
-    // regT0 holds a JSCell*
-    
-    ASSERT(count);
-    
-    JumpList bucketsOfFail;
-
-    // Check eax is an object of the right Structure.
-    bucketsOfFail.append(checkStructure(regT0, structure));
-
-    Structure* currStructure = structure;
-    RefPtr<Structure>* chainEntries = chain->head();
-    JSObject* protoObject = 0;
-    for (unsigned i = 0; i < count; ++i) {
-        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
-        currStructure = chainEntries[i].get();
-
-        // Check the prototype object's Structure had not changed.
-        Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
-        move(ImmPtr(currStructure), regT3);
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
-    }
-    ASSERT(protoObject);
-
-    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
-    Jump success = jump();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
-
-    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
-
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
-
-    // We don't want to patch more than once - in future go to cti_op_get_by_id_proto_list.
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
-}
-
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-#else // USE(JSVALUE32_64)
-
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
-{
-    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
-    emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(JSVALUE64)
-    // This is technically incorrect - we're zero-extending an int32.  On the hot path this doesn't matter.
-    // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
-    // the number was negative since m_fastAccessCutoff is always less than intmax (since the total allocation
-    // size is always less than 4Gb).  As such zero extending will have been correct (and extending the value
-    // to 64-bits is necessary since it's used in the address calculation).  We zero extend rather than sign
-    // extend since it makes it easier to re-tag the value in the slow case.
-    zeroExtend32ToPtr(regT1, regT1);
-#else
-    emitFastArithImmToInt(regT1);
-#endif
-    emitJumpSlowCaseIfNotJSCell(regT0);
-    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-
-    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
-
-    // Get the value from the vector
-    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
-    emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
-{
-    emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
-    emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(JSVALUE64)
-    // See comment in op_get_by_val.
-    zeroExtend32ToPtr(regT1, regT1);
-#else
-    emitFastArithImmToInt(regT1);
-#endif
-    emitJumpSlowCaseIfNotJSCell(regT0);
-    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-
-    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-    Jump inFastVector = branch32(Below, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
-    // No; oh well, check if the access is within the vector - if so, we may still be okay.
-    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
-
-    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
-    // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
-    addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
-
-    // All good - put the value into the array.
-    inFastVector.link(this);
-    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
-    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
-}
-
-void JIT::emit_op_put_by_index(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, cti_op_put_by_index);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
-    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-    stubCall.call();
-}
-
-void JIT::emit_op_put_getter(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, cti_op_put_getter);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-    stubCall.call();
-}
-
-void JIT::emit_op_put_setter(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, cti_op_put_setter);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-    stubCall.call();
-}
-
-void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
 {
     JITStubCall stubCall(this, cti_op_del_by_id);
-    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
-    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
-    stubCall.call(currentInstruction[1].u.operand);
-}
-
-
-#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
-#endif
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
-    unsigned resultVReg = currentInstruction[1].u.operand;
-    unsigned baseVReg = currentInstruction[2].u.operand;
-    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
-    emitGetVirtualRegister(baseVReg, regT0);
-    JITStubCall stubCall(this, cti_op_get_by_id_generic);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(ImmPtr(ident));
-    stubCall.call(resultVReg);
-
-    m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
-    ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
-    unsigned baseVReg = currentInstruction[1].u.operand;
-    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
-    unsigned valueVReg = currentInstruction[3].u.operand;
-
-    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
-
-    JITStubCall stubCall(this, cti_op_put_by_id_generic);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(ImmPtr(ident));
-    stubCall.addArgument(regT1);
-    stubCall.call();
-
-    m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
-    ASSERT_NOT_REACHED();
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-void JIT::emit_op_method_check(Instruction* currentInstruction)
-{
-    // Assert that the following instruction is a get_by_id.
-    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
-
-    currentInstruction += OPCODE_LENGTH(op_method_check);
-    unsigned resultVReg = currentInstruction[1].u.operand;
-    unsigned baseVReg = currentInstruction[2].u.operand;
-    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
-    emitGetVirtualRegister(baseVReg, regT0);
-
-    // Do the method check - check the object & its prototype's structure inline (this is the common case).
-    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
-    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
-    Jump notCell = emitJumpIfNotJSCell(regT0);
-    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
-    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-
-    // This will be relinked to load the function without doing a load.
-    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
-    Jump match = jump();
-
-    ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
-    ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
-    ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
-
-    // Link the failure cases here.
-    notCell.link(this);
-    structureCheck.link(this);
-    protoStructureCheck.link(this);
-
-    // Do a regular(ish) get_by_id (the slow case will be linked to
-    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
-    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
-
-    match.link(this);
-    emitPutVirtualRegister(resultVReg);
-
-    // We've already generated the following get_by_id, so make sure it's skipped over.
-    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
-}
-
-void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    currentInstruction += OPCODE_LENGTH(op_method_check);
-    unsigned resultVReg = currentInstruction[1].u.operand;
-    unsigned baseVReg = currentInstruction[2].u.operand;
-    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
-    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
-
-    // We've already generated the following get_by_id, so make sure it's skipped over.
-    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
-}
-
-#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-
-#endif
+    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+    stubCall.call(currentInstruction[1].u.operand);
+}
 
 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
 {
@@ -1177,11 +521,12 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
 
     emitGetVirtualRegister(baseVReg, regT0);
-    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
+    compileGetByIdHotPath(baseVReg, ident);
+    emitValueProfilingSite();
     emitPutVirtualRegister(resultVReg);
 }
 
-void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
+void JIT::compileGetByIdHotPath(int baseVReg, Identifier* ident)
 {
     // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
     // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
@@ -1189,26 +534,28 @@ void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propert
     // to jump back to if one of these trampolines finds a match.
 
     emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+    
+    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
+        loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+        emitArrayProfilingSiteForBytecodeIndex(regT1, regT2, m_bytecodeOffset);
+    }
+
+    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
 
     Label hotPathBegin(this);
-    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
 
     DataLabelPtr structureToCompare;
-    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+    PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
     addSlowCase(structureCheck);
-    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
-    ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
 
-    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
-    Label externalLoadComplete(this);
-    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
-    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
-
-    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
-    ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);
+    ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+    DataLabelCompact displacementLabel = load64WithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
 
     Label putResult(this);
-    ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
+
+    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
+    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel, putResult));
 }
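 
 // For orientation, a hedged, self-contained sketch of the monomorphic cache
 // that the hot path above emits as machine code. Every type and name below is
 // a hypothetical illustration, not a JSC API (assumes <cstdint> for uint64_t):
 // the real JIT bakes the structure pointer, the storage load, and the offset
 // into the instruction stream, and the repatch code later in this file
 // rewrites them in place.
 struct ShapeSketch;                                  // stands in for Structure
 struct ObjectSketch {
     const ShapeSketch* shape;                        // checked by structureCheck
     uint64_t inlineSlots[6];                         // inline property storage
 };
 static uint64_t getByIdCachedSketch(const ObjectSketch* o, const ShapeSketch* cachedShape,
     unsigned cachedIndex, uint64_t (*slowCase)(const ObjectSketch*))
 {
     if (o->shape != cachedShape)                     // patchable structure compare
         return slowCase(o);                          // relinked to generated stubs
     // propertyStorageLoad is "convertible": a no-op for inline slots, a real
     // pointer load when the property lives out of line in the butterfly.
     return o->inlineSlots[cachedIndex];              // displacementLabel load
 }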
 
 void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1217,10 +564,11 @@ void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCase
     unsigned baseVReg = currentInstruction[2].u.operand;
     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
 
-    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
+    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter);
+    emitValueProfilingSite();
 }
 
-void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
+void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter)
 {
     // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
     // so that we only need to track one pointer into the slow case code - we track a pointer to the location
@@ -1231,19 +579,18 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
     linkSlowCaseIfNotJSCell(iter, baseVReg);
     linkSlowCase(iter);
 
-#ifndef NDEBUG
+    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
     Label coldPathBegin(this);
-#endif
-    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+    JITStubCall stubCall(this, cti_op_get_by_id);
     stubCall.addArgument(regT0);
-    stubCall.addArgument(ImmPtr(ident));
+    stubCall.addArgument(TrustedImmPtr(ident));
     Call call = stubCall.call(resultVReg);
 
-    ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
+    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
 
     // Track the location of the call; this will be used to recover patch information.
-    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
-    m_propertyAccessInstructionIndex++;
+    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call);
 }
 
 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
@@ -1251,8 +598,6 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
     unsigned baseVReg = currentInstruction[1].u.operand;
     unsigned valueVReg = currentInstruction[3].u.operand;
 
-    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
-
     // In order to be able to patch both the Structure and the object offset, we store one pointer,
     // 'hotPathBegin', to just after the point where the arguments have been loaded into registers,
     // and we generate code such that the Structure & offset are always at the same distance from it.
@@ -1262,112 +607,113 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
     // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
     emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
 
+    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
     Label hotPathBegin(this);
-    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
 
     // It is important that the following instruction plants a 32-bit immediate, so that it can be patched over.
     DataLabelPtr structureToCompare;
-    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
-    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
+    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+
+    ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+    DataLabel32 displacementLabel = store64WithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
+
+    END_UNINTERRUPTED_SEQUENCE(sequencePutById);
 
-    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
-    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
-    Label externalLoadComplete(this);
-    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
-    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
+    emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
 
-    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
-    ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
+    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel));
 }
 
 void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned baseVReg = currentInstruction[1].u.operand;
     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
-
-    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
+    unsigned direct = currentInstruction[8].u.operand;
 
     linkSlowCaseIfNotJSCell(iter, baseVReg);
     linkSlowCase(iter);
 
-    JITStubCall stubCall(this, cti_op_put_by_id);
+    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
     stubCall.addArgument(regT0);
-    stubCall.addArgument(ImmPtr(ident));
+    stubCall.addArgument(TrustedImmPtr(ident));
     stubCall.addArgument(regT1);
+    move(regT0, nonArgGPR1);
     Call call = stubCall.call();
 
     // Track the location of the call; this will be used to recover patch information.
-    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
+    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call);
 }
 
 // Compile a store into an object's property storage.  May overwrite the
 // value in base.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
 {
-    int offset = cachedOffset * sizeof(JSValue);
-    if (structure->isUsingInlineStorage())
-        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
-    else
-        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
-    storePtr(value, Address(base, offset));
+    if (isInlineOffset(cachedOffset)) {
+        store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
+        return;
+    }
+    
+    loadPtr(Address(base, JSObject::butterflyOffset()), base);
+    store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
 }
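 
 // A hedged sketch of the PropertyOffset convention these helpers rely on: one
 // integer range covers both kinds of storage, and a split point decides
 // whether a slot lives inside the cell or out of line in the butterfly. The
 // constant and names below are hypothetical stand-ins for JSC's
 // isInlineOffset()/offsetInInlineStorage()/offsetInButterfly() helpers
 // (assumes <cstdint> for uint64_t).
 static const int firstOutOfLineOffsetSketch = 100;   // illustrative split point
 struct CellSketch {
     uint64_t inlineSlots[6];                         // storage inside the cell
     uint64_t* butterfly;                             // out-of-line storage
 };
 static uint64_t loadAtOffsetSketch(const CellSketch* cell, int propertyOffset)
 {
     if (propertyOffset < firstOutOfLineOffsetSketch) // an "inline" offset; real
         return cell->inlineSlots[propertyOffset];    // code bounds this by the
                                                      // object's inline capacity
     return cell->butterfly[propertyOffset - firstOutOfLineOffsetSketch];
 }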
 
 // Compile a load from an object's property storage.  May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
 {
-    int offset = cachedOffset * sizeof(JSValue);
-    if (structure->isUsingInlineStorage())
-        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
-    else
-        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
-    loadPtr(Address(base, offset), result);
+    if (isInlineOffset(cachedOffset)) {
+        load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
+        return;
+    }
+    
+    loadPtr(Address(base, JSObject::butterflyOffset()), result);
+    load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
 }
 
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
 {
-    if (base->isUsingInlineStorage())
-        loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
-    else {
-        PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
-        loadPtr(static_cast<void*>(protoPropertyStorage), temp);
-        loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
-    } 
+    if (isInlineOffset(cachedOffset)) {
+        load64(base->locationForOffset(cachedOffset), result);
+        return;
+    }
+    
+    loadPtr(base->butterflyAddress(), result);
+    load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
 }
 
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
 {
+    move(nonArgGPR1, regT0);
+
     JumpList failureCases;
     // Check regT0 is an object of the right Structure.
     failureCases.append(emitJumpIfNotJSCell(regT0));
-    failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
-    JumpList successCases;
-
-    // ecx = baseObject
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
-    // proto(ecx) = baseObject->structure()->prototype()
-    failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
-
-    loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
+    failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
     
-    // ecx = baseObject->m_structure
-    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
-        // null check the prototype
-        successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNull()))));
+    testPrototype(oldStructure->storedPrototype(), failureCases, stubInfo);
+    
+    ASSERT(oldStructure->storedPrototype().isNull() || oldStructure->storedPrototype().asCell()->structure() == chain->head()->get());
 
-        // Check the structure id
-        failureCases.append(branchPtr(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(it->get())));
-        
-        loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
-        failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
-        loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
+    // Check that the Structure of each object on the prototype chain has not changed.
+    if (!direct) {
+        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
+            ASSERT((*it)->storedPrototype().isNull() || (*it)->storedPrototype().asCell()->structure() == it[1].get());
+            testPrototype((*it)->storedPrototype(), failureCases, stubInfo);
+        }
     }
 
-    successCases.link(this);
-
-    Call callTarget;
-
+    // If we succeed in all of our checks, and the code was optimizable, then make sure we
+    // decrement the rare case counter.
+#if ENABLE(VALUE_PROFILER)
+    if (m_codeBlock->canCompileWithDFG() >= DFG::MayInline) {
+        sub32(
+            TrustedImm32(1),
+            AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
+    }
+#endif
+    
     // Emit a call only if storage reallocation is needed.
-    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+    bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity();
     if (willNeedStorageRealloc) {
         // This trampoline was called like a JIT stub; before we can call again we need to
         // remove the return address from the stack, to prevent the stack from becoming misaligned.
@@ -1377,22 +723,21 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
         stubCall.skipArgument(); // base
         stubCall.skipArgument(); // ident
         stubCall.skipArgument(); // value
-        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
-        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
+        stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity()));
+        stubCall.addArgument(TrustedImmPtr(newStructure));
         stubCall.call(regT0);
-        emitGetJITStubArg(3, regT1);
+        emitGetJITStubArg(2, regT1);
 
         restoreReturnAddressBeforeReturn(regT3);
     }
 
-    // Assumes m_refCount can be decremented easily, refcount decrement is safe as 
-    // codeblock should ensure oldStructure->m_refCount > 0
-    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
-    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
-    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
+    // Planting the new structure triggers the write barrier so we need
+    // an unconditional barrier here.
+    emitWriteBarrier(regT0, regT1, regT2, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
 
-    // write the value
-    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
+    ASSERT(newStructure->classInfo() == oldStructure->classInfo());
+    storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
+    compilePutDirectOffset(regT0, regT1, cachedOffset);
 
     ret();
     
@@ -1401,22 +746,29 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
     restoreArgumentReferenceForTrampoline();
     Call failureCall = tailRecursiveCall();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
 
-    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
+    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
 
     if (willNeedStorageRealloc) {
         ASSERT(m_calls.size() == 1);
         patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
     }
     
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    stubInfo->stubRoutine = createJITStubRoutine(
+        FINALIZE_CODE(
+            patchBuffer,
+            ("Baseline put_by_id transition for %s, return point %p",
+                toCString(*m_codeBlock).data(), returnAddress.value())),
+        *m_vm,
+        m_codeBlock->ownerExecutable(),
+        willNeedStorageRealloc,
+        newStructure);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+    repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
 }
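 
 // A hedged summary, in pseudo-C with hypothetical names, of what the
 // transition stub generated above does at runtime; the reallocation call is
 // only compiled in when the new Structure needs more out-of-line capacity.
 //
 //     if (object->structure != oldStructure) goto slowCase;
 //     for (each Structure s on the prototype chain, unless direct)
 //         if (prototype->structure != s) goto slowCase;
 //     if (newCapacity != oldCapacity) growOutOfLineStorage(object); // realloc stub call
 //     writeBarrier(object);                // unconditional: the Structure is changing
 //     object->structure = newStructure;    // commit the transition
 //     storeAtOffset(object, cachedOffset, value);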
 
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
 {
     RepatchBuffer repatchBuffer(codeBlock);
 
@@ -1424,55 +776,24 @@ void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, St
     // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
 
-    int offset = sizeof(JSValue) * cachedOffset;
-
-    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
-    // and makes the subsequent load's offset automatically correct
-    if (structure->isUsingInlineStorage())
-        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
-
     // Patch the offset into the property map to load from, then patch the Structure to look for.
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
-}
-
-void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto)
-{
-    RepatchBuffer repatchBuffer(codeBlock);
-
-    ASSERT(!methodCallLinkInfo.cachedStructure);
-    methodCallLinkInfo.cachedStructure = structure;
-    structure->ref();
-
-    Structure* prototypeStructure = proto->structure();
-    ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
-    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
-    prototypeStructure->ref();
-
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
+    repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
 }
 
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct)
 {
     RepatchBuffer repatchBuffer(codeBlock);
 
     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
     // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
-
-    int offset = sizeof(JSValue) * cachedOffset;
-
-    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
-    // and makes the subsequent load's offset automatically correct
-    if (structure->isUsingInlineStorage())
-        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
 
     // Patch the offset into the property map to load from, then patch the Structure to look for.
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
+    repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
 }
 
 void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
@@ -1480,41 +801,47 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
     StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
 
     // Check regT0 is an array
-    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    load8(Address(regT2, Structure::indexingTypeOffset()), regT2);
+    Jump failureCases1 = branchTest32(Zero, regT2, TrustedImm32(IsArray));
+    Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(IndexingShapeMask));
 
     // Checks out okay! - get the length from the storage
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
-
-    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
+    load32(Address(regT3, ArrayStorage::lengthOffset()), regT2);
+    Jump failureCases3 = branch32(LessThan, regT2, TrustedImm32(0));
 
     emitFastArithIntToImmNoCheck(regT2, regT0);
     Jump success = jump();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
 
     // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
     patchBuffer.link(failureCases1, slowCaseBegin);
     patchBuffer.link(failureCases2, slowCaseBegin);
+    patchBuffer.link(failureCases3, slowCaseBegin);
 
     // On success return to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
 
     // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    stubInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+        patchBuffer,
+        ("Basline JIT get_by_id array length stub for %s, return point %p",
+            toCString(*m_codeBlock).data(),
+            stubInfo->hotPathBegin.labelAtOffset(
+                stubInfo->patch.baseline.u.get.putResult).executableAddress()));
 
     // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
 
     // We don't want to patch more than once - in future go to cti_op_get_by_id_array_fail.
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
 }
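 
 // Why the failureCases3 branch above exists: the array length is stored as a
 // uint32, but this fast path re-tags it as an int32 immediate, so any length
 // with the top bit set (which compares as negative when treated as signed)
 // must take the slow case. A hedged sketch of the equivalent logic, with
 // hypothetical names:
 //
 //     int32_t length = storage->length;    // 32-bit load from the butterfly
 //     if (length < 0)                      // i.e. the uint32 was >= 2^31
 //         goto slowCase;                   // cannot be boxed as an int32
 //     return boxInt32(length);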
 
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
 {
     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
     // referencing the prototype object - let's speculatively load its table nice and early!)
@@ -1524,72 +851,132 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
     Jump failureCases1 = checkStructure(regT0, structure);
 
     // Check the prototype object's Structure has not changed.
-    Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
-    move(ImmPtr(prototypeStructure), regT3);
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
-    // Checks out okay! - getDirectOffset
-    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+    Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);
 
+    bool needsStubLink = false;
+    
+    // Checks out okay!
+    if (slot.cachedPropertyType() == PropertySlot::Getter) {
+        needsStubLink = true;
+        compileGetDirectOffset(protoObject, regT1, cachedOffset);
+        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+        stubCall.addArgument(regT1);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+        stubCall.call();
+    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+        needsStubLink = true;
+        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+        stubCall.addArgument(TrustedImmPtr(protoObject));
+        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+        stubCall.call();
+    } else
+        compileGetDirectOffset(protoObject, regT0, cachedOffset);
     Jump success = jump();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
 
     // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
     patchBuffer.link(failureCases1, slowCaseBegin);
-    patchBuffer.link(failureCases2, slowCaseBegin);
+    if (failureCases2.isSet())
+        patchBuffer.link(failureCases2, slowCaseBegin);
 
     // On success return to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
 
+    if (needsStubLink) {
+        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+            if (iter->to)
+                patchBuffer.link(iter->from, FunctionPtr(iter->to));
+        }
+    }
     // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    stubInfo->stubRoutine = createJITStubRoutine(
+        FINALIZE_CODE(
+            patchBuffer,
+            ("Baseline JIT get_by_id proto stub for %s, return point %p",
+                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
+                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+        *m_vm,
+        m_codeBlock->ownerExecutable(),
+        needsStubLink);
 
     // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
 
     // We don't want to patch more than once - in future go to cti_op_get_by_id_proto_list.
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
 }
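 
 // The three-way split above recurs in every list/chain stub below; a hedged
 // summary, with hypothetical names, of how a cached slot is materialized once
 // the structure checks pass:
 //
 //     switch (slot.cachedPropertyType()) {
 //     case Getter:  // load the accessor, then call it through a cti stub
 //         getter = loadAtOffset(protoObject, cachedOffset);
 //         result = callGetterStub(getter, baseValue);
 //         break;
 //     case Custom:  // call the native getter through a cti stub
 //         result = callCustomStub(customGetter, ident, protoObject);
 //         break;
 //     default:      // a plain cached slot: a direct load, no call
 //         result = loadAtOffset(protoObject, cachedOffset);
 //     }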
 
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
 {
     Jump failureCase = checkStructure(regT0, structure);
-    compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
+    bool needsStubLink = false;
+    bool isDirect = false;
+    if (slot.cachedPropertyType() == PropertySlot::Getter) {
+        needsStubLink = true;
+        compileGetDirectOffset(regT0, regT1, cachedOffset);
+        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+        stubCall.addArgument(regT1);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+        stubCall.call();
+    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+        needsStubLink = true;
+        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+        stubCall.call();
+    } else {
+        isDirect = true;
+        compileGetDirectOffset(regT0, regT0, cachedOffset);
+    }
     Jump success = jump();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+
+    if (needsStubLink) {
+        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+            if (iter->to)
+                patchBuffer.link(iter->from, FunctionPtr(iter->to));
+        }
+    }
 
     // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(polymorphicStructures->list[currentIndex - 1].stubRoutine));
     if (!lastProtoBegin)
-        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
 
     patchBuffer.link(failureCase, lastProtoBegin);
 
     // On success return to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
 
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+    RefPtr<JITStubRoutine> stubCode = createJITStubRoutine(
+        FINALIZE_CODE(
+            patchBuffer,
+            ("Baseline JIT get_by_id list stub for %s, return point %p",
+                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
+                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+        *m_vm,
+        m_codeBlock->ownerExecutable(),
+        needsStubLink);
 
-    structure->ref();
-    polymorphicStructures->list[currentIndex].set(entryLabel, structure);
+    polymorphicStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);
 
     // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode->code().code()));
 }
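 
 // How this self list (and the proto/chain lists below) grows: each newly
 // compiled stub becomes the first one tried, because the hot-path jump is
 // relinked to it, and its failure path falls back to the previously generated
 // stub, terminating at the original slow case. A hedged sketch of the chain:
 //
 //     hot path miss -> stub[N]       (newest; jumpLocation relinked here)
 //     stub[N] miss  -> stub[N-1]     (lastProtoBegin)
 //     ...
 //     stub[0] miss  -> coldPathBegin (the generic cti_op_get_by_id call)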
 
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
 {
     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
     // referencing the prototype object - let's speculatively load its table nice and early!)
@@ -1599,45 +986,72 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
     Jump failureCases1 = checkStructure(regT0, structure);
 
     // Check the prototype object's Structure has not changed.
-    Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
-    move(ImmPtr(prototypeStructure), regT3);
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
-    // Checks out okay! - getDirectOffset
-    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+    Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);
+
+    // Checks out okay!
+    bool needsStubLink = false;
+    bool isDirect = false;
+    if (slot.cachedPropertyType() == PropertySlot::Getter) {
+        needsStubLink = true;
+        compileGetDirectOffset(protoObject, regT1, cachedOffset);
+        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+        stubCall.addArgument(regT1);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+        stubCall.call();
+    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+        needsStubLink = true;
+        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+        stubCall.addArgument(TrustedImmPtr(protoObject));
+        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+        stubCall.call();
+    } else {
+        isDirect = true;
+        compileGetDirectOffset(protoObject, regT0, cachedOffset);
+    }
 
     Jump success = jump();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+
+    if (needsStubLink) {
+        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+            if (iter->to)
+                patchBuffer.link(iter->from, FunctionPtr(iter->to));
+        }
+    }
 
     // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
     patchBuffer.link(failureCases1, lastProtoBegin);
-    patchBuffer.link(failureCases2, lastProtoBegin);
+    if (failureCases2.isSet())
+        patchBuffer.link(failureCases2, lastProtoBegin);
 
     // On success return to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
-    structure->ref();
-    prototypeStructure->ref();
-    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
+
+    RefPtr<JITStubRoutine> stubCode = createJITStubRoutine(
+        FINALIZE_CODE(
+            patchBuffer,
+            ("Baseline JIT get_by_id proto list stub for %s, return point %p",
+                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
+                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+        *m_vm,
+        m_codeBlock->ownerExecutable(),
+        needsStubLink);
+    prototypeStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);
 
     // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode->code().code()));
 }
 
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
 {
     ASSERT(count);
-    
     JumpList bucketsOfFail;
 
     // Check regT0 is an object of the right Structure.
@@ -1645,105 +1059,728 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
     bucketsOfFail.append(baseObjectCheck);
 
     Structure* currStructure = structure;
-    RefPtr<Structure>* chainEntries = chain->head();
+    WriteBarrier<Structure>* it = chain->head();
     JSObject* protoObject = 0;
-    for (unsigned i = 0; i < count; ++i) {
+    for (unsigned i = 0; i < count; ++i, ++it) {
         protoObject = asObject(currStructure->prototypeForLookup(callFrame));
-        currStructure = chainEntries[i].get();
-
-        // Check the prototype object's Structure had not changed.
-        Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
-        move(ImmPtr(currStructure), regT3);
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+        currStructure = it->get();
+        testPrototype(protoObject, bucketsOfFail, stubInfo);
     }
     ASSERT(protoObject);
-
-    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+    
+    bool needsStubLink = false;
+    bool isDirect = false;
+    if (slot.cachedPropertyType() == PropertySlot::Getter) {
+        needsStubLink = true;
+        compileGetDirectOffset(protoObject, regT1, cachedOffset);
+        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+        stubCall.addArgument(regT1);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+        stubCall.call();
+    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+        needsStubLink = true;
+        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+        stubCall.addArgument(TrustedImmPtr(protoObject));
+        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+        stubCall.call();
+    } else {
+        isDirect = true;
+        compileGetDirectOffset(protoObject, regT0, cachedOffset);
+    }
     Jump success = jump();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+    
+    if (needsStubLink) {
+        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+            if (iter->to)
+                patchBuffer.link(iter->from, FunctionPtr(iter->to));
+        }
+    }
 
     // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
 
     patchBuffer.link(bucketsOfFail, lastProtoBegin);
 
     // On success, return to the hot patch code, at the point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
+
+    RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+        FINALIZE_CODE(
+            patchBuffer,
+            ("Baseline JIT get_by_id chain list stub for %s, return point %p",
+                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
+                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+        *m_vm,
+        m_codeBlock->ownerExecutable(),
+        needsStubLink);
 
     // Track the stub we have created so that it will be deleted later.
-    structure->ref();
-    chain->ref();
-    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
+    prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
 
     // Finally, repatch the hot path's jump to the slow case so that it jumps here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
 }
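
The chain stub just built stays valid only while every Structure along the prototype chain is unchanged, which is why testPrototype is emitted once per link. The invariant it encodes, restated as a hypothetical standalone model:

    #include <cstdio>

    struct Structure { }; // stands in for the shape identity being compared

    struct Object {
        const Structure* structure;
        const Object* prototype;
        int cachedSlot; // where the cached property lives on the final object
    };

    // Mirrors the stub's checks: verify the base Structure, then every
    // prototype Structure recorded at compile time, before loading the slot.
    static bool cachedChainGet(const Object* base, const Structure* baseStructure,
        const Structure* const* cachedChain, unsigned count, int& out)
    {
        if (base->structure != baseStructure)
            return false; // base shape changed
        const Object* current = base;
        for (unsigned i = 0; i < count; ++i) {
            current = current->prototype;
            if (current->structure != cachedChain[i])
                return false; // a prototype was mutated: bucketsOfFail
        }
        out = current->cachedSlot;
        return true;
    }

    int main()
    {
        Structure baseShape, protoShape;
        Object proto = { &protoShape, nullptr, 42 };
        Object base = { &baseShape, &proto, 0 };
        const Structure* chain[] = { &protoShape };
        int value;
        if (cachedChainGet(&base, &baseShape, chain, 1, value))
            std::printf("cached hit: %d\n", value); // prints 42
    }
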
 
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
 {
     ASSERT(count);
-    
+
     JumpList bucketsOfFail;
 
     // Check that regT0 holds an object with the expected Structure.
     bucketsOfFail.append(checkStructure(regT0, structure));
 
     Structure* currStructure = structure;
-    RefPtr<Structure>* chainEntries = chain->head();
+    WriteBarrier<Structure>* it = chain->head();
     JSObject* protoObject = 0;
-    for (unsigned i = 0; i < count; ++i) {
+    for (unsigned i = 0; i < count; ++i, ++it) {
         protoObject = asObject(currStructure->prototypeForLookup(callFrame));
-        currStructure = chainEntries[i].get();
-
-        // Check the prototype object's Structure had not changed.
-        Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
-        move(ImmPtr(currStructure), regT3);
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+        currStructure = it->get();
+        testPrototype(protoObject, bucketsOfFail, stubInfo);
     }
     ASSERT(protoObject);
 
-    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+    bool needsStubLink = false;
+    if (slot.cachedPropertyType() == PropertySlot::Getter) {
+        needsStubLink = true;
+        compileGetDirectOffset(protoObject, regT1, cachedOffset);
+        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+        stubCall.addArgument(regT1);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+        stubCall.call();
+    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+        needsStubLink = true;
+        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+        stubCall.addArgument(TrustedImmPtr(protoObject));
+        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+        stubCall.call();
+    } else
+        compileGetDirectOffset(protoObject, regT0, cachedOffset);
     Jump success = jump();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+
+    if (needsStubLink) {
+        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+            if (iter->to)
+                patchBuffer.link(iter->from, FunctionPtr(iter->to));
+        }
+    }
 
     // Use the patch information to link the failure cases back to the original slow case routine.
-    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
 
     // On success, return to the hot patch code, at the point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
 
     // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+        FINALIZE_CODE(
+            patchBuffer,
+            ("Baseline JIT get_by_id chain stub for %s, return point %p",
+                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
+                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+        *m_vm,
+        m_codeBlock->ownerExecutable(),
+        needsStubLink);
+    stubInfo->stubRoutine = stubRoutine;
 
     // Finally, repatch the hot path's jump to the slow case so that it jumps here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
 
     // We don't want to patch more than once - future misses go to cti_op_get_by_id_proto_list instead.
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
 }
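
relinkCallerToFunction is what advances the call site through the cache's state machine: the first miss compiles the monomorphic chain stub above, and the caller is then retargeted at cti_op_get_by_id_proto_list so later misses grow a polymorphic list instead of repatching the single stub again. A loose model of that state change (in the JIT this rewrites the call instruction in place; here it is just a mutable function pointer, and the names are hypothetical):

    #include <cstdio>

    static void buildMonomorphicStub() { std::puts("compile chain stub, retarget caller"); }
    static void growPolymorphicList() { std::puts("append a stub to the proto list"); }

    // The "call site": a patched call instruction in the real JIT.
    static void (*slowPathTarget)() = buildMonomorphicStub;

    int main()
    {
        slowPathTarget();                     // first miss
        slowPathTarget = growPolymorphicList; // relinkCallerToFunction(...)
        slowPathTarget();                     // every later miss
    }
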
 
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+    int skip = currentInstruction[3].u.operand;
+
+    emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT0);
+    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+    ASSERT(skip || !checkTopLevel);
+    if (checkTopLevel && skip--) {
+        Jump activationNotCreated;
+        if (checkTopLevel)
+            activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+        activationNotCreated.link(this);
+    }
+    while (skip--)
+        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+
+    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+    loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
+    emitValueProfilingSite();
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
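
op_get_scoped_var resolves a variable a fixed number of scopes out: hop skip times along JSScope::offsetOfNext (with a guard for a top-level activation that may not have been created yet), then index into the scope's register array. The same walk in plain C++ (a sketch with hypothetical types):

    #include <cstdio>
    #include <vector>

    struct Scope {
        Scope* next; // the enclosing scope
        std::vector<long> registers;
    };

    static long getScopedVar(Scope* scope, unsigned skip, unsigned index)
    {
        while (skip--)
            scope = scope->next; // loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0)
        return scope->registers[index];
    }

    int main()
    {
        Scope outer = { nullptr, { 10, 20, 30 } };
        Scope inner = { &outer, { } };
        std::printf("%ld\n", getScopedVar(&inner, 1, 2)); // prints 30
    }
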
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+    int skip = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+
+    emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1);
+    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+    ASSERT(skip || !checkTopLevel);
+    if (checkTopLevel && skip--) {
+        Jump activationNotCreated;
+        if (checkTopLevel)
+            activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+        loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);
+        activationNotCreated.link(this);
+    }
+    while (skip--)
+        loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);
+
+    emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+
+    loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
+    storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
+}
+
+void JIT::emit_op_init_global_const(Instruction* currentInstruction)
+{
+    JSGlobalObject* globalObject = m_codeBlock->globalObject();
+
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+
+    store64(regT0, currentInstruction[1].u.registerPointer);
+    if (Heap::isWriteBarrierEnabled())
+        emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+}
+
+void JIT::emit_op_init_global_const_check(Instruction* currentInstruction)
+{
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+
+    addSlowCase(branchTest8(NonZero, AbsoluteAddress(currentInstruction[3].u.predicatePointer)));
+
+    JSGlobalObject* globalObject = m_codeBlock->globalObject();
+
+    store64(regT0, currentInstruction[1].u.registerPointer);
+    if (Heap::isWriteBarrierEnabled())
+        emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+}
+
+void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+
+    JITStubCall stubCall(this, cti_op_init_global_const_check);
+    stubCall.addArgument(regT0);
+    stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
+    stubCall.call();
+}
+
+void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+{
+    repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(unusedPointer));
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), 0);
+    repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
+}
+
+void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+{
+    if (isDirectPutById(stubInfo))
+        repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
+    else
+        repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(unusedPointer));
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), 0);
+}
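
Resetting poisons the fast path rather than removing it: the inlined structure compare is repatched to unusedPointer, an address no live Structure can occupy, so the inline check always fails and control reaches the relinked slow case. Conceptually (a hypothetical model; the real code patches immediates inside machine code, not a struct field):

    #include <cstdint>
    #include <cstdio>

    struct Structure { };
    static Structure* const unusedPointer = reinterpret_cast<Structure*>(0x1); // sentinel

    struct InlineCache {
        Structure* structureToCompare;
        intptr_t displacement;
    };

    static void resetPatchPutById(InlineCache& ic)
    {
        ic.structureToCompare = unusedPointer; // can never match a real Structure
        ic.displacement = 0;
    }

    int main()
    {
        InlineCache ic = { nullptr, 16 };
        resetPatchPutById(ic);
        std::printf("inline path now always misses: %d\n", ic.structureToCompare == unusedPointer);
    }
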
+
+#endif // USE(JSVALUE64)
 
-#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+void JIT::emitWriteBarrier(RegisterID owner, RegisterID value, RegisterID scratch, RegisterID scratch2, WriteBarrierMode mode, WriteBarrierUseKind useKind)
+{
+    UNUSED_PARAM(owner);
+    UNUSED_PARAM(scratch);
+    UNUSED_PARAM(scratch2);
+    UNUSED_PARAM(useKind);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(mode);
+    ASSERT(owner != scratch);
+    ASSERT(owner != scratch2);
+    
+#if ENABLE(WRITE_BARRIER_PROFILING)
+    emitCount(WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+}
+
+void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode mode, WriteBarrierUseKind useKind)
+{
+    UNUSED_PARAM(owner);
+    UNUSED_PARAM(scratch);
+    UNUSED_PARAM(useKind);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(mode);
+    
+#if ENABLE(WRITE_BARRIER_PROFILING)
+    emitCount(WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+}
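
In this configuration both emitWriteBarrier overloads emit nothing unless WRITE_BARRIER_PROFILING is on, and even then they only bump a counter; the call sites exist so a real barrier can be slotted in later. For reference, a typical generational write barrier that such a hook would implement records old-to-new pointers so a minor collection can scan just the remembered set (a generic sketch, not JSC's implementation):

    #include <unordered_set>

    struct Cell { bool isOld = false; };

    static std::unordered_set<Cell*> rememberedSet;

    // Record owners in the old generation that now point at young cells.
    static inline void writeBarrier(Cell* owner, Cell* value)
    {
        if (owner->isOld && value && !value->isOld)
            rememberedSet.insert(owner);
    }

    int main()
    {
        Cell oldCell, youngCell;
        oldCell.isOld = true;
        writeBarrier(&oldCell, &youngCell); // oldCell enters the remembered set
        return rememberedSet.count(&oldCell) ? 0 : 1;
    }
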
+
+JIT::Jump JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, RegisterID scratch)
+{
+    if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
+        structure->addTransitionWatchpoint(stubInfo->addWatchpoint(m_codeBlock));
+#if !ASSERT_DISABLED
+        move(TrustedImmPtr(object), scratch);
+        Jump ok = branchPtr(Equal, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
+        breakpoint();
+        ok.link(this);
+#endif
+        Jump result; // Returning an unset jump this way because otherwise VC++ would complain.
+        return result;
+    }
+    
+    move(TrustedImmPtr(object), scratch);
+    return branchPtr(NotEqual, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
+}
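
The point of this helper is trading a per-execution branch for an invalidation hook: when the object's Structure already matches and its transition watchpoint set is still valid, the stub registers a watchpoint and emits no check at all; if the Structure ever transitions, the watchpoint fires and the stub is discarded. A rough model of that choice (hypothetical types):

    #include <functional>
    #include <vector>

    struct Structure {
        bool watchpointsValid = true;
        std::vector<std::function<void()>> watchpoints; // fired on transition
    };

    struct Object { Structure* structure; };

    // Returns true if a runtime check must be emitted; false means the
    // watchpoint will invalidate the stub when the Structure changes.
    static bool addStructureTransitionCheck(Object* object, Structure* expected,
        std::function<void()> invalidateStub)
    {
        if (object->structure == expected && expected->watchpointsValid) {
            expected->watchpoints.push_back(std::move(invalidateStub));
            return false; // no branch needed
        }
        return true; // emit branchPtr(NotEqual, ...)
    }

    int main()
    {
        Structure shape;
        Object object = { &shape };
        return addStructureTransitionCheck(&object, &shape, [] { /* discard stub */ });
    }
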
+
+void JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, JumpList& failureCases, RegisterID scratch)
+{
+    Jump failureCase = addStructureTransitionCheck(object, structure, stubInfo, scratch);
+    if (!failureCase.isSet())
+        return;
+    
+    failureCases.append(failureCase);
+}
+
+void JIT::testPrototype(JSValue prototype, JumpList& failureCases, StructureStubInfo* stubInfo)
+{
+    if (prototype.isNull())
+        return;
+
+    ASSERT(prototype.isCell());
+    addStructureTransitionCheck(prototype.asCell(), prototype.asCell()->structure(), stubInfo, failureCases, regT3);
+}
+
+bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
+{
+    switch (stubInfo->accessType) {
+    case access_put_by_id_transition_normal:
+        return false;
+    case access_put_by_id_transition_direct:
+        return true;
+    case access_put_by_id_replace:
+    case access_put_by_id_generic: {
+        void* oldCall = MacroAssembler::readCallTarget(stubInfo->callReturnLocation).executableAddress();
+        if (oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct)
+            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_generic)
+            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_fail))
+            return true;
+        ASSERT(oldCall == bitwise_cast<void*>(cti_op_put_by_id)
+               || oldCall == bitwise_cast<void*>(cti_op_put_by_id_generic)
+               || oldCall == bitwise_cast<void*>(cti_op_put_by_id_fail));
+        return false;
+    }
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return false;
+    }
+}
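
For the replace and generic access types the stub info does not record directness, so it is recovered by reading which slow-path entry point the call instruction currently targets; direct and normal put_by_id use disjoint sets of functions. The same idea in miniature (hypothetical names; readCallTarget does this against real machine code):

    #include <cstdio>

    static void putById() { }
    static void putByIdDirect() { }

    // The call target the repatching machinery would read back out of the
    // patched call instruction; modelled here as a plain pointer.
    static void (*callTarget)() = putByIdDirect;

    static bool isDirectPut()
    {
        return callTarget == putByIdDirect;
    }

    int main()
    {
        std::printf("%d\n", isDirectPut()); // 1
        callTarget = putById;
        std::printf("%d\n", isDirectPut()); // 0
    }
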
+
+void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+{
+    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+    
+    PatchableJump badType;
+    JumpList slowCases;
+    
+    switch (arrayMode) {
+    case JITInt32:
+        slowCases = emitInt32GetByVal(currentInstruction, badType);
+        break;
+    case JITDouble:
+        slowCases = emitDoubleGetByVal(currentInstruction, badType);
+        break;
+    case JITContiguous:
+        slowCases = emitContiguousGetByVal(currentInstruction, badType);
+        break;
+    case JITArrayStorage:
+        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
+        break;
+    case JITInt8Array:
+        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int8ArrayDescriptor(), 1, SignedTypedArray);
+        break;
+    case JITInt16Array:
+        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int16ArrayDescriptor(), 2, SignedTypedArray);
+        break;
+    case JITInt32Array:
+        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int32ArrayDescriptor(), 4, SignedTypedArray);
+        break;
+    case JITUint8Array:
+        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint8ArrayDescriptor(), 1, UnsignedTypedArray);
+        break;
+    case JITUint8ClampedArray:
+        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray);
+        break;
+    case JITUint16Array:
+        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint16ArrayDescriptor(), 2, UnsignedTypedArray);
+        break;
+    case JITUint32Array:
+        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint32ArrayDescriptor(), 4, UnsignedTypedArray);
+        break;
+    case JITFloat32Array:
+        slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_vm->float32ArrayDescriptor(), 4);
+        break;
+    case JITFloat64Array:
+        slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_vm->float64ArrayDescriptor(), 8);
+        break;
+    default:
+        CRASH();
+    }
+    
+    Jump done = jump();
+
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+    
+    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+    
+    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+    
+    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+        patchBuffer,
+        ("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+    
+    RepatchBuffer repatchBuffer(m_codeBlock);
+    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_val_generic));
+}
+
+void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+{
+    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+    
+    PatchableJump badType;
+    JumpList slowCases;
+    
+    switch (arrayMode) {
+    case JITInt32:
+        slowCases = emitInt32PutByVal(currentInstruction, badType);
+        break;
+    case JITDouble:
+        slowCases = emitDoublePutByVal(currentInstruction, badType);
+        break;
+    case JITContiguous:
+        slowCases = emitContiguousPutByVal(currentInstruction, badType);
+        break;
+    case JITArrayStorage:
+        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
+        break;
+    case JITInt8Array:
+        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int8ArrayDescriptor(), 1, SignedTypedArray, TruncateRounding);
+        break;
+    case JITInt16Array:
+        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int16ArrayDescriptor(), 2, SignedTypedArray, TruncateRounding);
+        break;
+    case JITInt32Array:
+        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int32ArrayDescriptor(), 4, SignedTypedArray, TruncateRounding);
+        break;
+    case JITUint8Array:
+        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint8ArrayDescriptor(), 1, UnsignedTypedArray, TruncateRounding);
+        break;
+    case JITUint8ClampedArray:
+        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray, ClampRounding);
+        break;
+    case JITUint16Array:
+        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint16ArrayDescriptor(), 2, UnsignedTypedArray, TruncateRounding);
+        break;
+    case JITUint32Array:
+        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint32ArrayDescriptor(), 4, UnsignedTypedArray, TruncateRounding);
+        break;
+    case JITFloat32Array:
+        slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_vm->float32ArrayDescriptor(), 4);
+        break;
+    case JITFloat64Array:
+        slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_vm->float64ArrayDescriptor(), 8);
+        break;
+    default:
+        CRASH();
+        break;
+    }
+    
+    Jump done = jump();
+
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+    
+    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+    
+    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+    
+    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+        patchBuffer,
+        ("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+    
+    RepatchBuffer repatchBuffer(m_codeBlock);
+    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_val_generic));
+}
+
+JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize, TypedArraySignedness signedness)
+{
+    // The best way to test the array type is to use the classInfo. We need to do so without
+    // clobbering the registers that hold the indexing type, base, and property.
+
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID resultPayload = regT0;
+    RegisterID scratch = regT3;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID resultPayload = regT0;
+    RegisterID resultTag = regT1;
+    RegisterID scratch = regT3;
+#endif
+    
+    JumpList slowCases;
+    
+    loadPtr(Address(base, JSCell::structureOffset()), scratch);
+    badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
+    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
+    loadPtr(Address(base, descriptor.m_storageOffset), base);
+    
+    switch (elementSize) {
+    case 1:
+        if (signedness == SignedTypedArray)
+            load8Signed(BaseIndex(base, property, TimesOne), resultPayload);
+        else
+            load8(BaseIndex(base, property, TimesOne), resultPayload);
+        break;
+    case 2:
+        if (signedness == SignedTypedArray)
+            load16Signed(BaseIndex(base, property, TimesTwo), resultPayload);
+        else
+            load16(BaseIndex(base, property, TimesTwo), resultPayload);
+        break;
+    case 4:
+        load32(BaseIndex(base, property, TimesFour), resultPayload);
+        break;
+    default:
+        CRASH();
+    }
+    
+    Jump done;
+    if (elementSize == 4 && signedness == UnsignedTypedArray) {
+        Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0));
+        
+        convertInt32ToDouble(resultPayload, fpRegT0);
+        addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
+#if USE(JSVALUE64)
+        moveDoubleTo64(fpRegT0, resultPayload);
+        sub64(tagTypeNumberRegister, resultPayload);
+#else
+        moveDoubleToInts(fpRegT0, resultPayload, resultTag);
+#endif
+        
+        done = jump();
+        canBeInt.link(this);
+    }
+
+#if USE(JSVALUE64)
+    or64(tagTypeNumberRegister, resultPayload);
+#else
+    move(TrustedImm32(JSValue::Int32Tag), resultTag);
+#endif
+    if (done.isSet())
+        done.link(this);
+    return slowCases;
+}
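
The Uint32Array case needs the extra dance because the element was loaded into a signed 32-bit register: any value with the top bit set would otherwise box as a negative int32. Converting to double and adding 2^32 recovers the unsigned value, e.g. 0xFFFFFFFF reads back as -1, and -1 + 4294967296 = 4294967295, the intended element. The correction in plain C++:

    #include <cstdint>
    #include <cstdio>

    // Mirrors the stub: reinterpret the raw element as int32; if negative,
    // recover the unsigned value by adding 2^32 as a double (twoToThe32).
    static double boxUint32(uint32_t raw)
    {
        int32_t asSigned = static_cast<int32_t>(raw);
        if (asSigned >= 0)
            return asSigned; // the canBeInt fast path
        return static_cast<double>(asSigned) + 4294967296.0;
    }

    int main()
    {
        std::printf("%.0f\n", boxUint32(0xFFFFFFFFu)); // prints 4294967295
    }
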
+
+JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize)
+{
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID resultPayload = regT0;
+    RegisterID scratch = regT3;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID resultPayload = regT0;
+    RegisterID resultTag = regT1;
+    RegisterID scratch = regT3;
+#endif
+    
+    JumpList slowCases;
+    
+    loadPtr(Address(base, JSCell::structureOffset()), scratch);
+    badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
+    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
+    loadPtr(Address(base, descriptor.m_storageOffset), base);
+    
+    switch (elementSize) {
+    case 4:
+        loadFloat(BaseIndex(base, property, TimesFour), fpRegT0);
+        convertFloatToDouble(fpRegT0, fpRegT0);
+        break;
+    case 8: {
+        loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
+        break;
+    }
+    default:
+        CRASH();
+    }
+    
+    Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
+    static const double NaN = QNaN;
+    loadDouble(&NaN, fpRegT0);
+    notNaN.link(this);
+    
+#if USE(JSVALUE64)
+    moveDoubleTo64(fpRegT0, resultPayload);
+    sub64(tagTypeNumberRegister, resultPayload);
+#else
+    moveDoubleToInts(fpRegT0, resultPayload, resultTag);
+#endif
+    return slowCases;
+}
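
The NaN check before boxing matters under a NaN-boxing value representation: a typed array can contain any NaN bit pattern, and an arbitrary payload could collide with the encodings reserved for pointers and tags, so every NaN read out of the array is collapsed to the one canonical quiet NaN (QNaN) before it becomes a JSValue. A bit-level illustration (generic sketch):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static double purifyNaN(double d)
    {
        if (d == d)
            return d; // not NaN: branchDouble(DoubleEqual, fpRegT0, fpRegT0)
        const uint64_t quietNaNBits = 0x7ff8000000000000ull;
        double q;
        std::memcpy(&q, &quietNaNBits, sizeof q);
        return q; // every NaN collapses to the canonical encoding
    }

    int main()
    {
        const uint64_t weirdNaNBits = 0x7ff0dead0000beefull; // NaN with a junk payload
        double d;
        std::memcpy(&d, &weirdNaNBits, sizeof d);
        uint64_t out;
        double purified = purifyNaN(d);
        std::memcpy(&out, &purified, sizeof out);
        std::printf("%llx\n", static_cast<unsigned long long>(out)); // 7ff8000000000000
    }
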
+
+JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize, TypedArraySignedness signedness, TypedArrayRounding rounding)
+{
+    unsigned value = currentInstruction[3].u.operand;
+
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID earlyScratch = regT3;
+    RegisterID lateScratch = regT2;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID earlyScratch = regT3;
+    RegisterID lateScratch = regT1;
+#endif
+    
+    JumpList slowCases;
+    
+    loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
+    badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
+    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
+    
+#if USE(JSVALUE64)
+    emitGetVirtualRegister(value, earlyScratch);
+    slowCases.append(emitJumpIfNotImmediateInteger(earlyScratch));
+#else
+    emitLoad(value, lateScratch, earlyScratch);
+    slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
+#endif
+    
+    // We would be loading this into base as in get_by_val, except that the slow
+    // path expects the base to be unclobbered.
+    loadPtr(Address(base, descriptor.m_storageOffset), lateScratch);
+    
+    if (rounding == ClampRounding) {
+        ASSERT(elementSize == 1);
+        ASSERT_UNUSED(signedness, signedness == UnsignedTypedArray);
+        Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
+        Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
+        xor32(earlyScratch, earlyScratch);
+        Jump clamped = jump();
+        tooBig.link(this);
+        move(TrustedImm32(0xff), earlyScratch);
+        clamped.link(this);
+        inBounds.link(this);
+    }
+    
+    switch (elementSize) {
+    case 1:
+        store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
+        break;
+    case 2:
+        store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo));
+        break;
+    case 4:
+        store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour));
+        break;
+    default:
+        CRASH();
+    }
+    
+    return slowCases;
+}
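
For Uint8ClampedArray the branches above implement saturating conversion rather than the modulo truncation used by the other integer views: negatives clamp to 0 and values above 255 clamp to 255, with the first compare done unsigned so the common 0..255 case takes a single branch. The equivalent scalar logic:

    #include <cstdint>
    #include <cstdio>

    static uint8_t clampToUint8(int32_t v)
    {
        if (static_cast<uint32_t>(v) <= 0xff) // unsigned compare: inBounds
            return static_cast<uint8_t>(v);
        if (v > 0xff)
            return 0xff; // tooBig
        return 0; // negative: the xor32 path
    }

    int main()
    {
        std::printf("%d %d %d\n", clampToUint8(-5), clampToUint8(300), clampToUint8(42));
        // prints: 0 255 42
    }
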
+
+JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize)
+{
+    unsigned value = currentInstruction[3].u.operand;
 
-#endif // USE(JSVALUE32_64)
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID earlyScratch = regT3;
+    RegisterID lateScratch = regT2;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID earlyScratch = regT3;
+    RegisterID lateScratch = regT1;
+#endif
+    
+    JumpList slowCases;
+    
+    loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
+    badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
+    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
+    
+#if USE(JSVALUE64)
+    emitGetVirtualRegister(value, earlyScratch);
+    Jump doubleCase = emitJumpIfNotImmediateInteger(earlyScratch);
+    convertInt32ToDouble(earlyScratch, fpRegT0);
+    Jump ready = jump();
+    doubleCase.link(this);
+    slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
+    add64(tagTypeNumberRegister, earlyScratch);
+    move64ToDouble(earlyScratch, fpRegT0);
+    ready.link(this);
+#else
+    emitLoad(value, lateScratch, earlyScratch);
+    Jump doubleCase = branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag));
+    convertInt32ToDouble(earlyScratch, fpRegT0);
+    Jump ready = jump();
+    doubleCase.link(this);
+    slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag)));
+    moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1);
+    ready.link(this);
+#endif
+    
+    // We would be loading this into base as in get_by_val, except that the slow
+    // path expects the base to be unclobbered.
+    loadPtr(Address(base, descriptor.m_storageOffset), lateScratch);
+    
+    switch (elementSize) {
+    case 4:
+        convertDoubleToFloat(fpRegT0, fpRegT0);
+        storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
+        break;
+    case 8:
+        storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight));
+        break;
+    default:
+        CRASH();
+    }
+    
+    return slowCases;
+}
 
 } // namespace JSC