diff --git a/jit/JITPropertyAccess.cpp b/jit/JITPropertyAccess.cpp
index 1b2b4dd005927a0a63c8eca5dc4e222a26e8ac8b..34d51517db490ca489c96c6696398f086109935d 100644
--- a/jit/JITPropertyAccess.cpp
+++ b/jit/JITPropertyAccess.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 
 #include "config.h"
 
-#if !USE(JSVALUE32_64)
-
-#include "JIT.h"
-
 #if ENABLE(JIT)
+#include "JIT.h"
 
 #include "CodeBlock.h"
+#include "GCAwareJITStubRoutine.h"
 #include "GetterSetter.h"
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
+#include "Interpreter.h"
+#include "JITInlines.h"
 #include "JSArray.h"
 #include "JSFunction.h"
 #include "JSPropertyNameIterator.h"
-#include "Interpreter.h"
+#include "JSVariableObject.h"
 #include "LinkBuffer.h"
 #include "RepatchBuffer.h"
 #include "ResultType.h"
 #include "SamplingTool.h"
+#include <wtf/StringPrintStream.h>
 
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
 
 namespace JSC {
+#if USE(JSVALUE64)
 
-PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
 {
-    JSInterfaceJIT jit;
+    JSInterfaceJIT jit(vm);
     JumpList failures;
-    failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
-    failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
-#if USE(JSVALUE64)
-    jit.zeroExtend32ToPtr(regT1, regT1);
-#else
-    jit.emitFastArithImmToInt(regT1);
-#endif
+    failures.append(JSC::branchStructure(jit,
+        NotEqual, 
+        Address(regT0, JSCell::structureIDOffset()), 
+        vm->stringStructure.get()));
 
-    // Load string length to regT1, and start the process of loading the data pointer into regT0
+    // Load string length to regT2, and start the process of loading the data pointer into regT0
     jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
     jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
-    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
-    
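+    // If the JSString is an unresolved rope, its value pointer is still null; bail to
+    // the slow path rather than trying to resolve the rope inside the stub.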
+    failures.append(jit.branchTest32(Zero, regT0));
+
     // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
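     // (For example, an index of -1 arrives here as 0xFFFFFFFF, which the unsigned
     // compare treats as 4294967295, comfortably AboveOrEqual any string length.)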
     failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
     
     // Load the character
+    JumpList is16Bit;
+    JumpList cont8Bit;
+    // Load the string flags
+    jit.loadPtr(Address(regT0, StringImpl::flagsOffset()), regT2);
+    jit.loadPtr(Address(regT0, StringImpl::dataOffset()), regT0);
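+    // A StringImpl stores its characters as either 8-bit Latin-1 or 16-bit UTF-16;
+    // the is-8-bit flag selects the element width for the indexed load below.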
+    is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(StringImpl::flagIs8Bit())));
+    jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
+    cont8Bit.append(jit.jump());
+    is16Bit.link(&jit);
     jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
-    
-    failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
-    jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+    cont8Bit.link(&jit);
+
+    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
+    jit.move(TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), regT1);
     jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
     jit.ret();
     
     failures.link(&jit);
-    jit.move(Imm32(0), regT0);
+    jit.move(TrustedImm32(0), regT0);
     jit.ret();
     
-    LinkBuffer patchBuffer(&jit, pool, 0);
-    return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
+    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+    return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
 }
 
 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned base = currentInstruction[2].u.operand;
-    unsigned property = currentInstruction[3].u.operand;
-
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+    
     emitGetVirtualRegisters(base, regT0, property, regT1);
     emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(JSVALUE64)
+
     // This is technically incorrect - we're zero-extending an int32.  On the hot path this doesn't matter.
     // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
     // number was signed since m_vectorLength is always less than intmax (since the total allocation
@@ -104,403 +107,454 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
     // to 64-bits is necessary since it's used in the address calculation.  We zero extend rather than sign
     // extending since it makes it easier to re-tag the value in the slow case.
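     // Concretely: an index of -1 zero-extends to 0xFFFFFFFF, fails the unsigned
     // vector-length compare, and reaches the slow case with its low 32 bits intact,
     // which is what makes the re-tagging there cheap.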
     zeroExtend32ToPtr(regT1, regT1);
-#else
-    emitFastArithImmToInt(regT1);
-#endif
+
     emitJumpSlowCaseIfNotJSCell(regT0, base);
-    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+    and32(TrustedImm32(IndexingShapeMask), regT2);
+
+    PatchableJump badType;
+    JumpList slowCases;
+
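+    // Speculate on the single indexing shape the array profile has observed so far. If
+    // the patchable badType check fails at run time, the ByValCompilationInfo recorded
+    // below lets this stub be regenerated for the shape actually encountered.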
+    JITArrayMode mode = chooseArrayMode(profile);
+    switch (mode) {
+    case JITInt32:
+        slowCases = emitInt32GetByVal(currentInstruction, badType);
+        break;
+    case JITDouble:
+        slowCases = emitDoubleGetByVal(currentInstruction, badType);
+        break;
+    case JITContiguous:
+        slowCases = emitContiguousGetByVal(currentInstruction, badType);
+        break;
+    case JITArrayStorage:
+        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
+        break;
+    default:
+        CRASH();
+        break;
+    }
+    
+    addSlowCase(badType);
+    addSlowCase(slowCases);
+    
+    Label done = label();
+    
+    if (!ASSERT_DISABLED) {
+        Jump resultOK = branchTest64(NonZero, regT0);
+        abortWithReason(JITGetByValResultIsNotEmpty);
+        resultOK.link(this);
+    }
 
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+    emitValueProfilingSite();
+    emitPutVirtualRegister(dst);
+    
+    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+}
 
-    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
-    addSlowCase(branchTestPtr(Zero, regT0));
+JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
+{
+    JumpList slowCases;
+    
+    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
+    loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
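+    // A hole in a DoubleShape butterfly is stored as pure NaN, and NaN is the only
+    // value for which x != x, so this self-compare doubles as the hole check.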
+    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+    moveDoubleTo64(fpRegT0, regT0);
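+    // Box the raw double bits as a JSValue: subtracting TagTypeNumber is equivalent to
+    // adding the 2^48 double-encode offset modulo 2^64.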
+    sub64(tagTypeNumberRegister, regT0);
+    
+    return slowCases;
+}
 
-    emitPutVirtualRegister(dst);
+JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
+{
+    JumpList slowCases;
+    
+    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
+    load64(BaseIndex(regT2, regT1, TimesEight), regT0);
+    slowCases.append(branchTest64(Zero, regT0));
+    
+    return slowCases;
+}
+
+JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
+{
+    JumpList slowCases;
+
+    add32(TrustedImm32(-ArrayStorageShape), regT2, regT3);
+    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
+
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
+
+    load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
+    slowCases.append(branchTest64(Zero, regT0));
+    
+    return slowCases;
 }
 
 void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned base = currentInstruction[2].u.operand;
-    unsigned property = currentInstruction[3].u.operand;
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
     
     linkSlowCase(iter); // property int32 check
     linkSlowCaseIfNotJSCell(iter, base); // base cell check
     Jump nonCell = jump();
     linkSlowCase(iter); // base array check
-    Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
-    emitNakedCall(m_globalData->getThunk(stringGetByValStubGenerator)->generatedJITCode().addressForCall());
-    Jump failed = branchTestPtr(Zero, regT0);
+    Jump notString = branchStructure(NotEqual, 
+        Address(regT0, JSCell::structureIDOffset()), 
+        m_vm->stringStructure.get());
+    emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
+    Jump failed = branchTest64(Zero, regT0);
     emitPutVirtualRegister(dst, regT0);
     emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
     failed.link(this);
     notString.link(this);
     nonCell.link(this);
     
+    Jump skipProfiling = jump();
+    
     linkSlowCase(iter); // vector length check
     linkSlowCase(iter); // empty value
     
-    JITStubCall stubCall(this, cti_op_get_by_val);
-    stubCall.addArgument(base, regT2);
-    stubCall.addArgument(property, regT2);
-    stubCall.call(dst);
+    emitArrayProfileOutOfBoundsSpecialCase(profile);
+    
+    skipProfiling.link(this);
+    
+    Label slowPath = label();
+    
+    emitGetVirtualRegister(base, regT0);
+    emitGetVirtualRegister(property, regT1);
+    Call call = callOperation(operationGetByValDefault, dst, regT0, regT1);
+
+    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+    m_byValInstructionIndex++;
+
+    emitValueProfilingSite();
 }
 
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
 {
-    ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
-    ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
-
-    Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
-    loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
-    Jump finishedLoad = jump();
-    notUsingInlineStorage.link(this);
-    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
-    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
-    finishedLoad.link(this);
+    ASSERT(sizeof(JSValue) == 8);
+    
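+    // Offsets below firstOutOfLineOffset name inline slots stored in the cell itself;
+    // larger offsets live at negative indices off the butterfly. Both paths below leave
+    // a base pointer in 'scratch' so the shared BaseIndex load at the end serves either
+    // storage kind.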
+    if (finalObjectMode == MayBeFinal) {
+        Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
+        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+        neg32(offset);
+        Jump done = jump();
+        isInline.link(this);
+        addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
+        done.link(this);
+    } else {
+        if (!ASSERT_DISABLED) {
+            Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
+            abortWithReason(JITOffsetIsNotOutOfLine);
+            isOutOfLine.link(this);
+        }
+        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+        neg32(offset);
+    }
+    signExtend32ToPtr(offset, offset);
+    load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
 }
 
 void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned base = currentInstruction[2].u.operand;
-    unsigned property = currentInstruction[3].u.operand;
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
     unsigned expected = currentInstruction[4].u.operand;
-    unsigned iter = currentInstruction[5].u.operand;
-    unsigned i = currentInstruction[6].u.operand;
+    int iter = currentInstruction[5].u.operand;
+    int i = currentInstruction[6].u.operand;
 
     emitGetVirtualRegister(property, regT0);
-    addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
+    addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
     emitGetVirtualRegisters(base, regT0, iter, regT1);
     emitJumpSlowCaseIfNotJSCell(regT0, base);
 
     // Test base's structure
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+    emitLoadStructure(regT0, regT2, regT3);
     addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
     load32(addressFor(i), regT3);
-    sub32(Imm32(1), regT3);
+    sub32(TrustedImm32(1), regT3);
     addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
-    compileGetDirectOffset(regT0, regT0, regT2, regT3, regT1);
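+    // Convert the iterator's flat slot index into a PropertyOffset: indices below the
+    // cached inline capacity are already inline offsets; anything else is rebased into
+    // the out-of-line range that compileGetDirectOffset expects.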
+    Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
+    add32(TrustedImm32(firstOutOfLineOffset), regT3);
+    sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
+    inlineProperty.link(this);
+    compileGetDirectOffset(regT0, regT0, regT3, regT1);
 
     emitPutVirtualRegister(dst, regT0);
 }
 
 void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned dst = currentInstruction[1].u.operand;
-    unsigned base = currentInstruction[2].u.operand;
-    unsigned property = currentInstruction[3].u.operand;
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
 
     linkSlowCase(iter);
     linkSlowCaseIfNotJSCell(iter, base);
     linkSlowCase(iter);
     linkSlowCase(iter);
 
-    JITStubCall stubCall(this, cti_op_get_by_val);
-    stubCall.addArgument(base, regT2);
-    stubCall.addArgument(property, regT2);
-    stubCall.call(dst);
+    emitGetVirtualRegister(base, regT0);
+    emitGetVirtualRegister(property, regT1);
+    callOperation(operationGetByValGeneric, dst, regT0, regT1);
 }
 
 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
 {
-    unsigned base = currentInstruction[1].u.operand;
-    unsigned property = currentInstruction[2].u.operand;
-    unsigned value = currentInstruction[3].u.operand;
+    int base = currentInstruction[1].u.operand;
+    int property = currentInstruction[2].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
 
     emitGetVirtualRegisters(base, regT0, property, regT1);
     emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(JSVALUE64)
     // See comment in op_get_by_val.
     zeroExtend32ToPtr(regT1, regT1);
-#else
-    emitFastArithImmToInt(regT1);
-#endif
     emitJumpSlowCaseIfNotJSCell(regT0, base);
-    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
-
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-
-    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
-
-    Label storeResult(this);
-    emitGetVirtualRegister(value, regT0);
-    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
-    Jump end = jump();
+    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+    and32(TrustedImm32(IndexingShapeMask), regT2);
     
-    empty.link(this);
-    add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
-    branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
-
-    move(regT1, regT0);
-    add32(Imm32(1), regT0);
-    store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
-    jump().linkTo(storeResult, this);
-
-    end.link(this);
-}
+    PatchableJump badType;
+    JumpList slowCases;
+    
+    JITArrayMode mode = chooseArrayMode(profile);
+    switch (mode) {
+    case JITInt32:
+        slowCases = emitInt32PutByVal(currentInstruction, badType);
+        break;
+    case JITDouble:
+        slowCases = emitDoublePutByVal(currentInstruction, badType);
+        break;
+    case JITContiguous:
+        slowCases = emitContiguousPutByVal(currentInstruction, badType);
+        break;
+    case JITArrayStorage:
+        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
+        break;
+    default:
+        CRASH();
+        break;
+    }
+    
+    addSlowCase(badType);
+    addSlowCase(slowCases);
+    
+    Label done = label();
+    
+    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
 
-void JIT::emit_op_put_by_index(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, cti_op_put_by_index);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
-    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-    stubCall.call();
 }
 
-void JIT::emit_op_put_getter(Instruction* currentInstruction)
+JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
 {
-    JITStubCall stubCall(this, cti_op_put_getter);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-    stubCall.call();
-}
+    int value = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+    
+    JumpList slowCases;
 
-void JIT::emit_op_put_setter(Instruction* currentInstruction)
-{
-    JITStubCall stubCall(this, cti_op_put_setter);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-    stubCall.call();
+    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape));
+    
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+    Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));
+
+    Label storeResult = label();
+    emitGetVirtualRegister(value, regT3);
+    switch (indexingShape) {
+    case Int32Shape:
+        slowCases.append(emitJumpIfNotImmediateInteger(regT3));
+        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
+        break;
+    case DoubleShape: {
+        Jump notInt = emitJumpIfNotImmediateInteger(regT3);
+        convertInt32ToDouble(regT3, fpRegT0);
+        Jump ready = jump();
+        notInt.link(this);
+        add64(tagTypeNumberRegister, regT3);
+        move64ToDouble(regT3, fpRegT0);
+        slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+        ready.link(this);
+        storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight));
+        break;
+    }
+    case ContiguousShape:
+        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
+        emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
+        break;
+    default:
+        CRASH();
+        break;
+    }
+    
+    Jump done = jump();
+    outOfBounds.link(this);
+    
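+    // Past the public length but possibly still within the allocated vector: treat the
+    // store as an append by bumping the public length and retrying.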
+    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength())));
+    
+    emitArrayProfileStoreToHoleSpecialCase(profile);
+    
+    add32(TrustedImm32(1), regT1, regT3);
+    store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength()));
+    jump().linkTo(storeResult, this);
+    
+    done.link(this);
+    
+    return slowCases;
 }
 
-void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
 {
-    JITStubCall stubCall(this, cti_op_del_by_id);
-    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
-    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
-    stubCall.call(currentInstruction[1].u.operand);
-}
-
-
-#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
-#endif
+    int value = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+    
+    JumpList slowCases;
+    
+    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
 
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
-    unsigned resultVReg = currentInstruction[1].u.operand;
-    unsigned baseVReg = currentInstruction[2].u.operand;
-    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+    Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
 
-    emitGetVirtualRegister(baseVReg, regT0);
-    JITStubCall stubCall(this, cti_op_get_by_id_generic);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(ImmPtr(ident));
-    stubCall.call(resultVReg);
+    Label storeResult(this);
+    emitGetVirtualRegister(value, regT3);
+    store64(regT3, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+    emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
+    Jump end = jump();
+    
+    empty.link(this);
+    emitArrayProfileStoreToHoleSpecialCase(profile);
+    add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset()));
+    branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this);
 
-    m_propertyAccessInstructionIndex++;
-}
+    add32(TrustedImm32(1), regT1);
+    store32(regT1, Address(regT2, ArrayStorage::lengthOffset()));
+    sub32(TrustedImm32(1), regT1);
+    jump().linkTo(storeResult, this);
 
-void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
-    ASSERT_NOT_REACHED();
+    end.link(this);
+    
+    return slowCases;
 }
 
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned baseVReg = currentInstruction[1].u.operand;
-    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
-    unsigned valueVReg = currentInstruction[3].u.operand;
-    unsigned direct = currentInstruction[8].u.operand;
+    int base = currentInstruction[1].u.operand;
+    int property = currentInstruction[2].u.operand;
+    int value = currentInstruction[3].u.operand;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
 
-    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
+    linkSlowCase(iter); // property int32 check
+    linkSlowCaseIfNotJSCell(iter, base); // base cell check
+    linkSlowCase(iter); // base not array check
+    
+    JITArrayMode mode = chooseArrayMode(profile);
+    switch (mode) {
+    case JITInt32:
+    case JITDouble:
+        linkSlowCase(iter); // value type check
+        break;
+    default:
+        break;
+    }
+    
+    Jump skipProfiling = jump();
+    linkSlowCase(iter); // out of bounds
+    emitArrayProfileOutOfBoundsSpecialCase(profile);
+    skipProfiling.link(this);
+    
+    Label slowPath = label();
 
-    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(ImmPtr(ident));
-    stubCall.addArgument(regT1);
-    stubCall.call();
+    emitGetVirtualRegister(property, regT1);
+    emitGetVirtualRegister(value, regT2);
+    bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
+    Call call = callOperation(isDirect ? operationDirectPutByVal : operationPutByVal, regT0, regT1, regT2);
 
-    m_propertyAccessInstructionIndex++;
+    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+    m_byValInstructionIndex++;
 }
 
-void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
 {
-    ASSERT_NOT_REACHED();
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
+    callOperation(operationPutByIndex, regT0, currentInstruction[2].u.operand, regT1);
 }
 
-#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-void JIT::emit_op_method_check(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
 {
-    // Assert that the following instruction is a get_by_id.
-    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
-
-    currentInstruction += OPCODE_LENGTH(op_method_check);
-    unsigned resultVReg = currentInstruction[1].u.operand;
-    unsigned baseVReg = currentInstruction[2].u.operand;
-    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
-    emitGetVirtualRegister(baseVReg, regT0);
-
-    // Do the method check - check the object & its prototype's structure inline (this is the common case).
-    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
-    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
-
-    Jump notCell = emitJumpIfNotJSCell(regT0);
-
-    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
-    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
-    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-
-    // This will be relinked to load the function without doing a load.
-    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
-
-    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
-    Jump match = jump();
-
-    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
-    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
-    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
-
-    // Link the failure cases here.
-    notCell.link(this);
-    structureCheck.link(this);
-    protoStructureCheck.link(this);
-
-    // Do a regular(ish) get_by_id (the slow case will be linked to
-    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
-    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
-
-    match.link(this);
-    emitPutVirtualRegister(resultVReg);
-
-    // We've already generated the following get_by_id, so make sure it's skipped over.
-    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
+    emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
+    callOperation(operationPutGetterSetter, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), regT1, regT2);
 }
 
-void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
 {
-    currentInstruction += OPCODE_LENGTH(op_method_check);
-    unsigned resultVReg = currentInstruction[1].u.operand;
-    unsigned baseVReg = currentInstruction[2].u.operand;
-    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
-    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
-
-    // We've already generated the following get_by_id, so make sure it's skipped over.
-    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
+    emitGetVirtualRegister(base, regT0);
+    callOperation(operationDeleteById, dst, regT0, &m_codeBlock->identifier(property));
 }
 
-#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-
-#endif
-
 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
 {
-    unsigned resultVReg = currentInstruction[1].u.operand;
-    unsigned baseVReg = currentInstruction[2].u.operand;
-    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+    int resultVReg = currentInstruction[1].u.operand;
+    int baseVReg = currentInstruction[2].u.operand;
+    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
 
     emitGetVirtualRegister(baseVReg, regT0);
-    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
-    emitPutVirtualRegister(resultVReg);
-}
-
-void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
-{
-    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
-    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
-    // to array-length / prototype access trampolines), and we also record the property-map access offset as a label
-    // to jump back to if one of these trampolines finds a match.
-
+    
     emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+    
+    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
+        emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT1, m_bytecodeOffset);
 
-    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
-
-    Label hotPathBegin(this);
-    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
-
-    DataLabelPtr structureToCompare;
-    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-    addSlowCase(structureCheck);
-    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
-    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
-
-    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
-    Label externalLoadComplete(this);
-    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetGetByIdExternalLoad);
-    ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthGetByIdExternalLoad);
-
-    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
-    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);
-
-    Label putResult(this);
-
-    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+    JITGetByIdGenerator gen(
+        m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
+        JSValueRegs(regT0), JSValueRegs(regT0), DontSpill);
+    gen.generateFastPath(*this);
+    addSlowCase(gen.slowPathJump());
+    m_getByIds.append(gen);
 
-    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
+    emitValueProfilingSite();
+    emitPutVirtualRegister(resultVReg);
 }
 
 void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned resultVReg = currentInstruction[1].u.operand;
-    unsigned baseVReg = currentInstruction[2].u.operand;
-    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
-    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
-}
-
-void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
-{
-    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
-    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
-    // of the call (which we can use to look up the patch information), but should an array-length or
-    // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
-    // the distance from the call to the head of the slow case.
+    int resultVReg = currentInstruction[1].u.operand;
+    int baseVReg = currentInstruction[2].u.operand;
+    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
 
     linkSlowCaseIfNotJSCell(iter, baseVReg);
     linkSlowCase(iter);
 
-    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
-#ifndef NDEBUG
-    Label coldPathBegin(this);
-#endif
-    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(ImmPtr(ident));
-    Call call = stubCall.call(resultVReg);
-
-    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
-    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
+    JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
+    
+    Label coldPathBegin = label();
+    
+    Call call = callOperation(WithProfile, operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());
 
-    // Track the location of the call; this will be used to recover patch information.
-    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
-    m_propertyAccessInstructionIndex++;
+    gen.reportSlowPathCall(coldPathBegin, call);
 }
 
 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
 {
-    unsigned baseVReg = currentInstruction[1].u.operand;
-    unsigned valueVReg = currentInstruction[3].u.operand;
+    int baseVReg = currentInstruction[1].u.operand;
+    int valueVReg = currentInstruction[3].u.operand;
+    unsigned direct = currentInstruction[8].u.operand;
 
-    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
+    emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase);
 
     // In order to be able to patch both the Structure, and the object offset, we store one pointer,
     // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
@@ -511,616 +565,799 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
     // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
     emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
 
-    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
-    Label hotPathBegin(this);
-    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
-
-    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
-    DataLabelPtr structureToCompare;
-    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
-    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
-
-    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
-    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
-    Label externalLoadComplete(this);
-    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetPutByIdExternalLoad);
-    ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthPutByIdExternalLoad);
-
-    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
-
-    END_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
-    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
+    JITPutByIdGenerator gen(
+        m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
+        JSValueRegs(regT0), JSValueRegs(regT1), regT2, DontSpill, m_codeBlock->ecmaMode(),
+        direct ? Direct : NotDirect);
+    
+    gen.generateFastPath(*this);
+    addSlowCase(gen.slowPathJump());
+    
+    m_putByIds.append(gen);
 }
 
 void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    unsigned baseVReg = currentInstruction[1].u.operand;
-    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
-    unsigned direct = currentInstruction[8].u.operand;
-
-    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
+    int baseVReg = currentInstruction[1].u.operand;
+    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
 
     linkSlowCaseIfNotJSCell(iter, baseVReg);
     linkSlowCase(iter);
 
-    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(ImmPtr(ident));
-    stubCall.addArgument(regT1);
-    Call call = stubCall.call();
+    Label coldPathBegin(this);
+    
+    JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];
+
+    Call call = callOperation(
+        gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, ident->impl());
 
-    // Track the location of the call; this will be used to recover patch information.
-    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
+    gen.reportSlowPathCall(coldPathBegin, call);
 }
 
 // Compile a store into an object's property storage.  May overwrite the
 // value in objectReg.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
 {
-    int offset = cachedOffset * sizeof(JSValue);
-    if (structure->isUsingInlineStorage())
-        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
-    else
-        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
-    storePtr(value, Address(base, offset));
+    if (isInlineOffset(cachedOffset)) {
+        store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
+        return;
+    }
+    
+    loadPtr(Address(base, JSObject::butterflyOffset()), base);
+    store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
 }
 
 // Compile a load from an object's property storage.  May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
 {
-    int offset = cachedOffset * sizeof(JSValue);
-    if (structure->isUsingInlineStorage())
-        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
-    else
-        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
-    loadPtr(Address(base, offset), result);
+    if (isInlineOffset(cachedOffset)) {
+        load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
+        return;
+    }
+    
+    loadPtr(Address(base, JSObject::butterflyOffset()), result);
+    load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
 }
 
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
 {
-    if (base->isUsingInlineStorage())
-        loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
-    else {
-        PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
-        loadPtr(static_cast<void*>(protoPropertyStorage), temp);
-        loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
-    } 
+    if (isInlineOffset(cachedOffset)) {
+        load64(base->locationForOffset(cachedOffset), result);
+        return;
+    }
+    
+    loadPtr(base->butterflyAddress(), result);
+    load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
 }
 
-void JIT::testPrototype(Structure* structure, JumpList& failureCases)
+void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
 {
-    if (structure->m_prototype.isNull())
+    if (!needsVarInjectionChecks)
         return;
-
-    move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
-    move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
-    failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
+    addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
 }
 
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+void JIT::emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth)
 {
-    JumpList failureCases;
-    // Check eax is an object of the right Structure.
-    failureCases.append(emitJumpIfNotJSCell(regT0));
-    failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
-    testPrototype(oldStructure, failureCases);
-
-    // ecx = baseObject->m_structure
-    if (!direct) {
-        for (RefPtr<Structure>* it = chain->head(); *it; ++it)
-            testPrototype(it->get(), failureCases);
-    }
-
-    Call callTarget;
-
-    // emit a call only if storage realloc is needed
-    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
-    if (willNeedStorageRealloc) {
-        // This trampoline was called like a JIT stub; before we can call again we need to
-        // remove the return address from the stack, to prevent the stack from becoming misaligned.
-        preserveReturnAddressAfterCall(regT3);
-        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
-        stubCall.skipArgument(); // base
-        stubCall.skipArgument(); // ident
-        stubCall.skipArgument(); // value
-        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
-        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
-        stubCall.call(regT0);
-        emitGetJITStubArg(2, regT1);
-
-        restoreReturnAddressBeforeReturn(regT3);
+    emitVarInjectionCheck(needsVarInjectionChecks);
+    emitGetVirtualRegister(JSStack::ScopeChain, regT0);
+    if (m_codeBlock->needsActivation()) {
+        emitGetVirtualRegister(m_codeBlock->activationRegister(), regT1);
+        Jump noActivation = branchTestPtr(Zero, regT1);
+        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+        noActivation.link(this);
     }
+    for (unsigned i = 0; i < depth; ++i)
+        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+    emitPutVirtualRegister(dst);
+}
 
-    // Assumes m_refCount can be decremented easily; the decrement is safe, as the
-    // CodeBlock should ensure oldStructure->m_refCount > 0.
-    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
-    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
-    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
-
-    // write the value
-    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
-
-    ret();
-    
-    ASSERT(!failureCases.empty());
-    failureCases.link(this);
-    restoreArgumentReferenceForTrampoline();
-    Call failureCall = tailRecursiveCall();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
-    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
-
-    if (willNeedStorageRealloc) {
-        ASSERT(m_calls.size() == 1);
-        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
+    unsigned depth = currentInstruction[4].u.operand;
+
+    switch (resolveType) {
+    case GlobalProperty:
+    case GlobalVar:
+    case GlobalPropertyWithVarInjectionChecks:
+    case GlobalVarWithVarInjectionChecks:
+        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+        move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+        emitPutVirtualRegister(dst);
+        break;
+    case ClosureVar:
+    case ClosureVarWithVarInjectionChecks:
+        emitResolveClosure(dst, needsVarInjectionChecks(resolveType), depth);
+        break;
+    case Dynamic:
+        addSlowCase(jump());
+        break;
     }
-    
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
 }
 
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    RepatchBuffer repatchBuffer(codeBlock);
+    int dst = currentInstruction[1].u.operand;
+    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
 
-    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
-    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
-
-    int offset = sizeof(JSValue) * cachedOffset;
-
-    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
-    // and makes the subsequent load's offset automatically correct
-    if (structure->isUsingInlineStorage())
-        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
+    if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar)
+        return;
 
-    // Patch the offset into the property map to load from, then patch the Structure to look for.
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
+    linkSlowCase(iter);
+    int32_t identifierIndex = currentInstruction[2].u.operand;
+    callOperation(operationResolveScope, dst, identifierIndex);
 }
 
-void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
+void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
 {
-    RepatchBuffer repatchBuffer(codeBlock);
+    emitGetVirtualRegister(scope, regT0);
+    loadPtr(structureSlot, regT1);
+    addSlowCase(branchTestPtr(Zero, regT1));
+    load32(Address(regT1, Structure::structureIDOffset()), regT1);
+    addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1));
+}
 
-    ASSERT(!methodCallLinkInfo.cachedStructure);
-    methodCallLinkInfo.cachedStructure = structure;
-    structure->ref();
+void JIT::emitGetGlobalProperty(uintptr_t* operandSlot)
+{
+    load32(operandSlot, regT1);
+    compileGetDirectOffset(regT0, regT0, regT1, regT2, KnownNotFinal);
+}
 
-    Structure* prototypeStructure = proto->structure();
-    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
-    prototypeStructure->ref();
+void JIT::emitGetGlobalVar(uintptr_t operand)
+{
+    loadPtr(reinterpret_cast<void*>(operand), regT0);
+}
 
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
-    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+void JIT::emitGetClosureVar(int scope, uintptr_t operand)
+{
+    emitGetVirtualRegister(scope, regT0);
+    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+    loadPtr(Address(regT0, operand * sizeof(Register)), regT0);
+}
 
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
+void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int scope = currentInstruction[2].u.operand;
+    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+    Structure** structureSlot = currentInstruction[5].u.structure.slot();
+    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+
+    switch (resolveType) {
+    case GlobalProperty:
+    case GlobalPropertyWithVarInjectionChecks:
+        emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+        emitGetGlobalProperty(operandSlot);
+        break;
+    case GlobalVar:
+    case GlobalVarWithVarInjectionChecks:
+        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+        emitGetGlobalVar(*operandSlot);
+        break;
+    case ClosureVar:
+    case ClosureVarWithVarInjectionChecks:
+        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+        emitGetClosureVar(scope, *operandSlot);
+        break;
+    case Dynamic:
+        addSlowCase(jump());
+        break;
+    }
+    emitPutVirtualRegister(dst);
+    emitValueProfilingSite();
 }
 
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
+void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    RepatchBuffer repatchBuffer(codeBlock);
+    int dst = currentInstruction[1].u.operand;
+    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
 
-    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
-    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+    if (resolveType == GlobalVar || resolveType == ClosureVar)
+        return;
 
-    int offset = sizeof(JSValue) * cachedOffset;
+    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
+        linkSlowCase(iter);
+    linkSlowCase(iter);
+    callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
+}
 
-    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
-    // and makes the subsequent load's offset automatically correct
-    if (structure->isUsingInlineStorage())
-        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
+void JIT::emitPutGlobalProperty(uintptr_t* operandSlot, int value)
+{
+    emitGetVirtualRegister(value, regT2);
 
-    // Patch the offset into the property map to load from, then patch the Structure to look for.
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
-    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
+    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+    loadPtr(operandSlot, regT1);
+    negPtr(regT1);
+    storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
 }
 
-void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+void JIT::emitNotifyWrite(RegisterID value, RegisterID scratch, VariableWatchpointSet* set)
 {
-    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+    if (!set || set->state() == IsInvalidated)
+        return;
+    
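+    // The variable is still being watched: storing the already-inferred value is fine,
+    // but any other value must take the slow path so the watchpoint set can be fired.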
+    load8(set->addressOfState(), scratch);
+    Jump isDone = branch32(Equal, scratch, TrustedImm32(IsInvalidated));
+    addSlowCase(branch64(NotEqual, AbsoluteAddress(set->addressOfInferredValue()), value));
+    isDone.link(this);
+}
 
-    // Check eax is an array
-    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+void JIT::emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet* set)
+{
+    emitGetVirtualRegister(value, regT0);
+    emitNotifyWrite(regT0, regT1, set);
+    storePtr(regT0, reinterpret_cast<void*>(operand));
+}
 
-    // Checks out okay! - get the length from the storage
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
+void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value)
+{
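+    // Closure vars live in the scope object's register array; the operand is the
+    // index of the register within that array.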
+    emitGetVirtualRegister(value, regT1);
+    emitGetVirtualRegister(scope, regT0);
+    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+    storePtr(regT1, Address(regT0, operand * sizeof(Register)));
+}
 
-    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
+void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
+{
+    int scope = currentInstruction[1].u.operand;
+    int value = currentInstruction[3].u.operand;
+    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+    Structure** structureSlot = currentInstruction[5].u.structure.slot();
+    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+
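+    // Each resolve type gets a specialized store: global properties go through a
+    // structure-checked butterfly store, global vars through a direct store
+    // guarded by the variable's watchpoint set, and closure vars through the
+    // scope's register array. Dynamic always defers to the slow path.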
+    switch (resolveType) {
+    case GlobalProperty:
+    case GlobalPropertyWithVarInjectionChecks:
+        emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
+        emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+        emitPutGlobalProperty(operandSlot, value);
+        break;
+    case GlobalVar:
+    case GlobalVarWithVarInjectionChecks:
+        emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
+        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+        emitPutGlobalVar(*operandSlot, value, currentInstruction[5].u.watchpointSet);
+        break;
+    case ClosureVar:
+    case ClosureVarWithVarInjectionChecks:
+        emitWriteBarrier(scope, value, ShouldFilterValue);
+        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+        emitPutClosureVar(scope, *operandSlot, value);
+        break;
+    case Dynamic:
+        addSlowCase(jump());
+        break;
+    }
+}
 
-    emitFastArithIntToImmNoCheck(regT2, regT0);
-    Jump success = jump();
+void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+    unsigned linkCount = 0;
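+    // The number of linkSlowCase() calls below must mirror the addSlowCase()
+    // calls the fast path made for this resolve type: two for the structure
+    // check, one for a var-injection check (or Dynamic's unconditional jump),
+    // and one for a still-watched global var's inferred-value check.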
+    if (resolveType != GlobalVar && resolveType != ClosureVar)
+        linkCount++;
+    if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
+        && currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
+        linkCount++;
+    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
+        linkCount++;
+    if (!linkCount)
+        return;
+    while (linkCount--)
+        linkSlowCase(iter);
+    callOperation(operationPutToScope, currentInstruction);
+}
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+void JIT::emit_op_init_global_const(Instruction* currentInstruction)
+{
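+    // The global const's slot was resolved when the bytecode was generated, so
+    // the store goes straight through the captured register pointer.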
+    JSGlobalObject* globalObject = m_codeBlock->globalObject();
+    emitWriteBarrier(globalObject, currentInstruction[2].u.operand, ShouldFilterValue);
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+    store64(regT0, currentInstruction[1].u.registerPointer);
+}
 
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
-    patchBuffer.link(failureCases1, slowCaseBegin);
-    patchBuffer.link(failureCases2, slowCaseBegin);
+#endif // USE(JSVALUE64)
 
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+#if USE(JSVALUE64)
+void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
+{
+#if ENABLE(GGC)
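+    // Only cells need a barrier. The mode says which operands must be tag-checked
+    // at run time; the barrier call itself is skipped when the owner's mark byte
+    // shows it is not yet marked or is already remembered.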
+    Jump valueNotCell;
+    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
+        emitGetVirtualRegister(value, regT0);
+        valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
+    }
+    
+    emitGetVirtualRegister(owner, regT0);
+    Jump ownerNotCell;
+    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
+        ownerNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
+
+    Jump ownerNotMarkedOrAlreadyRemembered = checkMarkByte(regT0);
+    callOperation(operationUnconditionalWriteBarrier, regT0);
+    ownerNotMarkedOrAlreadyRemembered.link(this);
+
+    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
+        ownerNotCell.link(this);
+    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) 
+        valueNotCell.link(this);
+#else
+    UNUSED_PARAM(owner);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(mode);
+#endif
+}
 
-    // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
+{
+#if ENABLE(GGC)
+    emitGetVirtualRegister(value, regT0);
+    Jump valueNotCell;
+    if (mode == ShouldFilterValue)
+        valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
 
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    emitWriteBarrier(owner);
 
-    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+    if (mode == ShouldFilterValue) 
+        valueNotCell.link(this);
+#else
+    UNUSED_PARAM(owner);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(mode);
+#endif
 }
 
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+#else // USE(JSVALUE64)
+
+void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
 {
-    // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
-    // referencing the prototype object - let's speculatively load its table nice and early!)
-    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
-    // Check eax is an object of the right Structure.
-    Jump failureCases1 = checkStructure(regT0, structure);
-
-    // Check that the prototype object's Structure has not changed.
-    Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
-    move(ImmPtr(prototypeStructure), regT3);
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#if ENABLE(GGC)
+    Jump valueNotCell;
+    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
+        emitLoadTag(value, regT0);
+        valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
+    }
+    
+    emitLoad(owner, regT0, regT1);
+    Jump ownerNotCell;
+    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
+        ownerNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
+
+    Jump ownerNotMarkedOrAlreadyRemembered = checkMarkByte(regT1);
+    callOperation(operationUnconditionalWriteBarrier, regT1);
+    ownerNotMarkedOrAlreadyRemembered.link(this);
+
+    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
+        ownerNotCell.link(this);
+    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) 
+        valueNotCell.link(this);
 #else
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+    UNUSED_PARAM(owner);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(mode);
 #endif
+}
 
-    bool needsStubLink = false;
-    
-    // Checks out okay!
-    if (slot.cachedPropertyType() == PropertySlot::Getter) {
-        needsStubLink = true;
-        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
-        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
-        stubCall.addArgument(regT1);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
-        stubCall.call();
-    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
-        needsStubLink = true;
-        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
-        stubCall.addArgument(ImmPtr(protoObject));
-        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
-        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
-        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
-        stubCall.call();
-    } else
-        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
-    Jump success = jump();
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
-    patchBuffer.link(failureCases1, slowCaseBegin);
-    patchBuffer.link(failureCases2, slowCaseBegin);
-
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    if (needsStubLink) {
-        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
-            if (iter->to)
-                patchBuffer.link(iter->from, FunctionPtr(iter->to));
-        }
+void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
+{
+#if ENABLE(GGC)
+    Jump valueNotCell;
+    if (mode == ShouldFilterValue) {
+        emitLoadTag(value, regT0);
+        valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
     }
-    // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
 
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    emitWriteBarrier(owner);
 
-    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+    if (mode == ShouldFilterValue) 
+        valueNotCell.link(this);
+#else
+    UNUSED_PARAM(owner);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(mode);
+#endif
 }
 
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+#endif // USE(JSVALUE64)
+
+void JIT::emitWriteBarrier(JSCell* owner)
 {
-    Jump failureCase = checkStructure(regT0, structure);
-    bool needsStubLink = false;
-    if (slot.cachedPropertyType() == PropertySlot::Getter) {
-        needsStubLink = true;
-        if (!structure->isUsingInlineStorage()) {
-            move(regT0, regT1);
-            compileGetDirectOffset(regT1, regT1, structure, cachedOffset);
-        } else
-            compileGetDirectOffset(regT0, regT1, structure, cachedOffset);
-        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
-        stubCall.addArgument(regT1);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
-        stubCall.call();
-    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
-        needsStubLink = true;
-        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
-        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
-        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
-        stubCall.call();
+#if ENABLE(GGC)
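+    // The owner is a compile-time constant here. If it is unmarked at JIT time,
+    // a runtime mark-byte check can skip the barrier call; if it is already
+    // marked, the barrier is simply called unconditionally.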
+    if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
+        Jump ownerNotMarkedOrAlreadyRemembered = checkMarkByte(owner);
+        callOperation(operationUnconditionalWriteBarrier, owner);
+        ownerNotMarkedOrAlreadyRemembered.link(this);
     } else
-        compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
-    Jump success = jump();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+        callOperation(operationUnconditionalWriteBarrier, owner);
+#else
+    UNUSED_PARAM(owner);
+#endif // ENABLE(GGC)
+}
 
-    if (needsStubLink) {
-        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
-            if (iter->to)
-                patchBuffer.link(iter->from, FunctionPtr(iter->to));
-        }
+void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+{
+    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+    
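+    // Build a stub specialized for the array shape observed at this get_by_val
+    // site. A failed shape check (badType) and all other slow cases return to
+    // the slow-path entry recorded in the ByValInfo.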
+    PatchableJump badType;
+    JumpList slowCases;
+    
+    switch (arrayMode) {
+    case JITInt32:
+        slowCases = emitInt32GetByVal(currentInstruction, badType);
+        break;
+    case JITDouble:
+        slowCases = emitDoubleGetByVal(currentInstruction, badType);
+        break;
+    case JITContiguous:
+        slowCases = emitContiguousGetByVal(currentInstruction, badType);
+        break;
+    case JITArrayStorage:
+        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
+        break;
+    default:
+        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
+        if (isInt(type))
+            slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, type);
+        else 
+            slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, type);
+        break;
     }
+    
+    Jump done = jump();
 
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
-    if (!lastProtoBegin)
-        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
-
-    patchBuffer.link(failureCase, lastProtoBegin);
-
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
-    structure->ref();
-    polymorphicStructures->list[currentIndex].set(entryLabel, structure);
-
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+    
+    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+    
+    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+    
+    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+        m_codeBlock, patchBuffer,
+        ("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+    
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationGetByValGeneric));
 }
 
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
 {
-    // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
-    // referencing the prototype object - let's speculatively load its table nice and early!)
-    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
-    // Check eax is an object of the right Structure.
-    Jump failureCases1 = checkStructure(regT0, structure);
-
-    // Check that the prototype object's Structure has not changed.
-    Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
-    move(ImmPtr(prototypeStructure), regT3);
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
-    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
-    // Checks out okay!
-    bool needsStubLink = false;
-    if (slot.cachedPropertyType() == PropertySlot::Getter) {
-        needsStubLink = true;
-        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
-        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
-        stubCall.addArgument(regT1);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
-        stubCall.call();
-    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
-        needsStubLink = true;
-        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
-        stubCall.addArgument(ImmPtr(protoObject));
-        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
-        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
-        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
-        stubCall.call();
-    } else
-        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
-
-    Jump success = jump();
+    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+    
+    PatchableJump badType;
+    JumpList slowCases;
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+#if ENABLE(GGC)
+    bool needsLinkForWriteBarrier = false;
+#endif
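+    // Contiguous and ArrayStorage puts emit a call to the unconditional write
+    // barrier; because this stub is finalized by hand, that call must be linked
+    // explicitly once the LinkBuffer exists.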
 
-    if (needsStubLink) {
-        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
-            if (iter->to)
-                patchBuffer.link(iter->from, FunctionPtr(iter->to));
-        }
+    switch (arrayMode) {
+    case JITInt32:
+        slowCases = emitInt32PutByVal(currentInstruction, badType);
+        break;
+    case JITDouble:
+        slowCases = emitDoublePutByVal(currentInstruction, badType);
+        break;
+    case JITContiguous:
+        slowCases = emitContiguousPutByVal(currentInstruction, badType);
+#if ENABLE(GGC)
+        needsLinkForWriteBarrier = true;
+#endif
+        break;
+    case JITArrayStorage:
+        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
+#if ENABLE(GGC)
+        needsLinkForWriteBarrier = true;
+#endif
+        break;
+    default:
+        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
+        if (isInt(type))
+            slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, type);
+        else 
+            slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, type);
+        break;
+    }
+    
+    Jump done = jump();
+
+    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+#if ENABLE(GGC)
+    if (needsLinkForWriteBarrier) {
+        ASSERT(m_calls.last().to == operationUnconditionalWriteBarrier);
+        patchBuffer.link(m_calls.last().from, operationUnconditionalWriteBarrier);
+    }
+#endif
+    
+    bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
+    if (!isDirect) {
+        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+            m_codeBlock, patchBuffer,
+            ("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+    } else {
+        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+            m_codeBlock, patchBuffer,
+            ("Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
     }
-
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
-    patchBuffer.link(failureCases1, lastProtoBegin);
-    patchBuffer.link(failureCases2, lastProtoBegin);
-
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
-    structure->ref();
-    prototypeStructure->ref();
-    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
-
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
 }
 
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
 {
-    ASSERT(count);
-    JumpList bucketsOfFail;
-
-    // Check eax is an object of the right Structure.
-    Jump baseObjectCheck = checkStructure(regT0, structure);
-    bucketsOfFail.append(baseObjectCheck);
-
-    Structure* currStructure = structure;
-    RefPtr<Structure>* chainEntries = chain->head();
-    JSObject* protoObject = 0;
-    for (unsigned i = 0; i < count; ++i) {
-        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
-        currStructure = chainEntries[i].get();
-
-        // Check that the prototype object's Structure has not changed.
-        Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
-        move(ImmPtr(currStructure), regT3);
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+    ASSERT(isInt(type));
+    
+    // The array type is tested via the JSType byte in the cell header. The test
+    // must not clobber the registers that hold the base and property.
+
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID resultPayload = regT0;
+    RegisterID scratch = regT3;
 #else
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID resultPayload = regT0;
+    RegisterID resultTag = regT1;
+    RegisterID scratch = regT3;
 #endif
+    
+    JumpList slowCases;
+    
+    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
+    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
+    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
+    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);
+    
+    switch (elementSize(type)) {
+    case 1:
+        if (isSigned(type))
+            load8Signed(BaseIndex(base, property, TimesOne), resultPayload);
+        else
+            load8(BaseIndex(base, property, TimesOne), resultPayload);
+        break;
+    case 2:
+        if (isSigned(type))
+            load16Signed(BaseIndex(base, property, TimesTwo), resultPayload);
+        else
+            load16(BaseIndex(base, property, TimesTwo), resultPayload);
+        break;
+    case 4:
+        load32(BaseIndex(base, property, TimesFour), resultPayload);
+        break;
+    default:
+        CRASH();
     }
-    ASSERT(protoObject);
-    
-    bool needsStubLink = false;
-    if (slot.cachedPropertyType() == PropertySlot::Getter) {
-        needsStubLink = true;
-        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
-        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
-        stubCall.addArgument(regT1);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
-        stubCall.call();
-    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
-        needsStubLink = true;
-        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
-        stubCall.addArgument(ImmPtr(protoObject));
-        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
-        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
-        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
-        stubCall.call();
-    } else
-        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
-    Jump success = jump();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
     
-    if (needsStubLink) {
-        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
-            if (iter->to)
-                patchBuffer.link(iter->from, FunctionPtr(iter->to));
-        }
+    Jump done;
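+    // A uint32 with the sign bit set does not fit in an int32 JSValue, so it is
+    // boxed as a double instead: adding 2^32 to the (negative) signed reading of
+    // the loaded bits recovers the unsigned value.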
+    if (type == TypeUint32) {
+        Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0));
+        
+        convertInt32ToDouble(resultPayload, fpRegT0);
+        addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
+#if USE(JSVALUE64)
+        moveDoubleTo64(fpRegT0, resultPayload);
+        sub64(tagTypeNumberRegister, resultPayload);
+#else
+        moveDoubleToInts(fpRegT0, resultPayload, resultTag);
+#endif
+        
+        done = jump();
+        canBeInt.link(this);
     }
 
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
-
-    patchBuffer.link(bucketsOfFail, lastProtoBegin);
-
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
-    // Track the stub we have created so that it will be deleted later.
-    structure->ref();
-    chain->ref();
-    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
-
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+#if USE(JSVALUE64)
+    or64(tagTypeNumberRegister, resultPayload);
+#else
+    move(TrustedImm32(JSValue::Int32Tag), resultTag);
+#endif
+    if (done.isSet())
+        done.link(this);
+    return slowCases;
 }
 
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
 {
-    ASSERT(count);
-
-    JumpList bucketsOfFail;
+    ASSERT(isFloat(type));
+    
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID resultPayload = regT0;
+    RegisterID scratch = regT3;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID resultPayload = regT0;
+    RegisterID resultTag = regT1;
+    RegisterID scratch = regT3;
+#endif
+    
+    JumpList slowCases;
 
-    // Check eax is an object of the right Structure.
-    bucketsOfFail.append(checkStructure(regT0, structure));
+    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
+    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
+    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
+    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);
+    
+    switch (elementSize(type)) {
+    case 4:
+        loadFloat(BaseIndex(base, property, TimesFour), fpRegT0);
+        convertFloatToDouble(fpRegT0, fpRegT0);
+        break;
+    case 8:
+        loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
+        break;
+    default:
+        CRASH();
+    }
+    
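+    // Purify NaN: arbitrary NaN bit patterns read from the buffer could collide
+    // with the boxed JSValue encoding, so every NaN is canonicalized to PNaN.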
+    Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
+    static const double NaN = PNaN;
+    loadDouble(TrustedImmPtr(&NaN), fpRegT0);
+    notNaN.link(this);
+    
+#if USE(JSVALUE64)
+    moveDoubleTo64(fpRegT0, resultPayload);
+    sub64(tagTypeNumberRegister, resultPayload);
+#else
+    moveDoubleToInts(fpRegT0, resultPayload, resultTag);
+#endif
+    return slowCases;    
+}
 
-    Structure* currStructure = structure;
-    RefPtr<Structure>* chainEntries = chain->head();
-    JSObject* protoObject = 0;
-    for (unsigned i = 0; i < count; ++i) {
-        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
-        currStructure = chainEntries[i].get();
+JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
+{
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+    ASSERT(isInt(type));
+    
+    int value = currentInstruction[3].u.operand;
 
-        // Check that the prototype object's Structure has not changed.
-        Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
-        move(ImmPtr(currStructure), regT3);
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID earlyScratch = regT3;
+    RegisterID lateScratch = regT2;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID earlyScratch = regT3;
+    RegisterID lateScratch = regT1;
+#endif
+    
+    JumpList slowCases;
+    
+    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
+    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
+    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
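+    // An out-of-bounds typed-array store is skipped entirely; the event is
+    // recorded in the array profile so later tiers can stop assuming in-bounds
+    // accesses.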
+    emitArrayProfileOutOfBoundsSpecialCase(profile);
+    Jump done = jump();
+    inBounds.link(this);
+    
+#if USE(JSVALUE64)
+    emitGetVirtualRegister(value, earlyScratch);
+    slowCases.append(emitJumpIfNotImmediateInteger(earlyScratch));
 #else
-        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+    emitLoad(value, lateScratch, earlyScratch);
+    slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
 #endif
+    
+    // We would be loading this into base as in get_by_val, except that the slow
+    // path expects the base to be unclobbered.
+    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);
+    
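+    // Uint8Clamped saturates rather than wraps: negative values are clamped to 0
+    // and values above 0xff to 0xff.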
+    if (isClamped(type)) {
+        ASSERT(elementSize(type) == 1);
+        ASSERT(!isSigned(type));
+        Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
+        Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
+        xor32(earlyScratch, earlyScratch);
+        Jump clamped = jump();
+        tooBig.link(this);
+        move(TrustedImm32(0xff), earlyScratch);
+        clamped.link(this);
+        inBounds.link(this);
     }
-    ASSERT(protoObject);
-
-    bool needsStubLink = false;
-    if (slot.cachedPropertyType() == PropertySlot::Getter) {
-        needsStubLink = true;
-        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
-        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
-        stubCall.addArgument(regT1);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
-        stubCall.call();
-    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
-        needsStubLink = true;
-        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
-        stubCall.addArgument(ImmPtr(protoObject));
-        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
-        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
-        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
-        stubCall.call();
-    } else
-        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
-    Jump success = jump();
-
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
-    if (needsStubLink) {
-        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
-            if (iter->to)
-                patchBuffer.link(iter->from, FunctionPtr(iter->to));
-        }
+    
+    switch (elementSize(type)) {
+    case 1:
+        store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
+        break;
+    case 2:
+        store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo));
+        break;
+    case 4:
+        store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour));
+        break;
+    default:
+        CRASH();
     }
-
-    // Use the patch information to link the failure cases back to the original slow case routine.
-    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
-
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
-    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
-    // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
-
-    // Finally patch the jump to slow case back in the hot path to jump here instead.
-    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
-
-    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+    
+    done.link(this);
+    
+    return slowCases;
 }
 
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
+{
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+    ASSERT(isFloat(type));
+    
+    int value = currentInstruction[3].u.operand;
 
-#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID earlyScratch = regT3;
+    RegisterID lateScratch = regT2;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID earlyScratch = regT3;
+    RegisterID lateScratch = regT1;
+#endif
+    
+    JumpList slowCases;
+    
+    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
+    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
+    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
+    emitArrayProfileOutOfBoundsSpecialCase(profile);
+    Jump done = jump();
+    inBounds.link(this);
+    
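+    // The incoming value may be a boxed int32 or a boxed double; either is
+    // converted to a native double in fpRegT0 before the (possibly narrowing)
+    // store. Anything else takes the slow path.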
+#if USE(JSVALUE64)
+    emitGetVirtualRegister(value, earlyScratch);
+    Jump doubleCase = emitJumpIfNotImmediateInteger(earlyScratch);
+    convertInt32ToDouble(earlyScratch, fpRegT0);
+    Jump ready = jump();
+    doubleCase.link(this);
+    slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
+    add64(tagTypeNumberRegister, earlyScratch);
+    move64ToDouble(earlyScratch, fpRegT0);
+    ready.link(this);
+#else
+    emitLoad(value, lateScratch, earlyScratch);
+    Jump doubleCase = branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag));
+    convertInt32ToDouble(earlyScratch, fpRegT0);
+    Jump ready = jump();
+    doubleCase.link(this);
+    slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag)));
+    moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1);
+    ready.link(this);
+#endif
+    
+    // We would be loading this into base as in get_by_val, except that the slow
+    // path expects the base to be unclobbered.
+    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);
+    
+    switch (elementSize(type)) {
+    case 4:
+        convertDoubleToFloat(fpRegT0, fpRegT0);
+        storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
+        break;
+    case 8:
+        storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight));
+        break;
+    default:
+        CRASH();
+    }
+    
+    done.link(this);
+    
+    return slowCases;
+}
 
 } // namespace JSC
 
 #endif // ENABLE(JIT)
-
-#endif // !USE(JSVALUE32_64)